diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..0f15507c0 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,962 @@ +# Changelog + +## 0.22.2 + +*July 10th, 2018* + +IMPROVEMENTS +- More cleanup post repo merge! +- [docs] Include `ecosystem.json` and `tendermint-bft.md` from deprecated `aib-data` repository. +- [config] Add `instrumentation.max_open_connections`, which limits the number + of requests in flight to the Prometheus server (if enabled). Default: 3. + + +BUG FIXES +- [rpc] Allow unquoted integers in requests + - NOTE: this is only for URI requests. JSONRPC requests and all responses + will use quoted integers (the proto3 JSON standard). +- [consensus] Fix halt on shutdown + +## 0.22.1 + +*July 5th, 2018* + +IMPROVEMENTS + +* Cleanup post repo-merge. +* [docs] Various improvements. + +BUG FIXES + +* [state] Return error when EndBlock returns a 0-power validator that isn't + already in the validator set. +* [consensus] Shut down WAL properly. + +## 0.22.0 + +*July 2nd, 2018* + +BREAKING CHANGES: +- [config] + * Remove `max_block_size_txs` and `max_block_size_bytes` in favor of + consensus params from the genesis file. + * Rename `skip_upnp` to `upnp`, and turn it off by default. + * Change `max_packet_msg_size` back to `max_packet_msg_payload_size` +- [rpc] + * All integers are encoded as strings (part of the update for Amino v0.10.1) + * `syncing` is now called `catching_up` +- [types] Update Amino to v0.10.1 + * Amino is now fully proto3 compatible for the basic types + * JSON-encoded types now use the type name instead of the prefix bytes + * Integers are encoded as strings +- [crypto] Update go-crypto to v0.10.0 and merge into `crypto` + * privKey.Sign returns an error. + * ed25519 address changed to the first 20 bytes of the SHA256 of the raw pubkey bytes + * `tmlibs/merkle` -> `crypto/merkle`. Uses SHA256 instead of RIPEMD160 +- [tmlibs] Update to v0.9.0 and merge into `libs` + * remove `merkle` package (moved to `crypto/merkle`) + +FEATURES +- [cmd] Added metrics (served under `/metrics` using a Prometheus client; + disabled by default). See the new `instrumentation` section in the config and + [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html) + guide. +- [p2p] Add IPv6 support to peering. +- [p2p] Add `external_address` to config to allow specifying the address for + peers to dial + +IMPROVEMENTS +- [rpc/client] Supports https and wss now. +- [crypto] Make public key sizes into public constants +- [mempool] Log tx hash, not entire tx +- [abci] Merged in github.com/tendermint/abci +- [crypto] Merged in github.com/tendermint/go-crypto +- [libs] Merged in github.com/tendermint/tmlibs +- [docs] Move from .rst to .md + +BUG FIXES: +- [rpc] Limit maximum number of HTTP/WebSocket connections + (`rpc.max_open_connections`) and gRPC connections + (`rpc.grpc_max_open_connections`). Check out the "Running In Production" guide if + you want to increase them. +- [rpc] Limit maximum request body size to 1MB (header is limited to 1MB). +- [consensus] Fix a halting bug when `create_empty_blocks=false` +- [p2p] Fix panic in seed mode + +## 0.21.0 + +*June 21st, 2018* + +BREAKING CHANGES + +- [config] Change default ports from 4665X to 2665X. Ports over 32768 are + ephemeral and reserved for use by the kernel.
+- [cmd] `unsafe_reset_all` removes the addrbook.json + +IMPROVEMENTS + +- [pubsub] Set default capacity to 0 +- [docs] Various improvements + +BUG FIXES + +- [consensus] Fix an issue where we don't make blocks after `fast_sync` when `create_empty_blocks=false` +- [mempool] Fix #1761 where we don't process txs if `cache_size=0` +- [rpc] Fix memory leak in Websocket (when using `/subscribe` method) +- [config] Escape paths in config - fixes config paths on Windows + +## 0.20.0 + +*June 6th, 2018* + +This is the first in a series of breaking releases coming to Tendermint after +soliciting developer feedback and conducting security audits. + +This release does not break any blockchain data structures or +protocols other than the ABCI messages between Tendermint and the application. + +Applications that upgrade for ABCI v0.11.0 should be able to continue running Tendermint +v0.20.0 on blockchains created with v0.19.X. + +BREAKING CHANGES + +- [abci] Upgrade to + [v0.11.0](https://github.com/tendermint/abci/blob/master/CHANGELOG.md#0110) +- [abci] Change Query path for filtering peers by node ID from + `p2p/filter/pubkey/` to `p2p/filter/id/` + +## 0.19.9 + +*June 5th, 2018* + +BREAKING CHANGES + +- [types/priv_validator] Moved to top level `privval` package + +FEATURES + +- [config] Collapse PeerConfig into P2PConfig +- [docs] Add quick-install script +- [docs/spec] Add table of Amino prefixes + +BUG FIXES + +- [rpc] Return 404 for unknown endpoints +- [consensus] Flush WAL on stop +- [evidence] Don't send evidence to peers that are behind +- [p2p] Fix memory leak on peer disconnects +- [rpc] Fix panic when `per_page=0` + +## 0.19.8 + +*June 4th, 2018* + +BREAKING: + +- [p2p] Remove `auth_enc` config option; peer connections are now always auth + encrypted. Technically a breaking change, but it seems no one was using it, and + it's arguably a bug fix :) + +BUG FIXES + +- [mempool] Fix deadlock under high load when `skip_timeout_commit=true` and + `create_empty_blocks=false` + +## 0.19.7 + +*May 31st, 2018* + +BREAKING: + +- [libs/pubsub] TagMap#Get returns a string value +- [libs/pubsub] NewTagMap accepts a map of strings + +FEATURES + +- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate +- [p2p] `AllowDuplicateIP` config option; when false, connections from the same IP are refused + - true by default for now, false by default in the next breaking release +- [docs] Add docs for query, tx indexing, events, pubsub +- [docs] Add some notes about running Tendermint in production + +IMPROVEMENTS: + +- [consensus] Consensus reactor now receives events from a separate synchronous event bus, + which is not dependent on external RPC load +- [consensus/wal] do not look for height in older files if we've seen height - 1 +- [docs] Various cleanup and link fixes + +## 0.19.6 + +*May 29th, 2018* + +BUG FIXES + +- [blockchain] Fix fast-sync deadlock during high peer turnover +- [evidence] Don't send peers evidence from heights they haven't synced to yet +- [p2p] Refuse connections to more than one peer with the same IP +- [docs] Various fixes + +## 0.19.5 + +*May 20th, 2018* + +BREAKING CHANGES + +- [rpc/client] TxSearch and UnconfirmedTxs have new arguments (see below) +- [rpc/client] TxSearch returns ResultTxSearch +- [version] Breaking changes to Go APIs will not be reflected in a breaking + version change, but will be included in the changelog.
+ +FEATURES + +- [rpc] `/tx_search` takes `page` (starts at 1) and `per_page` (max 100, default 30) args to paginate results +- [rpc] `/unconfirmed_txs` takes `limit` (max 100, default 30) arg to limit the output +- [config] `mempool.size` and `mempool.cache_size` options + +IMPROVEMENTS + +- [docs] Lots of updates +- [consensus] Only Fsync() the WAL before executing msgs from ourselves + +BUG FIXES + +- [mempool] Enforce upper bound on number of transactions + +## 0.19.4 (May 17th, 2018) + +IMPROVEMENTS + +- [state] Improve tx indexing by using batches +- [consensus, state] Improve logging (more consensus logs, fewer tx logs) +- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...) + +BUG FIXES + +- [consensus] Fix issue #1575 where a late proposer can get stuck + +## 0.19.3 (May 14th, 2018) + +FEATURES + +- [rpc] New `/consensus_state` returns just the votes seen at the current height + +IMPROVEMENTS + +- [rpc] Add stringified votes and fraction of power voted to `/dump_consensus_state` +- [rpc] Add PeerStateStats to `/dump_consensus_state` + +BUG FIXES + +- [cmd] Set GenesisTime during `tendermint init` +- [consensus] fix ValidBlock rules + +## 0.19.2 (April 30th, 2018) + +FEATURES: + +- [p2p] Allow peers with different Minor versions to connect +- [rpc] `/net_info` includes `n_peers` + +IMPROVEMENTS: + +- [p2p] Various code comments, cleanup, error types +- [p2p] Change some Error logs to Debug + +BUG FIXES: + +- [p2p] Fix reconnect to persistent peer when first dial fails +- [p2p] Validate NodeInfo.ListenAddr +- [p2p] Only allow (MaxNumPeers - MaxNumOutboundPeers) inbound peers +- [p2p/pex] Limit max msg size to 64kB +- [p2p] Fix panic when pex=false +- [p2p] Allow multiple IPs per ID in AddrBook +- [p2p] Fix before/after bugs in addrbook isBad() + +## 0.19.1 (April 27th, 2018) + +Note this release includes some small breaking changes in the RPC and one in the +config that are really bug fixes. v0.19.1 will work with existing chains, and make Tendermint +easier to use and debug. With <3 + +BREAKING (MINOR) + +- [config] Removed `wal_light` setting. If you really needed this, let us know + +FEATURES: + +- [networks] moved in tooling from devops repo: terraform and ansible scripts for deploying testnets! +- [cmd] Added `gen_node_key` command + +BUG FIXES + +Some of these are breaking in the RPC response, but they're really bugs! + +- [spec] Document address format and pubkey encoding pre and post Amino +- [rpc] Lower case JSON field names +- [rpc] Fix missing entries, improve, and lower case the fields in `/dump_consensus_state` +- [rpc] Fix NodeInfo.Channels format to hex +- [rpc] Add Validator address to `/status` +- [rpc] Fix `prove` in ABCIQuery +- [cmd] MarshalJSONIndent on init + +## 0.19.0 (April 13th, 2018) + +BREAKING: +- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details, and the example at the end of this section) +- [cmd] `show_node_id` now returns an error if there is no node key +- [rpc] changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status) + +Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is +serialized to disk or sent over the network. + +See github.com/tendermint/go-amino for details on the new format. + +See `scripts/wire2amino.go` for a tool to upgrade +genesis/priv_validator/node_key JSON files. + +FEATURES + +- [test] docker-compose for local testnet setup (thanks Greg!)
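+ +For example, the improved `testnet` command can generate a ready-to-run local network in one shot. A sketch (flag names as of this era; they may differ in your version, so treat `tendermint testnet --help` as authoritative): + +``` +# generate config directories mytestnet/node0..node3, +# with persistent_peers filled in automatically +tendermint testnet --v 4 --o ./mytestnet --populate-persistent-peers --starting-ip-address 192.167.10.2 +```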
+ +## 0.18.0 (April 6th, 2018) + +BREAKING: + +- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0) +- [types] ValidatorSet.GetByAddress returns -1 if no validator found +- [p2p] require all addresses come with an ID no matter what +- [rpc] Listening address must contain tcp:// or unix:// prefix + +FEATURES: + +- [rpc] StartHTTPAndTLSServer (not used yet) +- [rpc] Include validator's voting power in `/status` +- [rpc] `/tx` and `/tx_search` responses now include the transaction hash +- [rpc] Include peer NodeIDs in `/net_info` + +IMPROVEMENTS: +- [config] trim whitespace from elements of lists (like `persistent_peers`) +- [rpc] `/tx_search` results are sorted by height +- [p2p] do not try to connect to ourselves (ok, maybe only once) +- [p2p] seeds respond with a bias towards good peers + +BUG FIXES: +- [rpc] fix subscribing using an abci.ResponseDeliverTx tag +- [rpc] fix tx_indexers matchRange +- [rpc] fix unsubscribing (see tmlibs v0.8.0) + +## 0.17.1 (March 27th, 2018) + +BUG FIXES: +- [types] Actually support `app_state` in genesis as `AppStateJSON` + +## 0.17.0 (March 27th, 2018) + +BREAKING: +- [types] WriteSignBytes -> SignBytes + +IMPROVEMENTS: +- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the next breaking release) +- [docs] note on determinism (docs/determinism.rst) +- [genesis] `app_options` field is deprecated. Please rename it to `app_state` in your genesis file(s). `app_options` will not work in the next breaking release +- [p2p] dial seeds directly without potential peers +- [p2p] exponential backoff for addrs in the address book +- [p2p] mark peer as good if it contributed enough votes or block parts +- [p2p] stop peer if it sends incorrect data, msg to unknown channel, msg we did not expect +- [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address +- [spec] various improvements +- switched from glide to dep internally for package management +- [wire] prep work for upgrading to new go-wire (which is now called go-amino) + +FEATURES: +- [config] exposed `auth_enc` flag to enable/disable encryption +- [config] added the `--p2p.private_peer_ids` flag and `PrivatePeerIDs` config variable (see config for description) +- [rpc] added `/health` endpoint, which returns empty result for now +- [types/priv_validator] new format and socket client, allowing for remote signing + +BUG FIXES: +- [consensus] fix liveness bug by introducing ValidBlock mechanism + +## 0.16.0 (February 20th, 2018) + +BREAKING CHANGES: +- [config] use $TMHOME/config for all config and json files +- [p2p] old `--p2p.seeds` is now `--p2p.persistent_peers` (persistent peers to which TM will always connect) +- [p2p] `--p2p.seeds` is now only used for getting addresses (if addrbook is empty; not persistent) +- [p2p] NodeInfo: remove RemoteAddr and add Channels + - we must have at least one overlapping channel with the peer + - we only send msgs for channels the peer advertised +- [p2p/conn] pong timeout +- [lite] comment out IAVL-related code + +FEATURES: +- [p2p] added new `/dial_peers&persistent=_` **unsafe** endpoint +- [p2p] persistent node key in `$TMHOME/config/node_key.json` +- [p2p] introduce peer ID and authenticate peers by ID using addresses like `ID@IP:PORT` +- [p2p/pex] new seed mode crawls the network and serves as a seed.
+- [config] MempoolConfig.CacheSize +- [config] P2P.SeedMode (`--p2p.seed_mode`) + +IMPROVEMENTS: +- [p2p/pex] stricter rules in the PEX reactor for better handling of abuse +- [p2p] various improvements to code structure including subpackages for `pex` and `conn` +- [docs] new spec! +- [all] speed up the tests! + +BUG FIXES: +- [blockchain] StopPeerForError on timeout +- [consensus] StopPeerForError on a bad Maj23 message +- [state] flush mempool conn before calling commit +- [types] fix priv val signing things that only differ by timestamp +- [mempool] fix memory leak causing zombie peers +- [p2p/conn] fix potential deadlock + +## 0.15.0 (December 29, 2017) + +BREAKING CHANGES: +- [p2p] enable the Peer Exchange reactor by default +- [types] add Timestamp field to Proposal/Vote +- [types] add new fields to Header: TotalTxs, ConsensusParamsHash, LastResultsHash, EvidenceHash +- [types] add Evidence to Block +- [types] simplify ValidateBasic +- [state] updates to support changes to the header +- [state] Enforce that <1/3 of the validator set can change at a time + +FEATURES: +- [state] Send indices of absent validators and addresses of byzantine validators in BeginBlock +- [state] Historical ConsensusParams and ABCIResponses +- [docs] Specification for the base Tendermint data structures. +- [evidence] New evidence reactor for gossiping and managing evidence +- [rpc] `/block_results?height=X` returns the DeliverTx results for a given height. + +IMPROVEMENTS: +- [consensus] Better handling of corrupt WAL file + +BUG FIXES: +- [lite] fix race +- [state] validate block.Header.ValidatorsHash +- [p2p] allow seed addresses to be prefixed with e.g. `tcp://` +- [p2p] use consistent key to refer to peers so we don't try to connect to existing peers +- [cmd] fix `tendermint init` to ignore files that are there and generate files that aren't. + +## 0.14.0 (December 11, 2017) + +BREAKING CHANGES: +- consensus/wal: removed separator +- rpc/client: changed Subscribe/Unsubscribe/UnsubscribeAll function signatures to be identical to the event bus. + +FEATURES: +- new `tendermint lite` command (and `lite/proxy` pkg) for running a light-client RPC proxy. + NOTE it is currently insecure and its APIs are not yet covered by semver + +IMPROVEMENTS: +- rpc/client: can act as event bus subscriber (See https://github.com/tendermint/tendermint/issues/945). +- p2p: use exponential backoff from seconds to hours when attempting to reconnect to persistent peer +- config: moniker defaults to the machine's hostname instead of "anonymous" + +BUG FIXES: +- p2p: no longer exit if one of the seed addresses is incorrect + +## 0.13.0 (December 6, 2017) + +BREAKING CHANGES: +- abci: update to v0.8 using gogo/protobuf; includes tx tags, vote info in RequestBeginBlock, data.Bytes everywhere, use int64, etc.
+- types: block heights are now `int64` everywhere +- types & node: EventSwitch and EventCache have been replaced by EventBus and EventBuffer; event types have been overhauled +- node: EventSwitch methods now refer to EventBus +- rpc/lib/types: RPCResponse is no longer a pointer; WSRPCConnection interface has been modified +- rpc/client: WaitForOneEvent takes an EventsClient instead of types.EventSwitch +- rpc/client: Add/RemoveListenerForEvent are now Subscribe/Unsubscribe +- rpc/core/types: ResultABCIQuery wraps an abci.ResponseQuery +- rpc: `/subscribe` and `/unsubscribe` take `query` arg instead of `event` +- rpc: `/status` returns the LatestBlockTime in human-readable form instead of in nanoseconds +- mempool: cached transactions return an error instead of an ABCI response with BadNonce + +FEATURES: +- rpc: new `/unsubscribe_all` WebSocket RPC endpoint +- rpc: new `/tx_search` endpoint for filtering transactions by more complex queries +- p2p/trust: new trust metric for tracking peers. See ADR-006 +- config: TxIndexConfig allows setting which DeliverTx tags to index + +IMPROVEMENTS: +- New asynchronous events system using `tmlibs/pubsub` +- logging: Various small improvements +- consensus: Graceful shutdown when app crashes +- tests: Fix various non-deterministic errors +- p2p: more defensive programming + +BUG FIXES: +- consensus: fix panic where prs.ProposalBlockParts is not initialized +- p2p: fix panic on bad channel + +## 0.12.1 (November 27, 2017) + +BUG FIXES: +- upgrade tmlibs dependency to enable Windows builds for Tendermint + +## 0.12.0 (October 27, 2017) + +BREAKING CHANGES: + - rpc/client: websocket ResultsCh and ErrorsCh unified in ResponsesCh. + - rpc/client: ABCIQuery no longer takes `prove` + - state: remove GenesisDoc from state. + - consensus: new binary WAL format provides efficiency and uses checksums to detect corruption + - use scripts/wal2json to convert to JSON for debugging + +FEATURES: + - new `certifiers` pkg contains the tendermint light-client library (name subject to change)! + - rpc: `/genesis` includes the `app_options`. + - rpc: `/abci_query` takes an additional `height` parameter to support historical queries. + - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height. + +IMPROVEMENTS: + - rpc: `/genesis` result includes `app_options` + - rpc/lib/client: add jitter to reconnects. + - rpc/lib/types: `RPCError` satisfies the `error` interface.
+ +BUG FIXES: + - rpc/client: fix ws deadlock after stopping + - blockchain: fix panic on AddBlock when peer is nil + - mempool: fix sending on TxsAvailable when a tx has been invalidated + - consensus: don't run WAL catchup if we fast-synced + +## 0.11.1 (October 10, 2017) + +IMPROVEMENTS: + - blockchain/reactor: respondWithNoResponseMessage for missing height + +BUG FIXES: + - rpc: fixed client WebSocket timeout + - rpc: client now resubscribes on reconnection + - rpc: fix panics on missing params + - rpc: fix `/dump_consensus_state` to have normal JSON output (NOTE: technically breaking, but worth a bug fix label) + - types: fixed out-of-range error in VoteSet.addVote + - consensus: fix wal autofile via https://github.com/tendermint/tmlibs/blob/master/CHANGELOG.md#032-october-2-2017 + +## 0.11.0 (September 22, 2017) + +BREAKING: + - genesis file: validator `amount` is now `power` + - abci: Info, BeginBlock, InitChain all take structs + - rpc: various changes to match JSONRPC spec (http://www.jsonrpc.org/specification), including breaking ones: + - requests that previously returned HTTP code 4XX now return 200 with an error code in the JSONRPC. + - `rpctypes.RPCResponse` uses new `RPCError` type instead of `string`. + + - cmd: if there is no genesis, exit immediately instead of waiting around for one to show. + - types: `Signer.Sign` returns an error. + - state: every validator set change is persisted to disk, which required some changes to the `State` structure. + - p2p: new `p2p.Peer` interface used for all reactor methods (instead of `*p2p.Peer` struct). + +FEATURES: + - rpc: `/validators?height=X` allows querying of validators at previous heights. + - rpc: Leaving the `height` param empty for `/block`, `/validators`, and `/commit` will return the value for the latest height. + +IMPROVEMENTS: + - docs: Moved all docs from the website and tools repo in, converted to `.rst`, and cleaned up for presentation on `tendermint.readthedocs.io` + +BUG FIXES: + - fix WAL opening issue on Windows + +## 0.10.4 (September 5, 2017) + +IMPROVEMENTS: +- docs: Added Slate docs to each rpc function (see rpc/core) +- docs: Ported all website docs to Read The Docs +- config: expose some p2p params to tweak performance: RecvRate, SendRate, and MaxMsgPacketPayloadSize +- rpc: Upgrade the websocket client and server, including improved auto reconnect, and proper ping/pong + +BUG FIXES: +- consensus: fix panic on getVoteBitArray +- consensus: hang instead of panicking on byzantine consensus failures +- cmd: don't load config for the version command + +## 0.10.3 (August 10, 2017) + +FEATURES: +- control over empty block production: + - new flag, `--consensus.create_empty_blocks`; when set to false, blocks are only created when there are txs or when the AppHash changes. + - new config option, `consensus.create_empty_blocks_interval`; an empty block is created after this many seconds. + - in normal operation, `create_empty_blocks = true` and `create_empty_blocks_interval = 0`, so blocks are being created all the time (as in all previous versions of tendermint). The number of empty blocks can be reduced by increasing `create_empty_blocks_interval` or by setting `create_empty_blocks = false`. + - new `TxsAvailable()` method added to Mempool that returns a channel which fires when txs are available. + - new heartbeat message added to consensus reactor to notify peers that a node is waiting for txs before entering propose step. +- rpc: Add `syncing` field to response returned by `/status`.
Is `true` while in fast-sync mode. + +IMPROVEMENTS: +- various improvements to documentation and code comments + +BUG FIXES: +- mempool: pass height into constructor so it doesn't always start at 0 + +## 0.10.2 (July 10, 2017) + +FEATURES: +- Enable lower latency block commits by adding consensus reactor sleep durations and p2p flush throttle timeout to the config + +IMPROVEMENTS: +- More detailed logging in the consensus reactor and state machine +- More in-code documentation for many exposed functions, especially in consensus/reactor.go and p2p/switch.go +- Improved readability for some function definitions and code blocks with long lines + +## 0.10.1 (June 28, 2017) + +FEATURES: +- Use `--trace` to get stack traces for logged errors +- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set +- types: GenesisDocFromFile parses a GenesisDoc from a JSON file + +IMPROVEMENTS: +- Add a Code of Conduct +- Variety of improvements as suggested by `megacheck` tool +- rpc: deduplicate tests between rpc/client and rpc/tests +- rpc: addresses without a protocol prefix default to `tcp://`. `http://` is also accepted as an alias for `tcp://` +- cmd: commands are more easily reusable from other tools +- DOCKER: automate build/push + +BUG FIXES: +- Fix log statements using keys with spaces (logger does not currently support spaces) +- rpc: set logger on websocket connection +- rpc: fix ws connection stability by setting write deadline on pings + +## 0.10.0 (June 2, 2017) + +Includes major updates to configuration, logging, and JSON serialization. +Also includes the Grand Repo-Merge of 2017. + +BREAKING CHANGES: + +- Config and Flags: + - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11), +containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig` + - This affects the following flags: + - `--seeds` is now `--p2p.seeds` + - `--node_laddr` is now `--p2p.laddr` + - `--pex` is now `--p2p.pex` + - `--skip_upnp` is now `--p2p.skip_upnp` + - `--rpc_laddr` is now `--rpc.laddr` + - `--grpc_laddr` is now `--rpc.grpc_laddr` + - Any configuration option now within a substruct must come under that heading in the `config.toml`, for instance: + ``` + [p2p] + laddr="tcp://1.2.3.4:46656" + + [consensus] + timeout_propose=1000 + ``` + - Use viper and `DefaultConfig() / TestConfig()` functions to handle defaults, and remove `config/tendermint` and `config/tendermint_test` + - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate the new config + +- Logger + - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`. +See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details + - Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!) + - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger + +- JSON serialization: + - Replace `[TypeByte, Xxx]` with `{"type": "some-type", "data": Xxx}` in RPC and all `.json` files by using `go-wire/data`.
For instance, a public key is now: + ``` + "pub_key": { + "type": "ed25519", + "data": "83DDF8775937A4A12A2704269E2729FCFCD491B933C4B0A7FFE37FE41D7760D0" + } + ``` + - Remove type information about RPC responses, so `[TypeByte, {"jsonrpc": "2.0", ... }]` is now just `{"jsonrpc": "2.0", ... }` + - Change `[]byte` to `data.Bytes` in all serialized types (for hex encoding) + - Lowercase the JSON tags in `ValidatorSet` fields + - Introduce `EventDataInner` for serializing events + +- Other: + - Send InitChain message in handshake if `appBlockHeight == 0` + - Do not include the `Accum` field when computing the validator hash. This makes the ValidatorSetHash unique for a given validator set, rather than changing with every block (as the Accum changes) + - Unsafe RPC calls are not enabled by default. This includes `/dial_seeds`, and all calls prefixed with `unsafe`. Use the `--rpc.unsafe` flag to enable. + + +FEATURES: + +- Per-module log levels. For instance, the new default is `state:info,*:error`, which means the `state` package logs at `info` level, and everything else logs at `error` level +- Log if a node is a validator or not in every consensus round +- Use ldflags to set git hash as part of the version +- Ignore `address` and `pub_key` fields in `priv_validator.json` and overwrite them with the values derived from the `priv_key` + +IMPROVEMENTS: + +- Merge `tendermint/go-p2p -> tendermint/tendermint/p2p` and `tendermint/go-rpc -> tendermint/tendermint/rpc/lib` +- Update paths for grand repo merge: + - `go-common -> tmlibs/common` + - `go-data -> go-wire/data` + - All other `go-` libs, except `go-crypto` and `go-wire`, are merged under `tmlibs` +- No global loggers (loggers are passed into constructors, or preferably set with a SetLogger method) +- Return HTTP status codes with errors for RPC responses +- Limit `/blockchain_info` call to return a maximum of 20 blocks +- Use `.Wrap()` and `.Unwrap()` instead of e.g. `PubKeyS` for `go-crypto` types +- RPC JSON responses use pretty printing (via `json.MarshalIndent`) +- Color code different instances of the consensus for tests +- Isolate viper to `cmd/tendermint/commands` and do not read config from file for tests + + +## 0.9.2 (April 26, 2017) + +BUG FIXES: + +- Fix bug in `ResetPrivValidator` where we were using the global config and log (causing external consumers, e.g. basecoin, to fail). + +## 0.9.1 (April 21, 2017) + +FEATURES: + +- Transaction indexing - txs are indexed by their hash using a simple key-value store; easily extended to more advanced indexers +- New `/tx?hash=X` endpoint to query for transactions and their DeliverTx result by hash.
Optionally returns a proof of the tx's inclusion in the block +- `tendermint testnet` command initializes files for a testnet + +IMPROVEMENTS: + +- CLI now uses Cobra framework +- TMROOT is now TMHOME (TMROOT will stop working in 0.10.0) +- `/broadcast_tx_XXX` also returns the Hash (can be used to query for the tx) +- `/broadcast_tx_commit` also returns the height the block was committed in +- ABCIResponses struct persisted to disk before calling Commit; makes handshake replay much cleaner +- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0) +- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks + +BUG FIXES: + +- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later +- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save() + +## 0.9.0 (March 6, 2017) + +BREAKING CHANGES: + +- Update ABCI to v0.4.0, where Query is now `Query(RequestQuery) ResponseQuery`, enabling precise proofs at particular heights: + +``` +message RequestQuery{ + bytes data = 1; + string path = 2; + uint64 height = 3; + bool prove = 4; +} + +message ResponseQuery{ + CodeType code = 1; + int64 index = 2; + bytes key = 3; + bytes value = 4; + bytes proof = 5; + uint64 height = 6; + string log = 7; +} +``` + + +- `BlockMeta` data type unifies its Hash and PartSetHash under a `BlockID`: + +``` +type BlockMeta struct { + BlockID BlockID `json:"block_id"` // the block hash and partsethash + Header *Header `json:"header"` // The block's Header +} +``` + +- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes. 
+ +- `tendermint gen_validator` command output is now pure JSON + +FEATURES: + +- New RPC endpoint `/commit?height=X` returns header and commit for block at height `X` +- Client API for each endpoint, including mocks for testing + +IMPROVEMENTS: + +- `Node` is now a `BaseService` +- Simplified starting Tendermint in-process from another application +- Better organized Makefile +- Scripts for auto-building binaries across platforms +- Docker image improved, slimmed down (using Alpine), and changed from tendermint/tmbase to tendermint/tendermint +- New repo files: `CONTRIBUTING.md`, GitHub `ISSUE_TEMPLATE`, `CHANGELOG.md` +- Improvements on CircleCI for managing build/test artifacts +- Handshake replay is done through the consensus package, possibly using a mockApp +- Graceful shutdown of RPC listeners +- Tests for the PEX reactor and DialSeeds + +BUG FIXES: + +- Check peer.Send for failure before updating PeerState in consensus +- Fix panic in `/dial_seeds` with invalid addresses +- Fix proposer selection logic in ValidatorSet by taking the address into account in the `accumComparable` +- Fix inconsistencies with `ValidatorSet.Proposer` across restarts by persisting it in the `State` + + +## 0.8.0 (January 13, 2017) + +BREAKING CHANGES: + +- New data type `BlockID` to represent blocks: + +``` +type BlockID struct { + Hash []byte `json:"hash"` + PartsHeader PartSetHeader `json:"parts"` +} +``` + +- `Vote` data type now includes validator address and index: + +``` +type Vote struct { + ValidatorAddress []byte `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` + Height int `json:"height"` + Round int `json:"round"` + Type byte `json:"type"` + BlockID BlockID `json:"block_id"` // zero if vote is nil. + Signature crypto.Signature `json:"signature"` +} +``` + +- Update TMSP to v0.3.0, where it is now called ABCI and AppendTx is DeliverTx +- Hex strings in the RPC are now "0x" prefixed + + +FEATURES: + +- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23, +in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts: + +``` +type VoteSetMaj23Message struct { + Height int + Round int + Type byte + BlockID types.BlockID +} +``` + +- Configurable block part set size +- Validator set changes +- Optionally skip TimeoutCommit if we have all the votes +- Handshake between Tendermint and App on startup to sync latest state and ensure consistent recovery from crashes +- GRPC server for BroadcastTx endpoint + +IMPROVEMENTS: + +- Less verbose logging +- Better test coverage (37% -> 49%) +- Canonical SignBytes for signable types +- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile +- Better in-process testing for the consensus reactor and byzantine faults +- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points +- Better abstraction over timeout mechanics + +BUG FIXES: + +- Fix memory leak in mempool peer +- Fix panic on POLRound=-1 +- Actually set the CommitTime +- Actually send BeginBlock message +- Fix a liveness issue caused by Byzantine proposals/votes. Uses the new `Maj23Msg`.
+ + +## 0.7.4 (December 14, 2016) + +FEATURES: + +- Enable the Peer Exchange reactor with the `--pex` flag for a more resilient gossip network (feature still in development, beware dragons) + +IMPROVEMENTS: + +- Remove restrictions on RPC endpoint `/dial_seeds` to enable manual network configuration + +## 0.7.3 (October 20, 2016) + +IMPROVEMENTS: + +- Type safe FireEvent +- More WAL/replay tests +- Cleanup some docs + +BUG FIXES: + +- Fix deadlock in mempool for synchronous apps +- Replay handles non-empty blocks +- Fix race condition in HeightVoteSet + +## 0.7.2 (September 11, 2016) + +BUG FIXES: + +- Set mustConnect=false so tendermint will retry connecting to the app + +## 0.7.1 (September 10, 2016) + +FEATURES: + +- New TMSP connection for Query/Info +- New RPC endpoints: + - `tmsp_query` + - `tmsp_info` +- Allow application to filter peers through Query (off by default) + +IMPROVEMENTS: + +- TMSP connection type enforced at compile time +- All listen/client URLs use a "tcp://" or "unix://" prefix + +BUG FIXES: + +- Save LastSignature/LastSignBytes to `priv_validator.json` for recovery +- Fix event unsubscribe +- Fix fastsync/blockchain reactor + +## 0.7.0 (August 7, 2016) + +BREAKING CHANGES: + +- Strict SemVer starting now! +- Update to ABCI v0.2.0 +- Validation types now called Commit +- NewBlock event only returns the block header + + +FEATURES: + +- TMSP and RPC support TCP and UNIX sockets +- Additional config options, including block size and consensus parameters +- New WAL mode `cswal_light`; logs only the validator's own votes +- New RPC endpoints: + - for starting/stopping profilers, and for updating config + - `/broadcast_tx_commit`, returns when tx is included in a block, else an error + - `/unsafe_flush_mempool`, empties the mempool + + +IMPROVEMENTS: + +- Various optimizations +- Remove bad or invalidated transactions from the mempool cache (allows later duplicates) +- More elaborate testing using CircleCI including benchmarking throughput on 4 DigitalOcean droplets + +BUG FIXES: + +- Various fixes to WAL and replay logic +- Various race conditions + +## PreHistory + +Strict versioning only began with the release of v0.7.0, in late summer 2016. +The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year. +Through the course of 2015, in collaboration with Eris Industries (now Monax Industries), +many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine. +That implementation now forms the heart of [Burrow](https://github.com/hyperledger/burrow). +In the latter half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation. + +By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the +invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP). +The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine +driving an application running in another process. +The ABCI interface and implementation were iterated on and improved over the course of 2016, +until versioned history kicked in with v0.7.0.
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..d47c0f15e --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,56 @@ +# The Tendermint Code of Conduct +This code of conduct applies to all projects run by the Tendermint/COSMOS team and hence to Tendermint. + + +---- + + +# Conduct +## Contact: adrian@tendermint.com + +* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. + +* On Slack, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. + +* Please be kind and courteous. There’s no need to be mean or rude. + +* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. + +* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. + +* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you are unclear about what that concept includes, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups. + +* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back. + +* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome. + + +---- + + +# Moderation +These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the person mentioned above. + +1. Remarks that violate the Tendermint/COSMOS standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) + +2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. + +3. Moderators will first respond to such remarks with a warning. + +4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of the communication channel to cool off. + +5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded. + +6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology. + +7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, in private. Complaints about bans in-channel are not allowed. + +8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
+ +In the Tendermint/COSMOS community we strive to go the extra step to look out for each other. Don’t just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they’re off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. + +And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could’ve communicated better — remember that it’s your responsibility to make your fellow Cosmonauts comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. + +The enforcement policies listed above apply to all official Tendermint/COSMOS venues. For other projects adopting the Tendermint/COSMOS Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion. + +*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling), the [Contributor Covenant v1.3.0](http://contributor-covenant.org/version/1/3/0/) and the [Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html).* diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..3500732f5 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,117 @@ +# Contributing + +Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards. + +Please follow standard GitHub best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with! + +Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file. + +## Forking + +Please note that Go requires code to live under absolute paths, which complicates forking. +While my fork lives at `https://github.com/ebuchman/tendermint`, +the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`. +Instead, we use `git remote` to add the fork as a new remote for the original repo, +`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there. + +For instance, to create a fork and work on a branch of it, I would: + + * Create the fork on github, using the fork button. + * Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`) + * `git remote rename origin upstream` + * `git remote add origin git@github.com:ebuchman/tendermint.git` + +Now `origin` refers to my fork and `upstream` refers to the tendermint version. +So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there. +Of course, replace `ebuchman` with your git handle.
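+ +Putting those steps together, a typical setup session looks like this (a sketch — substitute your own handle for `ebuchman`): + +``` +cd $GOPATH/src/github.com/tendermint/tendermint +git remote rename origin upstream +git remote add origin git@github.com:ebuchman/tendermint.git +git push -u origin master +```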
+ +To pull in updates from the original (now `upstream`) repo, run + + * `git fetch upstream` + * `git rebase upstream/master` (or whatever branch you want) + +Please don't make Pull Requests to `master`. + +## Dependencies + +We use [dep](https://github.com/golang/dep) to manage dependencies. + +That said, the master branch of every Tendermint repository should just build +with `go get`, which means they should be kept up-to-date with their +dependencies so we can get away with telling people they can just `go get` our +software. + +Since some dependencies are not under our control, a third party may break our +build, in which case we can fall back on `dep ensure` (or `make +get_vendor_deps`). Even for dependencies under our control, dep helps us to +keep multiple repos in sync as they evolve. Anything with an executable, such +as apps, tools, and the core, should use dep. + +Run `dep status` to get a list of vendor dependencies that may not be +up-to-date. + +## Vagrant + +If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started +hacking Tendermint with the commands below. + +NOTE: In case you installed Vagrant in 2017, you might need to run +`vagrant box update` to upgrade to the latest `ubuntu/xenial64`. + +``` +vagrant up +vagrant ssh +make test +``` + +## Testing + +All repos should be hooked up to [CircleCI](https://circleci.com/). + +If they have `.go` files in the root directory, they will be automatically +tested by circle using `go test -v -race ./...`. If not, they will need a +`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and +includes its continuous integration status using a badge in the `README.md`. + +## Branching Model and Release + +User-facing repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/. +That is, these repos should be well versioned, and any merge to master requires a version bump and tagged release. + +Libraries need not follow the model strictly, but would be wise to, +especially `go-p2p` and `go-rpc`, as their versions are referenced in Tendermint Core.
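+ +Concretely, a typical feature contribution against `develop` per the procedures below looks like this (a sketch; the branch name is illustrative): + +``` +git checkout develop +git checkout -b my-feature +# ... hack, commit ... +git fetch upstream +git rebase upstream/develop # rebase on top of develop before submitting the PR +git push -u origin my-feature # then open a pull request against develop +```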
+ +### Development Procedure: +- the latest state of development is on `develop` +- `develop` must never fail `make test` +- no `--force` pushes onto `develop` (except when reverting a broken commit, which should seldom happen) +- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`) +- before submitting a pull request, begin `git rebase` on top of `develop` + +### Pull Merge Procedure: +- ensure pull branch is rebased on develop +- run `make test` to ensure that all tests pass +- merge pull request +- the `unstable` branch may be used to aggregate pull merges before testing them all at once +- the push master may request that pull requests be rebased on top of `unstable` + +### Release Procedure: +- start on `develop` +- run integration tests (see `test_integrations` in Makefile) +- prepare changelog/release issue +- bump versions +- push to `release-vX.X.X` to run the extended integration tests on the CI +- merge to master +- merge master back to develop + +### Hotfix Procedure: +- start on `master` +- checkout a new branch named `hotfix-vX.X.X` +- make the required changes + - these changes should be small and an absolute necessity + - add a note to CHANGELOG.md +- bump versions +- push to `hotfix-vX.X.X` to run the extended integration tests on the CI +- merge `hotfix-vX.X.X` to master +- merge `hotfix-vX.X.X` to develop +- delete the `hotfix-vX.X.X` branch diff --git a/DOCKER/.gitignore b/DOCKER/.gitignore new file mode 100644 index 000000000..9059c6848 --- /dev/null +++ b/DOCKER/.gitignore @@ -0,0 +1 @@ +tendermint diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile new file mode 100644 index 000000000..4a855f425 --- /dev/null +++ b/DOCKER/Dockerfile @@ -0,0 +1,39 @@ +FROM alpine:3.7 +MAINTAINER Greg Szabo + +# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json +# (unless you change `genesis_file` in config.toml). You can put your config.toml and +# private validator file into /tendermint/config. +# +# The /tendermint/data dir is used by tendermint to store state. +ENV TMHOME /tendermint + +# OS environment setup +# Set user right away for determinism, create directory for persistence and give our user ownership +# jq and curl used for extracting `pub_key` from private validator while +# deploying tendermint with Kubernetes. It is nice to have bash so that users +# can execute bash commands. +RUN apk update && \ + apk upgrade && \ + apk --no-cache add curl jq bash && \ + addgroup tmuser && \ + adduser -S -G tmuser tmuser -h "$TMHOME" + +# Run the container with tmuser by default.
(UID=100, GID=1000) +USER tmuser + +# Expose the data directory as a volume since there's mutable state in there +VOLUME [ $TMHOME ] + +WORKDIR $TMHOME + +# p2p and rpc port +EXPOSE 26656 26657 + +ENTRYPOINT ["/usr/bin/tendermint"] +CMD ["node", "--moniker=`hostname`"] +STOPSIGNAL SIGTERM + +ARG BINARY=tendermint +COPY $BINARY /usr/bin/tendermint + diff --git a/DOCKER/Dockerfile.abci b/DOCKER/Dockerfile.abci new file mode 100644 index 000000000..c6ec05f69 --- /dev/null +++ b/DOCKER/Dockerfile.abci @@ -0,0 +1,23 @@ +FROM golang:latest + +RUN mkdir -p /go/src/github.com/tendermint/abci +WORKDIR /go/src/github.com/tendermint/abci + +COPY Makefile /go/src/github.com/tendermint/abci/ + +# see make protoc for details on ldconfig +RUN make get_protoc && ldconfig + +# killall is used in tests +RUN apt-get update && apt-get install -y \ + psmisc \ + && rm -rf /var/lib/apt/lists/* + +COPY Gopkg.toml /go/src/github.com/tendermint/abci/ +COPY Gopkg.lock /go/src/github.com/tendermint/abci/ +RUN make get_tools + +# see https://github.com/golang/dep/issues/1312 +RUN dep ensure -vendor-only + +COPY . /go/src/github.com/tendermint/abci diff --git a/DOCKER/Dockerfile.develop b/DOCKER/Dockerfile.develop new file mode 100644 index 000000000..5759e7658 --- /dev/null +++ b/DOCKER/Dockerfile.develop @@ -0,0 +1,35 @@ +FROM alpine:3.7 + +ENV DATA_ROOT /tendermint +ENV TMHOME $DATA_ROOT + +RUN addgroup tmuser && \ + adduser -S -G tmuser tmuser + +RUN mkdir -p $DATA_ROOT && \ + chown -R tmuser:tmuser $DATA_ROOT + +RUN apk add --no-cache bash curl jq + +ENV GOPATH /go +ENV PATH "$PATH:/go/bin" +RUN mkdir -p /go/src/github.com/tendermint/tendermint && \ + apk add --no-cache go build-base git && \ + cd /go/src/github.com/tendermint/tendermint && \ + git clone https://github.com/tendermint/tendermint . && \ + git checkout develop && \ + make get_tools && \ + make get_vendor_deps && \ + make install && \ + cd - && \ + rm -rf /go/src/github.com/tendermint/tendermint && \ + apk del go build-base git + +VOLUME $DATA_ROOT + +EXPOSE 26656 +EXPOSE 26657 + +ENTRYPOINT ["tendermint"] + +CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"] diff --git a/DOCKER/Dockerfile.testing b/DOCKER/Dockerfile.testing new file mode 100644 index 000000000..b82afe2a8 --- /dev/null +++ b/DOCKER/Dockerfile.testing @@ -0,0 +1,18 @@ +FROM golang:1.10.1 + + +# Grab deps (jq, hexdump, xxd, killall) +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + jq bsdmainutils vim-common psmisc netcat + +# Add testing deps for curl +RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \ + apt-get update && \ + apt-get install -y --no-install-recommends curl + +VOLUME /go + +EXPOSE 26656 +EXPOSE 26657 + diff --git a/DOCKER/Makefile b/DOCKER/Makefile new file mode 100644 index 000000000..32510ebbb --- /dev/null +++ b/DOCKER/Makefile @@ -0,0 +1,16 @@ +build: + @sh -c "'$(CURDIR)/build.sh'" + +push: + @sh -c "'$(CURDIR)/push.sh'" + +build_develop: + docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop . + +build_testing: + docker build --tag tendermint/testing -f ./Dockerfile.testing . 
+ +push_develop: + docker push "tendermint/tendermint:develop" + +.PHONY: build build_develop build_testing push push_develop diff --git a/DOCKER/README.md b/DOCKER/README.md new file mode 100644 index 000000000..43edce0fc --- /dev/null +++ b/DOCKER/README.md @@ -0,0 +1,67 @@ +# Docker + +## Supported tags and respective `Dockerfile` links + +- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile) +- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile) +- `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile) +- `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile) +- `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile) +- `0.11.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile) +- `0.10.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile) +- `0.9.1`, `0.9` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile) +- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile) +- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile) +- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop) + +The `develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch. + +## Quick reference + +* **Where to get help:** + https://cosmos.network/community + +* **Where to file issues:** + https://github.com/tendermint/tendermint/issues + +* **Supported Docker versions:** + [the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis) + +## Tendermint + +Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines. + +For more background, see the [introduction](https://tendermint.readthedocs.io/en/master/introduction.html). + +To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html). + +## How to use this image + +### Start one instance of the Tendermint core with the `kvstore` app + +A quick example of a built-in app and Tendermint core in one container. + +``` +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore +``` + +## Local cluster + +To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run: + +``` +make build-linux +make build-docker-localnode +make localnet-start +``` + +Note that this will build and use a different image than the ones provided here. + +## License + +- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE). + +## Contributing + +Contributions are most welcome!
See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information. diff --git a/DOCKER/build.sh b/DOCKER/build.sh new file mode 100755 index 000000000..ee617cc63 --- /dev/null +++ b/DOCKER/build.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -e + +# Get the tag from the version, or try to figure it out. +if [ -z "$TAG" ]; then + TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go) +fi +if [ -z "$TAG" ]; then + echo "Please specify a tag." + exit 1 +fi + +TAG_NO_PATCH=${TAG%.*} + +read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r +echo +if [[ $REPLY =~ ^[Yy]$ ]] +then + docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" . +fi diff --git a/DOCKER/push.sh b/DOCKER/push.sh new file mode 100755 index 000000000..32741dce8 --- /dev/null +++ b/DOCKER/push.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +set -e + +# Get the tag from the version, or try to figure it out. +if [ -z "$TAG" ]; then + TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go) +fi +if [ -z "$TAG" ]; then + echo "Please specify a tag." + exit 1 +fi + +TAG_NO_PATCH=${TAG%.*} + +read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r +echo +if [[ $REPLY =~ ^[Yy]$ ]] +then + docker push "tendermint/tendermint:latest" + docker push "tendermint/tendermint:$TAG" + docker push "tendermint/tendermint:$TAG_NO_PATCH" +fi diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..b1beaa208 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,419 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + branch = "master" + name = "github.com/btcsuite/btcd" + packages = ["btcec"] + revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64" + +[[projects]] + branch = "master" + name = "github.com/btcsuite/btcutil" + packages = [ + "base58", + "bech32" + ] + revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/ebuchman/fail-test" + packages = ["."] + revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" + +[[projects]] + branch = "master" + name = "github.com/fortytw2/leaktest" + packages = ["."] + revision = "b008db64ef8daabb22ff6daa557f33b41d8f6ccd" + +[[projects]] + name = "github.com/fsnotify/fsnotify" + packages = ["."] + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + +[[projects]] + name = "github.com/go-kit/kit" + packages = [ + "log", + "log/level", + "log/term", + "metrics", + "metrics/discard", + "metrics/internal/lv", + "metrics/prometheus" + ] + revision = "4dc7be5d2d12881735283bcab7352178e190fc71" + version = "v0.6.0" + +[[projects]] + name = "github.com/go-logfmt/logfmt" + packages = ["."] + revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" + version = "v0.3.0" + +[[projects]] + name = "github.com/go-stack/stack" + packages = ["."] + revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" + version = "v1.7.0" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "gogoproto", + "jsonpb", + "proto", + "protoc-gen-gogo/descriptor", 
+ "sortkeys", + "types" + ] + revision = "1adfc126b41513cc696b209667c8656ea7aac67c" + version = "v1.0.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp" + ] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + +[[projects]] + name = "github.com/gorilla/websocket" + packages = ["."] + revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" + version = "v1.2.0" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token" + ] + revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" + +[[projects]] + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + branch = "master" + name = "github.com/jmhodges/levigo" + packages = ["."] + revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" + +[[projects]] + branch = "master" + name = "github.com/kr/logfmt" + packages = ["."] + revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" + +[[projects]] + name = "github.com/magiconair/properties" + packages = ["."] + revision = "c2353362d570a7bfa228149c62842019201cfb71" + version = "v1.8.0" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" + +[[projects]] + name = "github.com/pelletier/go-toml" + packages = ["."] + revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" + version = "v1.2.0" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp" + ] + revision = "d6a9817c4afc94d51115e4a30d449056a3fbf547" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs" + ] + revision = "40f013a808ec4fa79def444a1a56de4d1727efcb" + +[[projects]] + branch = "master" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + revision = "e2704e165165ec55d062f5919b4b29494e9fa790" + +[[projects]] + name = "github.com/spf13/afero" + packages = [ + ".", + "mem" + ] + revision = "787d034dfe70e44075ccc060d346146ef53270ad" + version = "v1.1.1" + +[[projects]] + name = "github.com/spf13/cast" + packages = ["."] + revision = "8965335b8c7107321228e3e3702cab9832751bac" + version = "v1.2.0" + +[[projects]] + 
name = "github.com/spf13/cobra" + packages = ["."] + revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" + version = "v0.0.1" + +[[projects]] + branch = "master" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "583c0c0531f06d5278b7d917446061adc344b5cd" + version = "v1.0.1" + +[[projects]] + name = "github.com/spf13/viper" + packages = ["."] + revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + version = "v1.2.2" + +[[projects]] + branch = "master" + name = "github.com/syndtr/goleveldb" + packages = [ + "leveldb", + "leveldb/cache", + "leveldb/comparer", + "leveldb/errors", + "leveldb/filter", + "leveldb/iterator", + "leveldb/journal", + "leveldb/memdb", + "leveldb/opt", + "leveldb/storage", + "leveldb/table", + "leveldb/util" + ] + revision = "e2150783cd35f5b607daca48afd8c57ec54cc995" + +[[projects]] + branch = "master" + name = "github.com/tendermint/ed25519" + packages = [ + ".", + "edwards25519", + "extra25519" + ] + revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" + +[[projects]] + name = "github.com/tendermint/go-amino" + packages = ["."] + revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" + version = "0.10.1" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = [ + "bcrypt", + "blowfish", + "chacha20poly1305", + "curve25519", + "hkdf", + "internal/chacha20", + "internal/subtle", + "nacl/box", + "nacl/secretbox", + "openpgp/armor", + "openpgp/errors", + "poly1305", + "ripemd160", + "salsa20/salsa" + ] + revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "netutil", + "trace" + ] + revision = "4cb1c02c05b0e749b0365f61ae859a8e0cfceed9" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = [ + "cpu", + "unix" + ] + revision = "7138fd3d9dc8335c567ca206f4333fb75eb05d56" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable" + ] + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport" + ] + revision = "d11072e7ca9811b1100b80ca0269ac831f06d024" + version = "v1.11.3" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = 
"6e854634d6c203278ce83bef7725cecbcf90023b0d0e440fb3374acedacbd5ad" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..ecce0e417 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,95 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/ebuchman/fail-test" + branch = "master" + +[[constraint]] + name = "github.com/fortytw2/leaktest" + branch = "master" + +[[constraint]] + name = "github.com/go-kit/kit" + version = "=0.6.0" + +[[constraint]] + name = "github.com/gogo/protobuf" + version = "=1.0.0" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "=1.0.0" + +[[constraint]] + name = "github.com/gorilla/websocket" + version = "~1.2.0" + +[[constraint]] + name = "github.com/pkg/errors" + version = "=0.8.0" + +[[constraint]] + name = "github.com/rcrowley/go-metrics" + branch = "master" + +[[constraint]] + name = "github.com/spf13/cobra" + version = "=0.0.1" + +[[constraint]] + name = "github.com/spf13/viper" + version = "=1.0.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "~1.2.1" + +[[constraint]] + name = "github.com/tendermint/go-amino" + version = "~0.10.1" + +[[constraint]] + name = "google.golang.org/grpc" + version = "~1.11.3" + +# this got updated and broke, so locked to an old working commit ... +[[override]] + name = "google.golang.org/genproto" + revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "github.com/prometheus/client_golang" + branch = "master" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..bb66bb350 --- /dev/null +++ b/LICENSE @@ -0,0 +1,204 @@ +Tendermint Core +License: Apache2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 All in Bits, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..b929dbe5c --- /dev/null +++ b/Makefile @@ -0,0 +1,310 @@ +GOTOOLS = \ + github.com/mitchellh/gox \ + github.com/golang/dep/cmd/dep \ + gopkg.in/alecthomas/gometalinter.v2 \ + github.com/gogo/protobuf/protoc-gen-gogo \ + github.com/gogo/protobuf/gogoproto \ + github.com/square/certstrap +PACKAGES=$(shell go list ./... 
| grep -v '/vendor/')
+INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
+BUILD_TAGS?=tendermint
+BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
+
+all: check build test install
+
+check: check_tools ensure_deps
+
+
+########################################
+### Build Tendermint
+
+build:
+	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
+
+build_race:
+	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
+
+install:
+	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
+
+########################################
+### Build ABCI
+
+protoc_abci:
+	## If you get the following error,
+	## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
+	## See https://stackoverflow.com/a/25518702
+	protoc $(INCLUDE) --gogo_out=plugins=grpc:. abci/types/*.proto
+	@echo "--> adding nolint declarations to protobuf generated files"
+	@awk '/package types/ { print "//nolint: gas"; print; next }1' abci/types/types.pb.go > abci/types/types.pb.go.new
+	@mv abci/types/types.pb.go.new abci/types/types.pb.go
+
+build_abci:
+	@go build -i ./abci/cmd/...
+
+install_abci:
+	@go install ./abci/cmd/...
+
+########################################
+### Distribution
+
+# dist builds binaries for all platforms and packages them for distribution
+# TODO add abci to these scripts
+dist:
+	@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
+
+########################################
+### Tools & dependencies
+
+check_tools:
+	@# https://stackoverflow.com/a/25668869
+	@echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\
+        $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))"
+
+get_tools:
+	@echo "--> Installing tools"
+	go get -u -v $(GOTOOLS)
+	@gometalinter.v2 --install
+
+update_tools:
+	@echo "--> Updating tools"
+	@go get -u $(GOTOOLS)
+
+#Run this from CI
+get_vendor_deps:
+	@rm -rf vendor/
+	@echo "--> Running dep"
+	@dep ensure -vendor-only
+
+
+#Run this locally.
+ensure_deps:
+	@rm -rf vendor/
+	@echo "--> Running dep"
+	@dep ensure
+
+#For ABCI and libs
+get_protoc:
+	@# https://github.com/google/protobuf/releases
+	curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \
+		cd protobuf-3.4.1 && \
+		DIST_LANG=cpp ./configure && \
+		make && \
+		make install && \
+		cd .. && \
+		rm -rf protobuf-3.4.1
+
+draw_deps:
+	@# requires brew install graphviz or apt-get install graphviz
+	go get github.com/RobotsAndPencils/goviz
+	@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
+
+get_deps_bin_size:
+	@# Copy of build recipe with additional flags to perform binary size analysis
+	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1))
+	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
+	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
+
+########################################
+### Libs
+
+protoc_libs:
+	## If you get the following error,
+	## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
+	## See https://stackoverflow.com/a/25518702
+	protoc $(INCLUDE) --go_out=plugins=grpc:. libs/common/*.proto
+	@echo "--> adding nolint declarations to protobuf generated files"
+	@awk '/package common/ { print "//nolint: gas"; print; next }1' libs/common/types.pb.go > libs/common/types.pb.go.new
+	@mv libs/common/types.pb.go.new libs/common/types.pb.go
+
+gen_certs: clean_certs
+	## Generating certificates for TLS testing...
+	certstrap init --common-name "tendermint.com" --passphrase ""
+	certstrap request-cert -ip "::" --passphrase ""
+	certstrap sign "::" --CA "tendermint.com" --passphrase ""
+	mv out/::.crt out/::.key db/remotedb
+
+clean_certs:
+	## Cleaning TLS testing certificates...
+	rm -rf out
+	rm -f db/remotedb/::.crt db/remotedb/::.key
+
+test_libs: gen_certs
+	GOCACHE=off go test -tags gcc $(shell go list ./... | grep -v vendor)
+	make clean_certs
+
+grpc_dbserver:
+	protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
+
+########################################
+### Testing
+
+## required to be run first by most tests
+build_docker_test_image:
+	docker build -t tester -f ./test/docker/Dockerfile .
+
+### coverage, app, persistence, and libs tests
+test_cover:
+	# run the go unit tests with coverage
+	bash test/test_cover.sh
+
+test_apps:
+	# run the app tests using bash
+	# requires `abci-cli` and `tendermint` binaries installed
+	bash test/app/test.sh
+
+test_abci_apps:
+	bash abci/tests/test_app/test.sh
+
+test_abci_cli:
+	# test the cli against the examples in the tutorial at:
+	# ./docs/abci-cli.md
+	# if test fails, update the docs ^
+	@ bash abci/tests/test_cli/test.sh
+
+test_persistence:
+	# run the persistence tests using bash
+	# requires `abci-cli` installed
+	docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh
+
+	# TODO undockerize
+	# bash test/persist/test_failure_indices.sh
+
+test_p2p:
+	docker rm -f rsyslog || true
+	rm -rf test/logs || true
+	mkdir test/logs
+	cd test/
+	docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog
+	cd ..
+	# requires 'tester' the image from above
+	bash test/p2p/test.sh tester
+
+test_integrations:
+	make build_docker_test_image
+	make get_tools
+	make get_vendor_deps
+	make install
+	make test_cover
+	make test_apps
+	make test_abci_apps
+	make test_abci_cli
+	make test_libs
+	make test_persistence
+	make test_p2p
+
+test_release:
+	@go test -tags release $(PACKAGES)
+
+test100:
+	@for i in {1..100}; do make test; done
+
+vagrant_test:
+	vagrant up
+	vagrant ssh -c 'make test_integrations'
+
+### go tests
+test:
+	@echo "--> Running go test"
+	@go test $(PACKAGES)
+
+test_race:
+	@echo "--> Running go test --race"
+	@go test -v -race $(PACKAGES)
+
+
+########################################
+### Formatting, linting, and vetting
+
+fmt:
+	@go fmt ./...
+
+metalinter:
+	@echo "--> Running linter"
+	@gometalinter.v2 --vendor --deadline=600s --disable-all \
+		--enable=deadcode \
+		--enable=gosimple \
+		--enable=misspell \
+		--enable=safesql \
+		./...
+		#--enable=gas \
+		#--enable=maligned \
+		#--enable=dupl \
+		#--enable=errcheck \
+		#--enable=goconst \
+		#--enable=gocyclo \
+		#--enable=goimports \
+		#--enable=golint \ <== comments on anything exported
+		#--enable=gotype \
+		#--enable=ineffassign \
+		#--enable=interfacer \
+		#--enable=megacheck \
+		#--enable=staticcheck \
+		#--enable=structcheck \
+		#--enable=unconvert \
+		#--enable=unparam \
+		#--enable=unused \
+		#--enable=varcheck \
+		#--enable=vet \
+		#--enable=vetshadow \
+
+metalinter_all:
+	@echo "--> Running linter (all)"
+	gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
+
+###########################################################
+### Docker image
+
+build-docker:
+	cp build/tendermint DOCKER/tendermint
+	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
+	rm -rf DOCKER/tendermint
+
+###########################################################
+### Local testnet using docker
+
+# Build linux binary on other platforms
+build-linux:
+	GOOS=linux GOARCH=amd64 $(MAKE) build
+
+build-docker-localnode:
+	cd networks/local && make
+
+# Run a 4-node testnet locally
+localnet-start: localnet-stop
+	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
+	docker-compose up
+
+# Stop testnet
+localnet-stop:
+	docker-compose down
+
+###########################################################
+### Remote full-nodes (sentry) using terraform and ansible
+
+# Server management
+sentry-start:
+	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
+	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
+	cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
+	@if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi
+	cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
+	@echo "Next step: Add your validator setup in the genesis.json and config.toml files and run \"make sentry-config\". (Public key of validator, chain ID, peer IP and node ID.)"
+
+# Configuration management
+sentry-config:
+	cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build
+
+sentry-stop:
+	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
+	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
+
+# meant for the CI, inspect script & adapt accordingly
+build-slate:
+	bash scripts/slate.sh
+
+# To avoid unintended conflicts with file names, always add to .PHONY
+# unless there is a reason not to.
+# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html +.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate diff --git a/README.md b/README.md new file mode 100644 index 000000000..2f7d13cdd --- /dev/null +++ b/README.md @@ -0,0 +1,135 @@ +# Tendermint + +[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance) +[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication). +Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short. + +[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) +[![API Reference]( +https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 +)](https://godoc.org/github.com/tendermint/tendermint) +[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm) +[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org) +[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) +[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) + + +Branch | Tests | Coverage +----------|-------|---------- +master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) +develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) + +Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - +and securely replicates it on many machines. + +For protocol details, see [the specification](/docs/spec). + +## A Note on Production Readiness + +While Tendermint is being used in production in private, permissioned +environments, we are still working actively to harden and audit it in preparation +for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/). +We are also still making breaking changes to the protocol and the APIs. +Thus we tag the releases as *alpha software*. + +In any case, if you intend to run Tendermint in production, +please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :) + +## Security + +To report a security vulnerability, see our [bug bounty +program](https://tendermint.com/security). 
+
+For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md).
+
+## Minimum requirements
+
+Requirement|Notes
+---|---
+Go version | Go1.9 or higher
+
+## Install
+
+See the [install instructions](/docs/install.md).
+
+## Quick Start
+
+- [Single node](/docs/using-tendermint.md)
+- [Local cluster using docker-compose](/networks/local)
+- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.md)
+- [Join the public testnet](https://cosmos.network/testnet)
+
+## Resources
+
+### Tendermint Core
+
+For details about the blockchain data structures and the p2p protocols, see the
+[Tendermint specification](/docs/spec).
+
+For details on using the software, see [Read The Docs](https://tendermint.readthedocs.io/en/master/).
+Additional information about some - and eventually all - of the sub-projects below can be found at Read The Docs.
+
+### Sub-projects
+
+* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3
+* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
+
+### Tools
+
+* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools)
+
+### Applications
+
+* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk), a cryptocurrency application framework
+* [Ethermint](http://github.com/tendermint/ethermint), Ethereum on Tendermint
+* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications)
+
+### More
+
+* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
+* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
+* [Tendermint Blog](https://blog.cosmos.network/tendermint/home)
+* [Cosmos Blog](https://blog.cosmos.network)
+
+## Contributing
+
+Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md).
+
+## Versioning
+
+### SemVer
+
+Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes.
+According to SemVer, anything in the public API can change at any time before version 1.0.0.
+
+To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
+to signal breaking changes across a subset of the total public API. This subset includes all
+interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
+include the in-process Go APIs.
+
+That said, breaking changes in the following packages will be documented in the
+CHANGELOG even if they don't lead to MINOR version bumps:
+
+- types
+- rpc/client
+- config
+- node
+
+Exported objects in these packages that are not covered by the versioning scheme
+are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any
+time without notice. Functions, types, and values in any other package may also change at any time.
+
+### Upgrades
+
+In an effort to avoid accumulating technical debt prior to 1.0.0,
+we do not guarantee that breaking changes (ie. bumps in the MINOR version)
+will work with existing tendermint blockchains. In these cases you will
+have to start a new blockchain, or write something custom to get the old
+data into the new chain.
+
+However, any bump in the PATCH version should be compatible with existing histories
+(if not, please open an [issue](https://github.com/tendermint/tendermint/issues)).
+
+## Code of Conduct
+
+Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 000000000..60c284333
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,23 @@
+# Roadmap
+
+BREAKING CHANGES:
+- Better support for injecting randomness
+- Upgrade consensus for more real-time use of evidence
+
+FEATURES:
+- Use the chain as its own CA for nodes and validators
+- Tooling to run multiple blockchains/apps, possibly in a single process
+- State syncing (without transaction replay)
+- Add authentication and rate-limiting to the RPC
+
+IMPROVEMENTS:
+- Improve subtleties around mempool caching and logic
+- Consensus optimizations:
+  - cache block parts for faster agreement after round changes
+  - propagate block parts rarest first
+- Better testing of the consensus state machine (ie. use a DSL)
+- Auto-compiled serialization/deserialization code instead of go-wire reflection
+
+BUG FIXES:
+- Graceful handling/recovery for apps that have non-determinism or fail to halt
+- Graceful handling/recovery for violations of safety, or liveness
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 000000000..8b9793782
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,71 @@
+# Security
+
+As part of our [Coordinated Vulnerability Disclosure
+Policy](https://tendermint.com/security), we operate a bug bounty.
+See the policy for more details on submissions and rewards.
+
+Here is a list of examples of the kinds of bugs we're most interested in:
+
+## Specification
+
+- Conceptual flaws
+- Ambiguities, inconsistencies, or incorrect statements
+- Mismatch between specification and implementation of any component
+
+## Consensus
+
+Assuming less than 1/3 of the voting power is Byzantine (malicious):
+
+- Validation of blockchain data structures, including blocks, block parts,
+  votes, and so on
+- Execution of blocks
+- Validator set changes
+- Proposer round robin
+- Two nodes committing conflicting blocks for the same height (safety failure)
+- A correct node signing conflicting votes
+- A node halting (liveness failure)
+- Syncing new and old nodes
+
+## Networking
+
+- Authenticated encryption (MITM, information leakage)
+- Eclipse attacks
+- Sybil attacks
+- Long-range attacks
+- Denial-of-Service
+
+## RPC
+
+- Write-access to anything besides sending transactions
+- Denial-of-Service
+- Leakage of secrets
+
+## Denial-of-Service
+
+Attacks may come through the P2P network or the RPC:
+
+- Amplification attacks
+- Resource abuse
+- Deadlocks and race conditions
+- Panics and unhandled errors
+
+## Libraries
+
+- Serialization (Amino)
+- Reading/Writing files and databases
+- Logging and monitoring
+
+## Cryptography
+
+- Elliptic curves for validator signatures
+- Hash algorithms and Merkle trees for block validation
+- Authenticated encryption for P2P connections
+
+## Light Client
+
+- Validation of blockchain data structures
+- Correctly validating an incorrect proof
+- Incorrectly validating a correct proof
+- Syncing validator set changes
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 000000000..095a6b061
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,58 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/xenial64"
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = 4096
+    v.cpus = 2
+  end
+
+  config.vm.provision "shell", inline: <<-SHELL
+    apt-get update
+
+    # install base requirements
+    apt-get install -y --no-install-recommends wget curl jq zip \
+      make shellcheck bsdmainutils psmisc
+    apt-get install -y language-pack-en
+
+    # install docker
+    apt-get install -y --no-install-recommends apt-transport-https \
+      ca-certificates curl software-properties-common
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+    add-apt-repository \
+      "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+      $(lsb_release -cs) \
+      stable"
+    apt-get install -y docker-ce
+    usermod -a -G docker vagrant
+
+    # install go
+    wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz
+    tar -xvf go1.10.1.linux-amd64.tar.gz
+    mv go /usr/local
+    rm -f go1.10.1.linux-amd64.tar.gz
+
+    # cleanup
+    apt-get autoremove -y
+
+    # set env variables
+    echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
+    echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
+    echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
+    echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
+    echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
+
+    mkdir -p /home/vagrant/go/bin
+    mkdir -p /home/vagrant/go/src/github.com/tendermint
+    ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
+
+    chown -R vagrant:vagrant /home/vagrant/go
+    chown vagrant:vagrant /home/vagrant/.bash_profile
+
+    # get all deps and tools, ready to install/test
+    su - vagrant -c 'source /home/vagrant/.bash_profile'
+    su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps'
+  SHELL
+end
diff --git a/abci/README.md b/abci/README.md
new file mode 100644
index 000000000..6de9f7069
--- /dev/null
+++ b/abci/README.md
@@ -0,0 +1,168 @@
+# Application BlockChain Interface (ABCI)
+
+[![CircleCI](https://circleci.com/gh/tendermint/abci.svg?style=svg)](https://circleci.com/gh/tendermint/abci)
+
+Blockchains are systems for multi-master state machine replication.
+**ABCI** is an interface that defines the boundary between the replication engine (the blockchain),
+and the state machine (the application).
+Using a socket protocol, a consensus engine running in one process
+can manage an application state running in another.
+
+Previously, the ABCI was referred to as TMSP.
+
+The community has provided a number of additional implementations; see the [Tendermint Ecosystem](https://tendermint.com/ecosystem).
+
+## Specification
+
+A detailed description of the ABCI methods and message types is contained in:
+
+- [A prose specification](specification.md)
+- [A protobuf file](https://github.com/tendermint/abci/blob/master/types/types.proto)
+- [A Go interface](https://github.com/tendermint/abci/blob/master/types/application.go)
+
+For more background information on ABCI, motivations, and tendermint, please visit [the documentation](http://tendermint.readthedocs.io/en/master/).
+The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.
+
+## Protocol Buffers
+
+To compile the protobuf file, run:
+
+```
+make protoc
+```
+
+See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
+for details on compiling for other languages. Note we also include a [GRPC](http://www.grpc.io/docs)
+service definition.
+
+## Install ABCI-CLI
+
+The `abci-cli` is a simple tool for debugging ABCI servers and running some
+example apps.
+To install it:
+
+```
+go get github.com/tendermint/abci
+cd $GOPATH/src/github.com/tendermint/abci
+make get_vendor_deps
+make install
+```
+
+## Implementation
+
+We provide three implementations of the ABCI in Go:
+
+- Golang in-process
+- ABCI-socket
+- GRPC
+
+Note the GRPC version is maintained primarily to simplify onboarding and prototyping and is not receiving the same
+attention to security and performance as the others.
+
+### In Process
+
+The simplest implementation just uses function calls within Go.
+This means ABCI applications written in Golang can be compiled with Tendermint Core and run as a single binary.
+
+See the [examples](#examples) below for more information.
+
+### Socket (TSP)
+
+ABCI is best implemented as a streaming protocol.
+The socket implementation provides for asynchronous, ordered message passing over unix or tcp.
+Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers).
+
+For example, if the Protobuf3 encoded ABCI message is `0xDEADBEEF` (4 bytes), the length-prefixed message is `0x08DEADBEEF`, since `0x08` is the signed varint
+encoding of `4`. If the Protobuf3 encoded ABCI message is 65535 bytes long, the length-prefixed message would be like `0xFEFF07...`.
+
+Note the benefit of using this `varint` encoding over the old version is that
+it is the standard way to encode integers in Protobuf. It is also generally shorter.
+(A short Go sketch of this framing appears after the Counter example below.)
+
+### GRPC
+
+GRPC is an RPC framework native to Protocol Buffers with support in many languages.
+Implementing the ABCI using GRPC can allow for faster prototyping, but is expected to be much slower than
+the ordered, asynchronous socket protocol. The implementation has also not received as much testing or review.
+
+Note the length-prefixing used in the socket implementation does not apply for GRPC.
+
+## Usage
+
+The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server.
+For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below).
+It can also be used to run some example applications.
+See [the documentation](http://tendermint.readthedocs.io/en/master/) for more details.
+
+### Examples
+
+Check out the variety of example applications in the [example directory](example/).
+It also contains the code referred to by the `counter` and `kvstore` apps; these apps come
+built into the `abci-cli` binary.
+
+#### Counter
+
+The `abci-cli counter` application illustrates nonce checking in transactions. Its code looks like:
+
+```golang
+func cmdCounter(cmd *cobra.Command, args []string) error {
+
+	app := counter.NewCounterApplication(flagSerial)
+
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+
+	// Start the listener
+	srv, err := server.NewServer(flagAddrC, flagAbci, app)
+	if err != nil {
+		return err
+	}
+	srv.SetLogger(logger.With("module", "abci-server"))
+	if err := srv.Start(); err != nil {
+		return err
+	}
+
+	// Wait forever
+	cmn.TrapSignal(func() {
+		// Cleanup
+		srv.Stop()
+	})
+	return nil
+}
+```
+
+and can be found in [this file](cmd/abci-cli/abci-cli.go).
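+
+As promised above, here is a minimal sketch of the Socket (TSP) framing in Go. It is
+illustrative only (not code from this repository, and `writeLengthPrefixed` is a
+hypothetical helper), assuming nothing beyond the standard library:
+
+```golang
+import (
+	"encoding/binary"
+	"io"
+)
+
+// writeLengthPrefixed frames msg as described in the Socket (TSP) section:
+// the message length is encoded as a signed (zigzag) varint and prepended.
+func writeLengthPrefixed(w io.Writer, msg []byte) error {
+	var buf [binary.MaxVarintLen64]byte
+	n := binary.PutVarint(buf[:], int64(len(msg))) // signed varint: 4 -> 0x08
+	if _, err := w.Write(buf[:n]); err != nil {
+		return err
+	}
+	_, err := w.Write(msg)
+	return err
+}
+```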
+ +#### kvstore + +The `abci-cli kvstore` application, which illustrates a simple key-value Merkle tree + +```golang +func cmdKVStore(cmd *cobra.Command, args []string) error { + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + + // Create the application - in memory or persisted to disk + var app types.Application + if flagPersist == "" { + app = kvstore.NewKVStoreApplication() + } else { + app = kvstore.NewPersistentKVStoreApplication(flagPersist) + app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) + } + + // Start the listener + srv, err := server.NewServer(flagAddrD, flagAbci, app) + if err != nil { + return err + } + srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { + return err + } + + // Wait forever + cmn.TrapSignal(func() { + // Cleanup + srv.Stop() + }) + return nil +} +``` diff --git a/abci/client/client.go b/abci/client/client.go new file mode 100644 index 000000000..558588107 --- /dev/null +++ b/abci/client/client.go @@ -0,0 +1,129 @@ +package abcicli + +import ( + "fmt" + "sync" + + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + dialRetryIntervalSeconds = 3 + echoRetryIntervalSeconds = 1 +) + +// Client defines an interface for an ABCI client. +// All `Async` methods return a `ReqRes` object. +// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error. +// Note these are client errors, eg. ABCI socket connectivity issues. +// Application-related errors are reflected in response via ABCI error codes and logs. +type Client interface { + cmn.Service + + SetResponseCallback(Callback) + Error() error + + FlushAsync() *ReqRes + EchoAsync(msg string) *ReqRes + InfoAsync(types.RequestInfo) *ReqRes + SetOptionAsync(types.RequestSetOption) *ReqRes + DeliverTxAsync(tx []byte) *ReqRes + CheckTxAsync(tx []byte) *ReqRes + QueryAsync(types.RequestQuery) *ReqRes + CommitAsync() *ReqRes + InitChainAsync(types.RequestInitChain) *ReqRes + BeginBlockAsync(types.RequestBeginBlock) *ReqRes + EndBlockAsync(types.RequestEndBlock) *ReqRes + + FlushSync() error + EchoSync(msg string) (*types.ResponseEcho, error) + InfoSync(types.RequestInfo) (*types.ResponseInfo, error) + SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error) + DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) + CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) + QuerySync(types.RequestQuery) (*types.ResponseQuery, error) + CommitSync() (*types.ResponseCommit, error) + InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) + BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) + EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) +} + +//---------------------------------------- + +// NewClient returns a new ABCI client of the specified transport type. 
+// It returns an error if the transport is not "socket" or "grpc" +func NewClient(addr, transport string, mustConnect bool) (client Client, err error) { + switch transport { + case "socket": + client = NewSocketClient(addr, mustConnect) + case "grpc": + client = NewGRPCClient(addr, mustConnect) + default: + err = fmt.Errorf("Unknown abci transport %s", transport) + } + return +} + +//---------------------------------------- + +type Callback func(*types.Request, *types.Response) + +//---------------------------------------- + +type ReqRes struct { + *types.Request + *sync.WaitGroup + *types.Response // Not set atomically, so be sure to use WaitGroup. + + mtx sync.Mutex + done bool // Gets set to true once *after* WaitGroup.Done(). + cb func(*types.Response) // A single callback that may be set. +} + +func NewReqRes(req *types.Request) *ReqRes { + return &ReqRes{ + Request: req, + WaitGroup: waitGroup1(), + Response: nil, + + done: false, + cb: nil, + } +} + +// Sets the callback for this ReqRes atomically. +// If reqRes is already done, calls cb immediately. +// NOTE: reqRes.cb should not change if reqRes.done. +// NOTE: only one callback is supported. +func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) { + reqRes.mtx.Lock() + + if reqRes.done { + reqRes.mtx.Unlock() + cb(reqRes.Response) + return + } + + defer reqRes.mtx.Unlock() + reqRes.cb = cb +} + +func (reqRes *ReqRes) GetCallback() func(*types.Response) { + reqRes.mtx.Lock() + defer reqRes.mtx.Unlock() + return reqRes.cb +} + +// NOTE: it should be safe to read reqRes.cb without locks after this. +func (reqRes *ReqRes) SetDone() { + reqRes.mtx.Lock() + reqRes.done = true + reqRes.mtx.Unlock() +} + +func waitGroup1() (wg *sync.WaitGroup) { + wg = &sync.WaitGroup{} + wg.Add(1) + return +} diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go new file mode 100644 index 000000000..502ee0fcd --- /dev/null +++ b/abci/client/grpc_client.go @@ -0,0 +1,301 @@ +package abcicli + +import ( + "fmt" + "net" + "sync" + "time" + + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var _ Client = (*grpcClient)(nil) + +// A stripped copy of the remoteClient that makes +// synchronous calls using grpc +type grpcClient struct { + cmn.BaseService + mustConnect bool + + client types.ABCIApplicationClient + + mtx sync.Mutex + addr string + err error + resCb func(*types.Request, *types.Response) // listens to all callbacks +} + +func NewGRPCClient(addr string, mustConnect bool) *grpcClient { + cli := &grpcClient{ + addr: addr, + mustConnect: mustConnect, + } + cli.BaseService = *cmn.NewBaseService(nil, "grpcClient", cli) + return cli +} + +func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) { + return cmn.Connect(addr) +} + +func (cli *grpcClient) OnStart() error { + if err := cli.BaseService.OnStart(); err != nil { + return err + } +RETRY_LOOP: + for { + conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc)) + if err != nil { + if cli.mustConnect { + return err + } + cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr)) + time.Sleep(time.Second * dialRetryIntervalSeconds) + continue RETRY_LOOP + } + + cli.Logger.Info("Dialed server. 
Waiting for echo.", "addr", cli.addr)
+		client := types.NewABCIApplicationClient(conn)
+
+	ENSURE_CONNECTED:
+		for {
+			_, err := client.Echo(context.Background(), &types.RequestEcho{"hello"}, grpc.FailFast(true))
+			if err == nil {
+				break ENSURE_CONNECTED
+			}
+			cli.Logger.Error("Echo failed", "err", err)
+			time.Sleep(time.Second * echoRetryIntervalSeconds)
+		}
+
+		cli.client = client
+		return nil
+	}
+}
+
+func (cli *grpcClient) OnStop() {
+	cli.BaseService.OnStop()
+	cli.mtx.Lock()
+	defer cli.mtx.Unlock()
+	// TODO: how to close conn? it's not a net.Conn and grpc doesn't expose a Close()
+	/*if cli.client.conn != nil {
+		cli.client.conn.Close()
+	}*/
+}
+
+func (cli *grpcClient) StopForError(err error) {
+	cli.mtx.Lock()
+	if !cli.IsRunning() {
+		// NOTE: unlock before the early return, or the mutex stays held forever
+		cli.mtx.Unlock()
+		return
+	}
+
+	if cli.err == nil {
+		cli.err = err
+	}
+	cli.mtx.Unlock()
+
+	cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
+	cli.Stop()
+}
+
+func (cli *grpcClient) Error() error {
+	cli.mtx.Lock()
+	defer cli.mtx.Unlock()
+	return cli.err
+}
+
+// Set listener for all responses
+// NOTE: callback may get internally generated flush responses.
+func (cli *grpcClient) SetResponseCallback(resCb Callback) {
+	cli.mtx.Lock()
+	defer cli.mtx.Unlock()
+	cli.resCb = resCb
+}
+
+//----------------------------------------
+// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
+// (eg. the mempool expects to be able to lock to remove bad txs from cache).
+// To accommodate, we finish each call in its own go-routine,
+// which is expensive, but easy - if you want something better, use the socket protocol!
+// maybe one day, if people really want it, we use grpc streams,
+// but hopefully not :D
+
+func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
+	req := types.ToRequestEcho(msg)
+	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
+	if err != nil {
+		cli.StopForError(err)
+	}
+	return cli.finishAsyncCall(req, &types.Response{&types.Response_Echo{res}})
+}
+
+func (cli *grpcClient) FlushAsync() *ReqRes {
+	req := types.ToRequestFlush()
+	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
+	if err != nil {
+		cli.StopForError(err)
+	}
+	return cli.finishAsyncCall(req, &types.Response{&types.Response_Flush{res}})
+}
+
+func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
+	req := types.ToRequestInfo(params)
+	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
+	if err != nil {
+		cli.StopForError(err)
+	}
+	return cli.finishAsyncCall(req, &types.Response{&types.Response_Info{res}})
+}
+
+func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
+	req := types.ToRequestSetOption(params)
+	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
+	if err != nil {
+		cli.StopForError(err)
+	}
+	return cli.finishAsyncCall(req, &types.Response{&types.Response_SetOption{res}})
+}
+
+func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
+	req := types.ToRequestDeliverTx(tx)
+	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
+	if err != nil {
+		cli.StopForError(err)
+	}
+	return cli.finishAsyncCall(req, &types.Response{&types.Response_DeliverTx{res}})
+}
+
+func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
+	req := types.ToRequestCheckTx(tx)
+	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
+	if err != nil {
cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{&types.Response_CheckTx{res}}) +} + +func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes { + req := types.ToRequestQuery(params) + res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{&types.Response_Query{res}}) +} + +func (cli *grpcClient) CommitAsync() *ReqRes { + req := types.ToRequestCommit() + res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{&types.Response_Commit{res}}) +} + +func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes { + req := types.ToRequestInitChain(params) + res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{&types.Response_InitChain{res}}) +} + +func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes { + req := types.ToRequestBeginBlock(params) + res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{&types.Response_BeginBlock{res}}) +} + +func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes { + req := types.ToRequestEndBlock(params) + res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{&types.Response_EndBlock{res}}) +} + +func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes { + reqres := NewReqRes(req) + reqres.Response = res // Set response + reqres.Done() // Release waiters + reqres.SetDone() // so reqRes.SetCallback will run the callback + + // go routine for callbacks + go func() { + // Notify reqRes listener if set + if cb := reqres.GetCallback(); cb != nil { + cb(res) + } + + // Notify client listener if set + if cli.resCb != nil { + cli.resCb(reqres.Request, res) + } + }() + return reqres +} + +//---------------------------------------- + +func (cli *grpcClient) FlushSync() error { + return nil +} + +func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) { + reqres := cli.EchoAsync(msg) + // StopForError should already have been called if error is set + return reqres.Response.GetEcho(), cli.Error() +} + +func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { + reqres := cli.InfoAsync(req) + return reqres.Response.GetInfo(), cli.Error() +} + +func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) { + reqres := cli.SetOptionAsync(req) + return reqres.Response.GetSetOption(), cli.Error() +} + +func (cli *grpcClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) { + reqres := cli.DeliverTxAsync(tx) + return reqres.Response.GetDeliverTx(), cli.Error() +} + +func (cli *grpcClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) { + reqres := cli.CheckTxAsync(tx) + return reqres.Response.GetCheckTx(), cli.Error() +} + +func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { + reqres := cli.QueryAsync(req) + return reqres.Response.GetQuery(), cli.Error() +} 
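+
+// Example (illustrative only; not part of the original source): a hypothetical
+// caller constructing a gRPC client and making a synchronous Info request.
+// The address is an assumption; error handling is elided.
+//
+//	cli, err := abcicli.NewClient("127.0.0.1:26658", "grpc", true)
+//	if err != nil { /* handle */ }
+//	if err := cli.Start(); err != nil { /* handle */ }
+//	res, err := cli.InfoSync(types.RequestInfo{})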
+ +func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) { + reqres := cli.CommitAsync() + return reqres.Response.GetCommit(), cli.Error() +} + +func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) { + reqres := cli.InitChainAsync(params) + return reqres.Response.GetInitChain(), cli.Error() +} + +func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + reqres := cli.BeginBlockAsync(params) + return reqres.Response.GetBeginBlock(), cli.Error() +} + +func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) { + reqres := cli.EndBlockAsync(params) + return reqres.Response.GetEndBlock(), cli.Error() +} diff --git a/abci/client/local_client.go b/abci/client/local_client.go new file mode 100644 index 000000000..3d1f8d8e4 --- /dev/null +++ b/abci/client/local_client.go @@ -0,0 +1,230 @@ +package abcicli + +import ( + "sync" + + types "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var _ Client = (*localClient)(nil) + +type localClient struct { + cmn.BaseService + mtx *sync.Mutex + types.Application + Callback +} + +func NewLocalClient(mtx *sync.Mutex, app types.Application) *localClient { + if mtx == nil { + mtx = new(sync.Mutex) + } + cli := &localClient{ + mtx: mtx, + Application: app, + } + cli.BaseService = *cmn.NewBaseService(nil, "localClient", cli) + return cli +} + +func (app *localClient) SetResponseCallback(cb Callback) { + app.mtx.Lock() + defer app.mtx.Unlock() + app.Callback = cb +} + +// TODO: change types.Application to include Error()? +func (app *localClient) Error() error { + return nil +} + +func (app *localClient) FlushAsync() *ReqRes { + // Do nothing + return newLocalReqRes(types.ToRequestFlush(), nil) +} + +func (app *localClient) EchoAsync(msg string) *ReqRes { + return app.callback( + types.ToRequestEcho(msg), + types.ToResponseEcho(msg), + ) +} + +func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes { + app.mtx.Lock() + res := app.Application.Info(req) + app.mtx.Unlock() + return app.callback( + types.ToRequestInfo(req), + types.ToResponseInfo(res), + ) +} + +func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes { + app.mtx.Lock() + res := app.Application.SetOption(req) + app.mtx.Unlock() + return app.callback( + types.ToRequestSetOption(req), + types.ToResponseSetOption(res), + ) +} + +func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes { + app.mtx.Lock() + res := app.Application.DeliverTx(tx) + app.mtx.Unlock() + return app.callback( + types.ToRequestDeliverTx(tx), + types.ToResponseDeliverTx(res), + ) +} + +func (app *localClient) CheckTxAsync(tx []byte) *ReqRes { + app.mtx.Lock() + res := app.Application.CheckTx(tx) + app.mtx.Unlock() + return app.callback( + types.ToRequestCheckTx(tx), + types.ToResponseCheckTx(res), + ) +} + +func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes { + app.mtx.Lock() + res := app.Application.Query(req) + app.mtx.Unlock() + return app.callback( + types.ToRequestQuery(req), + types.ToResponseQuery(res), + ) +} + +func (app *localClient) CommitAsync() *ReqRes { + app.mtx.Lock() + res := app.Application.Commit() + app.mtx.Unlock() + return app.callback( + types.ToRequestCommit(), + types.ToResponseCommit(res), + ) +} + +func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes { + app.mtx.Lock() + res := app.Application.InitChain(req) + reqRes := app.callback( + 
types.ToRequestInitChain(req), + types.ToResponseInitChain(res), + ) + app.mtx.Unlock() + return reqRes +} + +func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes { + app.mtx.Lock() + res := app.Application.BeginBlock(req) + app.mtx.Unlock() + return app.callback( + types.ToRequestBeginBlock(req), + types.ToResponseBeginBlock(res), + ) +} + +func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { + app.mtx.Lock() + res := app.Application.EndBlock(req) + app.mtx.Unlock() + return app.callback( + types.ToRequestEndBlock(req), + types.ToResponseEndBlock(res), + ) +} + +//------------------------------------------------------- + +func (app *localClient) FlushSync() error { + return nil +} + +func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) { + return &types.ResponseEcho{msg}, nil +} + +func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { + app.mtx.Lock() + res := app.Application.Info(req) + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) { + app.mtx.Lock() + res := app.Application.SetOption(req) + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) { + app.mtx.Lock() + res := app.Application.DeliverTx(tx) + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) { + app.mtx.Lock() + res := app.Application.CheckTx(tx) + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { + app.mtx.Lock() + res := app.Application.Query(req) + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) CommitSync() (*types.ResponseCommit, error) { + app.mtx.Lock() + res := app.Application.Commit() + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { + app.mtx.Lock() + res := app.Application.InitChain(req) + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + app.mtx.Lock() + res := app.Application.BeginBlock(req) + app.mtx.Unlock() + return &res, nil +} + +func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { + app.mtx.Lock() + res := app.Application.EndBlock(req) + app.mtx.Unlock() + return &res, nil +} + +//------------------------------------------------------- + +func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes { + app.Callback(req, res) + return newLocalReqRes(req, res) +} + +func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes { + reqRes := NewReqRes(req) + reqRes.Response = res + reqRes.SetDone() + return reqRes +} diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go new file mode 100644 index 000000000..c3f88725c --- /dev/null +++ b/abci/client/socket_client.go @@ -0,0 +1,399 @@ +package abcicli + +import ( + "bufio" + "container/list" + "errors" + "fmt" + "net" + "reflect" + "sync" + "time" + + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const reqQueueSize = 256 // TODO make configurable +// const maxResponseSize = 1048576 // 1MB TODO make configurable +const flushThrottleMS = 20 // Don't wait longer than... 
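+
+// NOTE: writes are batched; sendRequestsRoutine buffers outgoing requests and
+// only flushes the underlying connection when it sees an explicit Flush
+// request, which the throttle timer injects at most once per flushThrottleMS.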
+ +var _ Client = (*socketClient)(nil) + +// This is goroutine-safe, but users should beware that +// the application in general is not meant to be interfaced +// with concurrent callers. +type socketClient struct { + cmn.BaseService + + reqQueue chan *ReqRes + flushTimer *cmn.ThrottleTimer + mustConnect bool + + mtx sync.Mutex + addr string + conn net.Conn + err error + reqSent *list.List + resCb func(*types.Request, *types.Response) // listens to all callbacks + +} + +func NewSocketClient(addr string, mustConnect bool) *socketClient { + cli := &socketClient{ + reqQueue: make(chan *ReqRes, reqQueueSize), + flushTimer: cmn.NewThrottleTimer("socketClient", flushThrottleMS), + mustConnect: mustConnect, + + addr: addr, + reqSent: list.New(), + resCb: nil, + } + cli.BaseService = *cmn.NewBaseService(nil, "socketClient", cli) + return cli +} + +func (cli *socketClient) OnStart() error { + if err := cli.BaseService.OnStart(); err != nil { + return err + } + + var err error + var conn net.Conn +RETRY_LOOP: + for { + conn, err = cmn.Connect(cli.addr) + if err != nil { + if cli.mustConnect { + return err + } + cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying...", cli.addr)) + time.Sleep(time.Second * dialRetryIntervalSeconds) + continue RETRY_LOOP + } + cli.conn = conn + + go cli.sendRequestsRoutine(conn) + go cli.recvResponseRoutine(conn) + + return nil + } +} + +func (cli *socketClient) OnStop() { + cli.BaseService.OnStop() + + cli.mtx.Lock() + defer cli.mtx.Unlock() + if cli.conn != nil { + cli.conn.Close() + } + + cli.flushQueue() +} + +// Stop the client and set the error +func (cli *socketClient) StopForError(err error) { + if !cli.IsRunning() { + return + } + + cli.mtx.Lock() + if cli.err == nil { + cli.err = err + } + cli.mtx.Unlock() + + cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error())) + cli.Stop() +} + +func (cli *socketClient) Error() error { + cli.mtx.Lock() + defer cli.mtx.Unlock() + return cli.err +} + +// Set listener for all responses +// NOTE: callback may get internally generated flush responses. +func (cli *socketClient) SetResponseCallback(resCb Callback) { + cli.mtx.Lock() + defer cli.mtx.Unlock() + cli.resCb = resCb +} + +//---------------------------------------- + +func (cli *socketClient) sendRequestsRoutine(conn net.Conn) { + + w := bufio.NewWriter(conn) + for { + select { + case <-cli.flushTimer.Ch: + select { + case cli.reqQueue <- NewReqRes(types.ToRequestFlush()): + default: + // Probably will fill the buffer, or retry later. + } + case <-cli.Quit(): + return + case reqres := <-cli.reqQueue: + cli.willSendReq(reqres) + err := types.WriteMessage(reqres.Request, w) + if err != nil { + cli.StopForError(fmt.Errorf("Error writing msg: %v", err)) + return + } + // cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request) + if _, ok := reqres.Request.Value.(*types.Request_Flush); ok { + err = w.Flush() + if err != nil { + cli.StopForError(fmt.Errorf("Error flushing writer: %v", err)) + return + } + } + } + } +} + +func (cli *socketClient) recvResponseRoutine(conn net.Conn) { + + r := bufio.NewReader(conn) // Buffer reads + for { + var res = &types.Response{} + err := types.ReadMessage(r, res) + if err != nil { + cli.StopForError(err) + return + } + switch r := res.Value.(type) { + case *types.Response_Exception: + // XXX After setting cli.err, release waiters (e.g. 
reqres.Done()) + cli.StopForError(errors.New(r.Exception.Error)) + return + default: + // cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res) + err := cli.didRecvResponse(res) + if err != nil { + cli.StopForError(err) + return + } + } + } +} + +func (cli *socketClient) willSendReq(reqres *ReqRes) { + cli.mtx.Lock() + defer cli.mtx.Unlock() + cli.reqSent.PushBack(reqres) +} + +func (cli *socketClient) didRecvResponse(res *types.Response) error { + cli.mtx.Lock() + defer cli.mtx.Unlock() + + // Get the first ReqRes + next := cli.reqSent.Front() + if next == nil { + return fmt.Errorf("Unexpected result type %v when nothing expected", reflect.TypeOf(res.Value)) + } + reqres := next.Value.(*ReqRes) + if !resMatchesReq(reqres.Request, res) { + return fmt.Errorf("Unexpected result type %v when response to %v expected", + reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value)) + } + + reqres.Response = res // Set response + reqres.Done() // Release waiters + cli.reqSent.Remove(next) // Pop first item from linked list + + // Notify reqRes listener if set + if cb := reqres.GetCallback(); cb != nil { + cb(res) + } + + // Notify client listener if set + if cli.resCb != nil { + cli.resCb(reqres.Request, res) + } + + return nil +} + +//---------------------------------------- + +func (cli *socketClient) EchoAsync(msg string) *ReqRes { + return cli.queueRequest(types.ToRequestEcho(msg)) +} + +func (cli *socketClient) FlushAsync() *ReqRes { + return cli.queueRequest(types.ToRequestFlush()) +} + +func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes { + return cli.queueRequest(types.ToRequestInfo(req)) +} + +func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes { + return cli.queueRequest(types.ToRequestSetOption(req)) +} + +func (cli *socketClient) DeliverTxAsync(tx []byte) *ReqRes { + return cli.queueRequest(types.ToRequestDeliverTx(tx)) +} + +func (cli *socketClient) CheckTxAsync(tx []byte) *ReqRes { + return cli.queueRequest(types.ToRequestCheckTx(tx)) +} + +func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes { + return cli.queueRequest(types.ToRequestQuery(req)) +} + +func (cli *socketClient) CommitAsync() *ReqRes { + return cli.queueRequest(types.ToRequestCommit()) +} + +func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes { + return cli.queueRequest(types.ToRequestInitChain(req)) +} + +func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes { + return cli.queueRequest(types.ToRequestBeginBlock(req)) +} + +func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { + return cli.queueRequest(types.ToRequestEndBlock(req)) +} + +//---------------------------------------- + +func (cli *socketClient) FlushSync() error { + reqRes := cli.queueRequest(types.ToRequestFlush()) + if err := cli.Error(); err != nil { + return err + } + reqRes.Wait() // NOTE: if we don't flush the queue, its possible to get stuck here + return cli.Error() +} + +func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) { + reqres := cli.queueRequest(types.ToRequestEcho(msg)) + cli.FlushSync() + return reqres.Response.GetEcho(), cli.Error() +} + +func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { + reqres := cli.queueRequest(types.ToRequestInfo(req)) + cli.FlushSync() + return reqres.Response.GetInfo(), cli.Error() +} + +func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) { 
+ reqres := cli.queueRequest(types.ToRequestSetOption(req)) + cli.FlushSync() + return reqres.Response.GetSetOption(), cli.Error() +} + +func (cli *socketClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) { + reqres := cli.queueRequest(types.ToRequestDeliverTx(tx)) + cli.FlushSync() + return reqres.Response.GetDeliverTx(), cli.Error() +} + +func (cli *socketClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) { + reqres := cli.queueRequest(types.ToRequestCheckTx(tx)) + cli.FlushSync() + return reqres.Response.GetCheckTx(), cli.Error() +} + +func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { + reqres := cli.queueRequest(types.ToRequestQuery(req)) + cli.FlushSync() + return reqres.Response.GetQuery(), cli.Error() +} + +func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) { + reqres := cli.queueRequest(types.ToRequestCommit()) + cli.FlushSync() + return reqres.Response.GetCommit(), cli.Error() +} + +func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { + reqres := cli.queueRequest(types.ToRequestInitChain(req)) + cli.FlushSync() + return reqres.Response.GetInitChain(), cli.Error() +} + +func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + reqres := cli.queueRequest(types.ToRequestBeginBlock(req)) + cli.FlushSync() + return reqres.Response.GetBeginBlock(), cli.Error() +} + +func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { + reqres := cli.queueRequest(types.ToRequestEndBlock(req)) + cli.FlushSync() + return reqres.Response.GetEndBlock(), cli.Error() +} + +//---------------------------------------- + +func (cli *socketClient) queueRequest(req *types.Request) *ReqRes { + reqres := NewReqRes(req) + + // TODO: set cli.err if reqQueue times out + cli.reqQueue <- reqres + + // Maybe auto-flush, or unset auto-flush + switch req.Value.(type) { + case *types.Request_Flush: + cli.flushTimer.Unset() + default: + cli.flushTimer.Set() + } + + return reqres +} + +func (cli *socketClient) flushQueue() { +LOOP: + for { + select { + case reqres := <-cli.reqQueue: + reqres.Done() + default: + break LOOP + } + } +} + +//---------------------------------------- + +func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { + switch req.Value.(type) { + case *types.Request_Echo: + _, ok = res.Value.(*types.Response_Echo) + case *types.Request_Flush: + _, ok = res.Value.(*types.Response_Flush) + case *types.Request_Info: + _, ok = res.Value.(*types.Response_Info) + case *types.Request_SetOption: + _, ok = res.Value.(*types.Response_SetOption) + case *types.Request_DeliverTx: + _, ok = res.Value.(*types.Response_DeliverTx) + case *types.Request_CheckTx: + _, ok = res.Value.(*types.Response_CheckTx) + case *types.Request_Commit: + _, ok = res.Value.(*types.Response_Commit) + case *types.Request_Query: + _, ok = res.Value.(*types.Response_Query) + case *types.Request_InitChain: + _, ok = res.Value.(*types.Response_InitChain) + case *types.Request_BeginBlock: + _, ok = res.Value.(*types.Response_BeginBlock) + case *types.Request_EndBlock: + _, ok = res.Value.(*types.Response_EndBlock) + } + return ok +} diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go new file mode 100644 index 000000000..5a9187fb4 --- /dev/null +++ b/abci/client/socket_client_test.go @@ -0,0 +1,28 @@ +package abcicli_test + +import ( + "errors" + "testing" + "time" + + 
"github.com/tendermint/tendermint/abci/client" +) + +func TestSocketClientStopForErrorDeadlock(t *testing.T) { + c := abcicli.NewSocketClient(":80", false) + err := errors.New("foo-tendermint") + + // See Issue https://github.com/tendermint/abci/issues/114 + doneChan := make(chan bool) + go func() { + defer close(doneChan) + c.StopForError(err) + c.StopForError(err) + }() + + select { + case <-doneChan: + case <-time.After(time.Second * 4): + t.Fatalf("Test took too long, potential deadlock still exists") + } +} diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go new file mode 100644 index 000000000..e20244011 --- /dev/null +++ b/abci/cmd/abci-cli/abci-cli.go @@ -0,0 +1,765 @@ +package main + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/spf13/cobra" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/code" + "github.com/tendermint/tendermint/abci/example/counter" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/abci/server" + servertest "github.com/tendermint/tendermint/abci/tests/server" + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/abci/version" +) + +// client is a global variable so it can be reused by the console +var ( + client abcicli.Client + logger log.Logger +) + +// flags +var ( + // global + flagAddress string + flagAbci string + flagVerbose bool // for the println output + flagLogLevel string // for the logger + + // query + flagPath string + flagHeight int + flagProve bool + + // counter + flagSerial bool + + // kvstore + flagPersist string +) + +var RootCmd = &cobra.Command{ + Use: "abci-cli", + Short: "the ABCI CLI tool wraps an ABCI client", + Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + + switch cmd.Use { + case "counter", "kvstore", "dummy": // for the examples apps, don't pre-run + return nil + case "version": // skip running for version command + return nil + } + + if logger == nil { + allowLevel, err := log.AllowLevel(flagLogLevel) + if err != nil { + return err + } + logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel) + } + if client == nil { + var err error + client, err = abcicli.NewClient(flagAddress, flagAbci, false) + if err != nil { + return err + } + client.SetLogger(logger.With("module", "abci-client")) + if err := client.Start(); err != nil { + return err + } + } + return nil + }, +} + +// Structure for data passed to print response. 
+type response struct { + // generic abci response + Data []byte + Code uint32 + Info string + Log string + + Query *queryResponse +} + +type queryResponse struct { + Key []byte + Value []byte + Height int64 + Proof []byte +} + +func Execute() error { + addGlobalFlags() + addCommands() + return RootCmd.Execute() +} + +func addGlobalFlags() { + RootCmd.PersistentFlags().StringVarP(&flagAddress, "address", "", "tcp://0.0.0.0:26658", "address of application socket") + RootCmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc") + RootCmd.PersistentFlags().BoolVarP(&flagVerbose, "verbose", "v", false, "print the command and results as if it were a console session") + RootCmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level") +} + +func addQueryFlags() { + queryCmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with") + queryCmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at") + queryCmd.PersistentFlags().BoolVarP(&flagProve, "prove", "", false, "whether or not to return a merkle proof of the query result") +} + +func addCounterFlags() { + counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions") +} + +func addDummyFlags() { + dummyCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database") +} + +func addKVStoreFlags() { + kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database") +} + +func addCommands() { + RootCmd.AddCommand(batchCmd) + RootCmd.AddCommand(consoleCmd) + RootCmd.AddCommand(echoCmd) + RootCmd.AddCommand(infoCmd) + RootCmd.AddCommand(setOptionCmd) + RootCmd.AddCommand(deliverTxCmd) + RootCmd.AddCommand(checkTxCmd) + RootCmd.AddCommand(commitCmd) + RootCmd.AddCommand(versionCmd) + RootCmd.AddCommand(testCmd) + addQueryFlags() + RootCmd.AddCommand(queryCmd) + + // examples + addCounterFlags() + RootCmd.AddCommand(counterCmd) + // deprecated, left for backwards compatibility + addDummyFlags() + RootCmd.AddCommand(dummyCmd) + // replaces dummy, see issue #196 + addKVStoreFlags() + RootCmd.AddCommand(kvstoreCmd) +} + +var batchCmd = &cobra.Command{ + Use: "batch", + Short: "run a batch of abci commands against an application", + Long: `run a batch of abci commands against an application + +This command is run by piping in a file containing a series of commands +you'd like to run: + + abci-cli batch < example.file + +where example.file looks something like: + + set_option serial on + check_tx 0x00 + check_tx 0xff + deliver_tx 0x00 + check_tx 0x00 + deliver_tx 0x01 + deliver_tx 0x04 + info +`, + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdBatch(cmd, args) + }, +} + +var consoleCmd = &cobra.Command{ + Use: "console", + Short: "start an interactive ABCI console for multiple commands", + Long: `start an interactive ABCI console for multiple commands + +This command opens an interactive console for running any of the other commands +without opening a new connection each time +`, + Args: cobra.ExactArgs(0), + ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"}, + RunE: func(cmd *cobra.Command, args []string) error { + return cmdConsole(cmd, args) + }, +} + +var echoCmd = &cobra.Command{ + Use: "echo", + Short: "have the application echo a message", + Long: "have the application echo a message", + 
Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdEcho(cmd, args) + }, +} +var infoCmd = &cobra.Command{ + Use: "info", + Short: "get some info about the application", + Long: "get some info about the application", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdInfo(cmd, args) + }, +} +var setOptionCmd = &cobra.Command{ + Use: "set_option", + Short: "set an option on the application", + Long: "set an option on the application", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdSetOption(cmd, args) + }, +} + +var deliverTxCmd = &cobra.Command{ + Use: "deliver_tx", + Short: "deliver a new transaction to the application", + Long: "deliver a new transaction to the application", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdDeliverTx(cmd, args) + }, +} + +var checkTxCmd = &cobra.Command{ + Use: "check_tx", + Short: "validate a transaction", + Long: "validate a transaction", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdCheckTx(cmd, args) + }, +} + +var commitCmd = &cobra.Command{ + Use: "commit", + Short: "commit the application state and return the Merkle root hash", + Long: "commit the application state and return the Merkle root hash", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdCommit(cmd, args) + }, +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "print ABCI console version", + Long: "print ABCI console version", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Println(version.Version) + return nil + }, +} + +var queryCmd = &cobra.Command{ + Use: "query", + Short: "query the application state", + Long: "query the application state", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdQuery(cmd, args) + }, +} + +var counterCmd = &cobra.Command{ + Use: "counter", + Short: "ABCI demo example", + Long: "ABCI demo example", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdCounter(cmd, args) + }, +} + +// deprecated, left for backwards compatibility +var dummyCmd = &cobra.Command{ + Use: "dummy", + Deprecated: "use: [abci-cli kvstore] instead", + Short: "ABCI demo example", + Long: "ABCI demo example", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdKVStore(cmd, args) + }, +} + +var kvstoreCmd = &cobra.Command{ + Use: "kvstore", + Short: "ABCI demo example", + Long: "ABCI demo example", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdKVStore(cmd, args) + }, +} + +var testCmd = &cobra.Command{ + Use: "test", + Short: "run integration tests", + Long: "run integration tests", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmdTest(cmd, args) + }, +} + +// Generates new Args array based off of previous call args to maintain flag persistence +func persistentArgs(line []byte) []string { + + // generate the arguments to run from original os.Args + // to maintain flag arguments + args := os.Args + args = args[:len(args)-1] // remove the previous command argument + + if len(line) > 0 { // prevents introduction of extra space leading to argument parse errors + args = append(args, strings.Split(string(line), " ")...) 
+	}
+	return args
+}
+
+//--------------------------------------------------------------------------------
+
+func compose(fs []func() error) error {
+	if len(fs) == 0 {
+		return nil
+	}
+	if err := fs[0](); err != nil {
+		return err
+	}
+	return compose(fs[1:])
+}
+
+func cmdTest(cmd *cobra.Command, args []string) error {
+	return compose(
+		[]func() error{
+			func() error { return servertest.InitChain(client) },
+			func() error { return servertest.SetOption(client, "serial", "on") },
+			func() error { return servertest.Commit(client, nil) },
+			func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
+			func() error { return servertest.Commit(client, nil) },
+			func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) },
+			func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
+			func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
+			func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) },
+			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
+			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
+			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
+			func() error {
+				return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
+			},
+			func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
+		})
+}
+
+func cmdBatch(cmd *cobra.Command, args []string) error {
+	bufReader := bufio.NewReader(os.Stdin)
+	for {
+		line, more, err := bufReader.ReadLine()
+		if more {
+			return errors.New("Input line is too long")
+		} else if err == io.EOF {
+			break
+		} else if err != nil {
+			return err
+		} else if len(line) == 0 {
+			continue
+		}
+
+		cmdArgs := persistentArgs(line)
+		if err := muxOnCommands(cmd, cmdArgs); err != nil {
+			return err
+		}
+		fmt.Println()
+	}
+	return nil
+}
+
+func cmdConsole(cmd *cobra.Command, args []string) error {
+	for {
+		fmt.Printf("> ")
+		bufReader := bufio.NewReader(os.Stdin)
+		line, more, err := bufReader.ReadLine()
+		if more {
+			return errors.New("Input is too long")
+		} else if err != nil {
+			return err
+		}
+
+		pArgs := persistentArgs(line)
+		if err := muxOnCommands(cmd, pArgs); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
+	if len(pArgs) < 2 {
+		return errors.New("expecting persistent args of the form: abci-cli [command] <...>")
+	}
+
+	// TODO: this parsing is fragile
+	args := []string{}
+	for i := 0; i < len(pArgs); i++ {
+		arg := pArgs[i]
+
+		// check for flags
+		if strings.HasPrefix(arg, "-") {
+			// if it has an equal, we can just skip
+			if strings.Contains(arg, "=") {
+				continue
+			}
+			// if it's a boolean, we can just skip
+			_, err := cmd.Flags().GetBool(strings.TrimLeft(arg, "-"))
+			if err == nil {
+				continue
+			}
+
+			// otherwise, we need to skip the next one too
+			i++
+			continue
+		}
+
+		// append the actual arg
+		args = append(args, arg)
+	}
+	var subCommand string
+	var actualArgs []string
+	if len(args) > 1 {
+		subCommand = args[1]
+	}
+	if len(args) > 2 {
+		actualArgs = args[2:]
+	}
+	cmd.Use = subCommand // for later print statements ...
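+	// NOTE: the switch below routes the parsed sub-command to its handler;
+	// anything unrecognized falls through to cmdUnimplemented.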
+
+	switch strings.ToLower(subCommand) {
+	case "check_tx":
+		return cmdCheckTx(cmd, actualArgs)
+	case "commit":
+		return cmdCommit(cmd, actualArgs)
+	case "deliver_tx":
+		return cmdDeliverTx(cmd, actualArgs)
+	case "echo":
+		return cmdEcho(cmd, actualArgs)
+	case "info":
+		return cmdInfo(cmd, actualArgs)
+	case "query":
+		return cmdQuery(cmd, actualArgs)
+	case "set_option":
+		return cmdSetOption(cmd, actualArgs)
+	default:
+		return cmdUnimplemented(cmd, pArgs)
+	}
+}
+
+func cmdUnimplemented(cmd *cobra.Command, args []string) error {
+	// TODO: Print out all the sub-commands available
+	msg := "unimplemented command"
+	if err := cmd.Help(); err != nil {
+		msg = err.Error()
+	}
+	if len(args) > 0 {
+		msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " "))
+	}
+	printResponse(cmd, args, response{
+		Code: codeBad,
+		Log:  msg,
+	})
+	return nil
+}
+
+// Have the application echo a message
+func cmdEcho(cmd *cobra.Command, args []string) error {
+	msg := ""
+	if len(args) > 0 {
+		msg = args[0]
+	}
+	res, err := client.EchoSync(msg)
+	if err != nil {
+		return err
+	}
+	printResponse(cmd, args, response{
+		Data: []byte(res.Message),
+	})
+	return nil
+}
+
+// Get some info from the application
+func cmdInfo(cmd *cobra.Command, args []string) error {
+	var version string
+	if len(args) == 1 {
+		version = args[0]
+	}
+	res, err := client.InfoSync(types.RequestInfo{version})
+	if err != nil {
+		return err
+	}
+	printResponse(cmd, args, response{
+		Data: []byte(res.Data),
+	})
+	return nil
+}
+
+const codeBad uint32 = 10
+
+// Set an option on the application
+func cmdSetOption(cmd *cobra.Command, args []string) error {
+	if len(args) < 2 {
+		printResponse(cmd, args, response{
+			Code: codeBad,
+			Log:  "want at least arguments of the form: <key> <value>",
+		})
+		return nil
+	}
+
+	key, val := args[0], args[1]
+	_, err := client.SetOptionSync(types.RequestSetOption{key, val})
+	if err != nil {
+		return err
+	}
+	printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
+ return nil +} + +// Append a new tx to application +func cmdDeliverTx(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + printResponse(cmd, args, response{ + Code: codeBad, + Log: "want the tx", + }) + return nil + } + txBytes, err := stringOrHexToBytes(args[0]) + if err != nil { + return err + } + res, err := client.DeliverTxSync(txBytes) + if err != nil { + return err + } + printResponse(cmd, args, response{ + Code: res.Code, + Data: res.Data, + Info: res.Info, + Log: res.Log, + }) + return nil +} + +// Validate a tx +func cmdCheckTx(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + printResponse(cmd, args, response{ + Code: codeBad, + Info: "want the tx", + }) + return nil + } + txBytes, err := stringOrHexToBytes(args[0]) + if err != nil { + return err + } + res, err := client.CheckTxSync(txBytes) + if err != nil { + return err + } + printResponse(cmd, args, response{ + Code: res.Code, + Data: res.Data, + Info: res.Info, + Log: res.Log, + }) + return nil +} + +// Get application Merkle root hash +func cmdCommit(cmd *cobra.Command, args []string) error { + res, err := client.CommitSync() + if err != nil { + return err + } + printResponse(cmd, args, response{ + Data: res.Data, + }) + return nil +} + +// Query application state +func cmdQuery(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + printResponse(cmd, args, response{ + Code: codeBad, + Info: "want the query", + Log: "", + }) + return nil + } + queryBytes, err := stringOrHexToBytes(args[0]) + if err != nil { + return err + } + + resQuery, err := client.QuerySync(types.RequestQuery{ + Data: queryBytes, + Path: flagPath, + Height: int64(flagHeight), + Prove: flagProve, + }) + if err != nil { + return err + } + printResponse(cmd, args, response{ + Code: resQuery.Code, + Info: resQuery.Info, + Log: resQuery.Log, + Query: &queryResponse{ + Key: resQuery.Key, + Value: resQuery.Value, + Height: resQuery.Height, + Proof: resQuery.Proof, + }, + }) + return nil +} + +func cmdCounter(cmd *cobra.Command, args []string) error { + + app := counter.NewCounterApplication(flagSerial) + + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + + // Start the listener + srv, err := server.NewServer(flagAddress, flagAbci, app) + if err != nil { + return err + } + srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { + return err + } + + // Wait forever + cmn.TrapSignal(func() { + // Cleanup + srv.Stop() + }) + return nil +} + +func cmdKVStore(cmd *cobra.Command, args []string) error { + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + + // Create the application - in memory or persisted to disk + var app types.Application + if flagPersist == "" { + app = kvstore.NewKVStoreApplication() + } else { + app = kvstore.NewPersistentKVStoreApplication(flagPersist) + app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) + } + + // Start the listener + srv, err := server.NewServer(flagAddress, flagAbci, app) + if err != nil { + return err + } + srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { + return err + } + + // Wait forever + cmn.TrapSignal(func() { + // Cleanup + srv.Stop() + }) + return nil +} + +//-------------------------------------------------------------------------------- + +func printResponse(cmd *cobra.Command, args []string, rsp response) { + + if flagVerbose { + fmt.Println(">", cmd.Use, strings.Join(args, " ")) + } + + // Always print the status code. 
+	if rsp.Code == types.CodeTypeOK {
+		fmt.Printf("-> code: OK\n")
+	} else {
+		fmt.Printf("-> code: %d\n", rsp.Code)
+	}
+
+	if len(rsp.Data) != 0 {
+		// Do not print this line when using the commit command
+		// because the string comes out as gibberish
+		if cmd.Use != "commit" {
+			fmt.Printf("-> data: %s\n", rsp.Data)
+		}
+		fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
+	}
+	if rsp.Log != "" {
+		fmt.Printf("-> log: %s\n", rsp.Log)
+	}
+
+	if rsp.Query != nil {
+		fmt.Printf("-> height: %d\n", rsp.Query.Height)
+		if rsp.Query.Key != nil {
+			fmt.Printf("-> key: %s\n", rsp.Query.Key)
+			fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
+		}
+		if rsp.Query.Value != nil {
+			fmt.Printf("-> value: %s\n", rsp.Query.Value)
+			fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
+		}
+		if rsp.Query.Proof != nil {
+			fmt.Printf("-> proof: %X\n", rsp.Query.Proof)
+		}
+	}
+}
+
+// NOTE: s is interpreted as a string unless prefixed with 0x
+func stringOrHexToBytes(s string) ([]byte, error) {
+	if len(s) > 2 && strings.ToLower(s[:2]) == "0x" {
+		b, err := hex.DecodeString(s[2:])
+		if err != nil {
+			err = fmt.Errorf("Error decoding hex argument: %s", err.Error())
+			return nil, err
+		}
+		return b, nil
+	}
+
+	if !strings.HasPrefix(s, "\"") || !strings.HasSuffix(s, "\"") {
+		err := fmt.Errorf("Invalid string arg: \"%s\". Must be quoted or a \"0x\"-prefixed hex string", s)
+		return nil, err
+	}
+
+	return []byte(s[1 : len(s)-1]), nil
+}
diff --git a/abci/cmd/abci-cli/main.go b/abci/cmd/abci-cli/main.go
new file mode 100644
index 000000000..a927e7ed8
--- /dev/null
+++ b/abci/cmd/abci-cli/main.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	err := Execute()
+	if err != nil {
+		fmt.Print(err)
+		os.Exit(1)
+	}
+}
diff --git a/abci/example/code/code.go b/abci/example/code/code.go
new file mode 100644
index 000000000..94e9d015e
--- /dev/null
+++ b/abci/example/code/code.go
@@ -0,0 +1,9 @@
+package code
+
+// Return codes for the examples
+const (
+	CodeTypeOK            uint32 = 0
+	CodeTypeEncodingError uint32 = 1
+	CodeTypeBadNonce      uint32 = 2
+	CodeTypeUnauthorized  uint32 = 3
+)
diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go
new file mode 100644
index 000000000..857e82baf
--- /dev/null
+++ b/abci/example/counter/counter.go
@@ -0,0 +1,104 @@
+package counter
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/tendermint/tendermint/abci/example/code"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+type CounterApplication struct {
+	types.BaseApplication
+
+	hashCount int
+	txCount   int
+	serial    bool
+}
+
+func NewCounterApplication(serial bool) *CounterApplication {
+	return &CounterApplication{serial: serial}
+}
+
+func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
+	return types.ResponseInfo{Data: cmn.Fmt("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
+}
+
+func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
+	key, value := req.Key, req.Value
+	if key == "serial" && value == "on" {
+		app.serial = true
+	} else {
+		/*
+			TODO Panic and have the ABCI server pass an exception.
+			The client can call SetOptionSync() and get an `error`.
+ return types.ResponseSetOption{ + Error: cmn.Fmt("Unknown key (%s) or value (%s)", key, value), + } + */ + return types.ResponseSetOption{} + } + + return types.ResponseSetOption{} +} + +func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { + if app.serial { + if len(tx) > 8 { + return types.ResponseDeliverTx{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))} + } + tx8 := make([]byte, 8) + copy(tx8[len(tx8)-len(tx):], tx) + txValue := binary.BigEndian.Uint64(tx8) + if txValue != uint64(app.txCount) { + return types.ResponseDeliverTx{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)} + } + } + app.txCount++ + return types.ResponseDeliverTx{Code: code.CodeTypeOK} +} + +func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx { + if app.serial { + if len(tx) > 8 { + return types.ResponseCheckTx{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))} + } + tx8 := make([]byte, 8) + copy(tx8[len(tx8)-len(tx):], tx) + txValue := binary.BigEndian.Uint64(tx8) + if txValue < uint64(app.txCount) { + return types.ResponseCheckTx{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)} + } + } + return types.ResponseCheckTx{Code: code.CodeTypeOK} +} + +func (app *CounterApplication) Commit() (resp types.ResponseCommit) { + app.hashCount++ + if app.txCount == 0 { + return types.ResponseCommit{} + } + hash := make([]byte, 8) + binary.BigEndian.PutUint64(hash, uint64(app.txCount)) + return types.ResponseCommit{Data: hash} +} + +func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery { + switch reqQuery.Path { + case "hash": + return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.hashCount))} + case "tx": + return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.txCount))} + default: + return types.ResponseQuery{Log: cmn.Fmt("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)} + } +} diff --git a/abci/example/example.go b/abci/example/example.go new file mode 100644 index 000000000..ee491c1b5 --- /dev/null +++ b/abci/example/example.go @@ -0,0 +1,3 @@ +package example + +// so the go tool doesn't return errors about no buildable go files ... 
diff --git a/abci/example/example_test.go b/abci/example/example_test.go new file mode 100644 index 000000000..bbb53b5af --- /dev/null +++ b/abci/example/example_test.go @@ -0,0 +1,154 @@ +package example + +import ( + "fmt" + "net" + "reflect" + "testing" + "time" + + "google.golang.org/grpc" + + "golang.org/x/net/context" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/code" + "github.com/tendermint/tendermint/abci/example/kvstore" + abciserver "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/abci/types" +) + +func TestKVStore(t *testing.T) { + fmt.Println("### Testing KVStore") + testStream(t, kvstore.NewKVStoreApplication()) +} + +func TestBaseApp(t *testing.T) { + fmt.Println("### Testing BaseApp") + testStream(t, types.NewBaseApplication()) +} + +func TestGRPC(t *testing.T) { + fmt.Println("### Testing GRPC") + testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication())) +} + +func testStream(t *testing.T, app types.Application) { + numDeliverTxs := 200000 + + // Start the listener + server := abciserver.NewSocketServer("unix://test.sock", app) + server.SetLogger(log.TestingLogger().With("module", "abci-server")) + if err := server.Start(); err != nil { + t.Fatalf("Error starting socket server: %v", err.Error()) + } + defer server.Stop() + + // Connect to the socket + client := abcicli.NewSocketClient("unix://test.sock", false) + client.SetLogger(log.TestingLogger().With("module", "abci-client")) + if err := client.Start(); err != nil { + t.Fatalf("Error starting socket client: %v", err.Error()) + } + defer client.Stop() + + done := make(chan struct{}) + counter := 0 + client.SetResponseCallback(func(req *types.Request, res *types.Response) { + // Process response + switch r := res.Value.(type) { + case *types.Response_DeliverTx: + counter++ + if r.DeliverTx.Code != code.CodeTypeOK { + t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code) + } + if counter > numDeliverTxs { + t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs) + } + if counter == numDeliverTxs { + go func() { + time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow + close(done) + }() + return + } + case *types.Response_Flush: + // ignore + default: + t.Error("Unexpected response type", reflect.TypeOf(res.Value)) + } + }) + + // Write requests + for counter := 0; counter < numDeliverTxs; counter++ { + // Send request + reqRes := client.DeliverTxAsync([]byte("test")) + _ = reqRes + // check err ? + + // Sometimes send flush messages + if counter%123 == 0 { + client.FlushAsync() + // check err ? 
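+			// (transport errors would surface later via client.Error())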
+		}
+	}
+
+	// Send final flush message
+	client.FlushAsync()
+
+	<-done
+}
+
+//-------------------------
+// test grpc

+func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+	return cmn.Connect(addr)
+}
+
+func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
+	numDeliverTxs := 2000
+
+	// Start the listener
+	server := abciserver.NewGRPCServer("unix://test.sock", app)
+	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
+	if err := server.Start(); err != nil {
+		t.Fatalf("Error starting GRPC server: %v", err.Error())
+	}
+	defer server.Stop()
+
+	// Connect to the socket
+	conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+	if err != nil {
+		t.Fatalf("Error dialing GRPC server: %v", err.Error())
+	}
+	defer conn.Close()
+
+	client := types.NewABCIApplicationClient(conn)
+
+	// Write requests
+	for counter := 0; counter < numDeliverTxs; counter++ {
+		// Send request
+		response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{[]byte("test")})
+		if err != nil {
+			t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
+		}
+		if response.Code != code.CodeTypeOK {
+			t.Error("DeliverTx failed with ret_code", response.Code)
+		}
+		t.Log("response", counter)
+		if counter == numDeliverTxs-1 {
+			go func() {
+				time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
+			}()
+		}
+	}
+}
diff --git a/abci/example/js/.gitignore b/abci/example/js/.gitignore
new file mode 100644
index 000000000..3c3629e64
--- /dev/null
+++ b/abci/example/js/.gitignore
@@ -0,0 +1 @@
+node_modules
diff --git a/abci/example/js/README.md b/abci/example/js/README.md
new file mode 100644
index 000000000..1bef9cbf5
--- /dev/null
+++ b/abci/example/js/README.md
@@ -0,0 +1 @@
+This example has been moved here: https://github.com/tendermint/js-abci/tree/master/example
diff --git a/abci/example/kvstore/README.md b/abci/example/kvstore/README.md
new file mode 100644
index 000000000..e988eadb0
--- /dev/null
+++ b/abci/example/kvstore/README.md
@@ -0,0 +1,31 @@
+# KVStore
+
+There are two apps here: the KVStoreApplication and the PersistentKVStoreApplication.
+
+## KVStoreApplication
+
+The KVStoreApplication is a simple Merkle key-value store.
+Transactions of the form `key=value` are stored as key-value pairs in the tree.
+Transactions without an `=` sign set the value to the key.
+The app has no replay protection (other than what the mempool provides).
+
+## PersistentKVStoreApplication
+
+The PersistentKVStoreApplication wraps the KVStoreApplication
+and provides two additional features:
+
+1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
+2) validator set changes
+
+The state is persisted in leveldb along with the last block committed,
+and the Handshake allows any necessary blocks to be replayed.
+Validator set changes are effected using the following transaction format:
+
+```
+val:pubkey1/power1,pubkey2/power2,pubkey3/power3
+```
+
+where `power1` is the new voting power for the validator with `pubkey1` (possibly a new one).
+There is no Sybil protection against new validators joining.
+Validators can be removed by setting their power to `0`.
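+
+For example, assuming a hex-encoded ed25519 pubkey (the value below is a
+placeholder; see `MakeValSetChangeTx` for the exact encoding), the transaction
+
+```
+val:<hex-encoded-pubkey>/10
+```
+
+sets that validator's power to 10, and submitting the same pubkey with power
+`0` removes it again.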
+
diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go
new file mode 100644
index 000000000..0e69fab9f
--- /dev/null
+++ b/abci/example/kvstore/helpers.go
@@ -0,0 +1,38 @@
+package kvstore
+
+import (
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// RandVal creates one random validator with a random address,
+// pubkey, and power (the index argument is currently unused)
+func RandVal(i int) types.Validator {
+	addr := cmn.RandBytes(20)
+	pubkey := cmn.RandBytes(32)
+	power := cmn.RandUint16() + 1
+	v := types.Ed25519Validator(pubkey, int64(power))
+	v.Address = addr
+	return v
+}
+
+// RandVals returns a list of cnt validators for initializing
+// the application. Note that both the keys and the powers are
+// random for every validator in the list
+// (change this if not desired)
+func RandVals(cnt int) []types.Validator {
+	res := make([]types.Validator, cnt)
+	for i := 0; i < cnt; i++ {
+		res[i] = RandVal(i)
+	}
+	return res
+}
+
+// InitKVStore initializes the kvstore app with some data,
+// which allows tests to pass and is fine as long as you
+// don't make any txs that modify the validator state
+func InitKVStore(app *PersistentKVStoreApplication) {
+	app.InitChain(types.RequestInitChain{
+		Validators: RandVals(1),
+	})
+}
diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go
new file mode 100644
index 000000000..0f72b44ea
--- /dev/null
+++ b/abci/example/kvstore/kvstore.go
@@ -0,0 +1,126 @@
+package kvstore
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+
+	"github.com/tendermint/tendermint/abci/example/code"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+)
+
+var (
+	stateKey        = []byte("stateKey")
+	kvPairPrefixKey = []byte("kvPairKey:")
+)
+
+type State struct {
+	db      dbm.DB
+	Size    int64  `json:"size"`
+	Height  int64  `json:"height"`
+	AppHash []byte `json:"app_hash"`
+}
+
+func loadState(db dbm.DB) State {
+	stateBytes := db.Get(stateKey)
+	var state State
+	if len(stateBytes) != 0 {
+		err := json.Unmarshal(stateBytes, &state)
+		if err != nil {
+			panic(err)
+		}
+	}
+	state.db = db
+	return state
+}
+
+func saveState(state State) {
+	stateBytes, err := json.Marshal(state)
+	if err != nil {
+		panic(err)
+	}
+	state.db.Set(stateKey, stateBytes)
+}
+
+func prefixKey(key []byte) []byte {
+	return append(kvPairPrefixKey, key...)
+} + +//--------------------------------------------------- + +var _ types.Application = (*KVStoreApplication)(nil) + +type KVStoreApplication struct { + types.BaseApplication + + state State +} + +func NewKVStoreApplication() *KVStoreApplication { + state := loadState(dbm.NewMemDB()) + return &KVStoreApplication{state: state} +} + +func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { + return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size)} +} + +// tx is either "key=value" or just arbitrary bytes +func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { + var key, value []byte + parts := bytes.Split(tx, []byte("=")) + if len(parts) == 2 { + key, value = parts[0], parts[1] + } else { + key, value = tx, tx + } + app.state.db.Set(prefixKey(key), value) + app.state.Size += 1 + + tags := []cmn.KVPair{ + {[]byte("app.creator"), []byte("jae")}, + {[]byte("app.key"), key}, + } + return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} +} + +func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx { + return types.ResponseCheckTx{Code: code.CodeTypeOK} +} + +func (app *KVStoreApplication) Commit() types.ResponseCommit { + // Using a memdb - just return the big endian size of the db + appHash := make([]byte, 8) + binary.PutVarint(appHash, app.state.Size) + app.state.AppHash = appHash + app.state.Height += 1 + saveState(app.state) + return types.ResponseCommit{Data: appHash} +} + +func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { + if reqQuery.Prove { + value := app.state.db.Get(prefixKey(reqQuery.Data)) + resQuery.Index = -1 // TODO make Proof return index + resQuery.Key = reqQuery.Data + resQuery.Value = value + if value != nil { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } else { + value := app.state.db.Get(prefixKey(reqQuery.Data)) + resQuery.Value = value + if value != nil { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } +} diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go new file mode 100644 index 000000000..2d8f81272 --- /dev/null +++ b/abci/example/kvstore/kvstore_test.go @@ -0,0 +1,310 @@ +package kvstore + +import ( + "bytes" + "io/ioutil" + "sort" + "testing" + + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/code" + abciserver "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/abci/types" +) + +func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { + ar := app.DeliverTx(tx) + require.False(t, ar.IsErr(), ar) + // repeating tx doesn't raise error + ar = app.DeliverTx(tx) + require.False(t, ar.IsErr(), ar) + + // make sure query is fine + resQuery := app.Query(types.RequestQuery{ + Path: "/store", + Data: []byte(key), + }) + require.Equal(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, value, string(resQuery.Value)) + + // make sure proof is fine + resQuery = app.Query(types.RequestQuery{ + Path: "/store", + Data: []byte(key), + Prove: true, + }) + require.EqualValues(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, value, string(resQuery.Value)) +} + +func TestKVStoreKV(t *testing.T) { + kvstore := NewKVStoreApplication() + key := "abc" + value := 
key
+	tx := []byte(key)
+	testKVStore(t, kvstore, tx, key, value)
+
+	value = "def"
+	tx = []byte(key + "=" + value)
+	testKVStore(t, kvstore, tx, key, value)
+}
+
+func TestPersistentKVStoreKV(t *testing.T) {
+	dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
+	if err != nil {
+		t.Fatal(err)
+	}
+	kvstore := NewPersistentKVStoreApplication(dir)
+	key := "abc"
+	value := key
+	tx := []byte(key)
+	testKVStore(t, kvstore, tx, key, value)
+
+	value = "def"
+	tx = []byte(key + "=" + value)
+	testKVStore(t, kvstore, tx, key, value)
+}
+
+func TestPersistentKVStoreInfo(t *testing.T) {
+	dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
+	if err != nil {
+		t.Fatal(err)
+	}
+	kvstore := NewPersistentKVStoreApplication(dir)
+	InitKVStore(kvstore)
+	height := int64(0)
+
+	resInfo := kvstore.Info(types.RequestInfo{})
+	if resInfo.LastBlockHeight != height {
+		t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
+	}
+
+	// make and apply block
+	height = int64(1)
+	hash := []byte("foo")
+	header := types.Header{
+		Height: int64(height),
+	}
+	kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil})
+	kvstore.EndBlock(types.RequestEndBlock{header.Height})
+	kvstore.Commit()
+
+	resInfo = kvstore.Info(types.RequestInfo{})
+	if resInfo.LastBlockHeight != height {
+		t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
+	}
+}
+
+// add a validator, remove a validator, update a validator
+func TestValUpdates(t *testing.T) {
+	dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
+	if err != nil {
+		t.Fatal(err)
+	}
+	kvstore := NewPersistentKVStoreApplication(dir)
+
+	// init with some validators
+	total := 10
+	nInit := 5
+	vals := RandVals(total)
+	// initialize with the first nInit
+	kvstore.InitChain(types.RequestInitChain{
+		Validators: vals[:nInit],
+	})
+
+	vals1, vals2 := vals[:nInit], kvstore.Validators()
+	valsEqual(t, vals1, vals2)
+
+	var v1, v2, v3 types.Validator
+
+	// add some validators
+	v1, v2 = vals[nInit], vals[nInit+1]
+	diff := []types.Validator{v1, v2}
+	tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
+	tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
+
+	makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
+
+	vals1, vals2 = vals[:nInit+2], kvstore.Validators()
+	valsEqual(t, vals1, vals2)
+
+	// remove some validators
+	v1, v2, v3 = vals[nInit-2], vals[nInit-1], vals[nInit]
+	v1.Power = 0
+	v2.Power = 0
+	v3.Power = 0
+	diff = []types.Validator{v1, v2, v3}
+	tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
+	tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
+	tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
+
+	makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
+
+	vals1 = append(vals[:nInit-2], vals[nInit+1])
+	vals2 = kvstore.Validators()
+	valsEqual(t, vals1, vals2)
+
+	// update some validators
+	v1 = vals[0]
+	if v1.Power == 5 {
+		v1.Power = 6
+	} else {
+		v1.Power = 5
+	}
+	diff = []types.Validator{v1}
+	tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
+
+	makeApplyBlock(t, kvstore, 3, diff, tx1)
+
+	vals1 = append([]types.Validator{v1}, vals1[1:]...)
+ vals2 = kvstore.Validators() + valsEqual(t, vals1, vals2) + +} + +func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.Validator, txs ...[]byte) { + // make and apply block + height := int64(heightInt) + hash := []byte("foo") + header := types.Header{ + Height: height, + } + + kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil}) + for _, tx := range txs { + if r := kvstore.DeliverTx(tx); r.IsErr() { + t.Fatal(r) + } + } + resEndBlock := kvstore.EndBlock(types.RequestEndBlock{header.Height}) + kvstore.Commit() + + valsEqual(t, diff, resEndBlock.ValidatorUpdates) + +} + +// order doesn't matter +func valsEqual(t *testing.T, vals1, vals2 []types.Validator) { + if len(vals1) != len(vals2) { + t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1)) + } + sort.Sort(types.Validators(vals1)) + sort.Sort(types.Validators(vals2)) + for i, v1 := range vals1 { + v2 := vals2[i] + if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) || + v1.Power != v2.Power { + t.Fatalf("vals dont match at index %d. got %X/%d , expected %X/%d", i, v2.PubKey, v2.Power, v1.PubKey, v1.Power) + } + } +} + +func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) { + // Start the listener + socket := cmn.Fmt("unix://%s.sock", name) + logger := log.TestingLogger() + + server := abciserver.NewSocketServer(socket, app) + server.SetLogger(logger.With("module", "abci-server")) + if err := server.Start(); err != nil { + return nil, nil, err + } + + // Connect to the socket + client := abcicli.NewSocketClient(socket, false) + client.SetLogger(logger.With("module", "abci-client")) + if err := client.Start(); err != nil { + server.Stop() + return nil, nil, err + } + + return client, server, nil +} + +func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) { + // Start the listener + socket := cmn.Fmt("unix://%s.sock", name) + logger := log.TestingLogger() + + gapp := types.NewGRPCApplication(app) + server := abciserver.NewGRPCServer(socket, gapp) + server.SetLogger(logger.With("module", "abci-server")) + if err := server.Start(); err != nil { + return nil, nil, err + } + + client := abcicli.NewGRPCClient(socket, true) + client.SetLogger(logger.With("module", "abci-client")) + if err := client.Start(); err != nil { + server.Stop() + return nil, nil, err + } + return client, server, nil +} + +func TestClientServer(t *testing.T) { + // set up socket app + kvstore := NewKVStoreApplication() + client, server, err := makeSocketClientServer(kvstore, "kvstore-socket") + require.Nil(t, err) + defer server.Stop() + defer client.Stop() + + runClientTests(t, client) + + // set up grpc app + kvstore = NewKVStoreApplication() + gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc") + require.Nil(t, err) + defer gserver.Stop() + defer gclient.Stop() + + runClientTests(t, gclient) +} + +func runClientTests(t *testing.T, client abcicli.Client) { + // run some tests.... 
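+	// same key/value flows as testKVStore above, but driven through an
+	// abcicli.Client (socket or gRPC) via the *Sync methods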
+ key := "abc" + value := key + tx := []byte(key) + testClient(t, client, tx, key, value) + + value = "def" + tx = []byte(key + "=" + value) + testClient(t, client, tx, key, value) +} + +func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) { + ar, err := app.DeliverTxSync(tx) + require.NoError(t, err) + require.False(t, ar.IsErr(), ar) + // repeating tx doesn't raise error + ar, err = app.DeliverTxSync(tx) + require.NoError(t, err) + require.False(t, ar.IsErr(), ar) + + // make sure query is fine + resQuery, err := app.QuerySync(types.RequestQuery{ + Path: "/store", + Data: []byte(key), + }) + require.Nil(t, err) + require.Equal(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, value, string(resQuery.Value)) + + // make sure proof is fine + resQuery, err = app.QuerySync(types.RequestQuery{ + Path: "/store", + Data: []byte(key), + Prove: true, + }) + require.Nil(t, err) + require.Equal(t, code.CodeTypeOK, resQuery.Code) + require.Equal(t, value, string(resQuery.Value)) +} diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go new file mode 100644 index 000000000..12ccbab78 --- /dev/null +++ b/abci/example/kvstore/persistent_kvstore.go @@ -0,0 +1,200 @@ +package kvstore + +import ( + "bytes" + "encoding/hex" + "fmt" + "strconv" + "strings" + + "github.com/tendermint/tendermint/abci/example/code" + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + ValidatorSetChangePrefix string = "val:" +) + +//----------------------------------------- + +var _ types.Application = (*PersistentKVStoreApplication)(nil) + +type PersistentKVStoreApplication struct { + app *KVStoreApplication + + // validator set + ValUpdates []types.Validator + + logger log.Logger +} + +func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication { + name := "kvstore" + db, err := dbm.NewGoLevelDB(name, dbDir) + if err != nil { + panic(err) + } + + state := loadState(db) + + return &PersistentKVStoreApplication{ + app: &KVStoreApplication{state: state}, + logger: log.NewNopLogger(), + } +} + +func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) { + app.logger = l +} + +func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo { + res := app.app.Info(req) + res.LastBlockHeight = app.app.state.Height + res.LastBlockAppHash = app.app.state.AppHash + return res +} + +func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption { + return app.app.SetOption(req) +} + +// tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes +func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { + // if it starts with "val:", update the validator set + // format is "val:pubkey/power" + if isValidatorTx(tx) { + // update validators in the merkle tree + // and in app.ValUpdates + return app.execValidatorTx(tx) + } + + // otherwise, update the key-value store + return app.app.DeliverTx(tx) +} + +func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx { + return app.app.CheckTx(tx) +} + +// Commit will panic if InitChain was not called +func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit { + return app.app.Commit() +} + +func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery { + return 
app.app.Query(reqQuery) +} + +// Save the validators in the merkle tree +func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain { + for _, v := range req.Validators { + r := app.updateValidator(v) + if r.IsErr() { + app.logger.Error("Error updating validators", "r", r) + } + } + return types.ResponseInitChain{} +} + +// Track the block hash and header information +func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { + // reset valset changes + app.ValUpdates = make([]types.Validator, 0) + return types.ResponseBeginBlock{} +} + +// Update the validator set +func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock { + return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} +} + +//--------------------------------------------- +// update validators + +func (app *PersistentKVStoreApplication) Validators() (validators []types.Validator) { + itr := app.app.state.db.Iterator(nil, nil) + for ; itr.Valid(); itr.Next() { + if isValidatorTx(itr.Key()) { + validator := new(types.Validator) + err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator) + if err != nil { + panic(err) + } + validators = append(validators, *validator) + } + } + return +} + +func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte { + return []byte(cmn.Fmt("val:%X/%d", pubkey.Data, power)) +} + +func isValidatorTx(tx []byte) bool { + return strings.HasPrefix(string(tx), ValidatorSetChangePrefix) +} + +// format is "val:pubkey/power" +// pubkey is raw 32-byte ed25519 key +func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx { + tx = tx[len(ValidatorSetChangePrefix):] + + //get the pubkey and power + pubKeyAndPower := strings.Split(string(tx), "/") + if len(pubKeyAndPower) != 2 { + return types.ResponseDeliverTx{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Expected 'pubkey/power'. 
Got %v", pubKeyAndPower)} + } + pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1] + + // decode the pubkey + pubkey, err := hex.DecodeString(pubkeyS) + if err != nil { + return types.ResponseDeliverTx{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)} + } + + // decode the power + power, err := strconv.ParseInt(powerS, 10, 64) + if err != nil { + return types.ResponseDeliverTx{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Power (%s) is not an int", powerS)} + } + + // update + return app.updateValidator(types.Ed25519Validator(pubkey, int64(power))) +} + +// add, update, or remove a validator +func (app *PersistentKVStoreApplication) updateValidator(v types.Validator) types.ResponseDeliverTx { + key := []byte("val:" + string(v.PubKey.Data)) + if v.Power == 0 { + // remove validator + if !app.app.state.db.Has(key) { + return types.ResponseDeliverTx{ + Code: code.CodeTypeUnauthorized, + Log: fmt.Sprintf("Cannot remove non-existent validator %X", key)} + } + app.app.state.db.Delete(key) + } else { + // add or update validator + value := bytes.NewBuffer(make([]byte, 0)) + if err := types.WriteMessage(&v, value); err != nil { + return types.ResponseDeliverTx{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Error encoding validator: %v", err)} + } + app.app.state.db.Set(key, value.Bytes()) + } + + // we only update the changes array if we successfully updated the tree + app.ValUpdates = append(app.ValUpdates, v) + + return types.ResponseDeliverTx{Code: code.CodeTypeOK} +} diff --git a/abci/example/python/abci/__init__.py b/abci/example/python/abci/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/abci/example/python/abci/msg.py b/abci/example/python/abci/msg.py new file mode 100644 index 000000000..7329f5852 --- /dev/null +++ b/abci/example/python/abci/msg.py @@ -0,0 +1,50 @@ +from wire import decode_string + +# map type_byte to message name +message_types = { + 0x01: "echo", + 0x02: "flush", + 0x03: "info", + 0x04: "set_option", + 0x21: "deliver_tx", + 0x22: "check_tx", + 0x23: "commit", + 0x24: "add_listener", + 0x25: "rm_listener", +} + +# return the decoded arguments of abci messages + +class RequestDecoder(): + + def __init__(self, reader): + self.reader = reader + + def echo(self): + return decode_string(self.reader) + + def flush(self): + return + + def info(self): + return + + def set_option(self): + return decode_string(self.reader), decode_string(self.reader) + + def deliver_tx(self): + return decode_string(self.reader) + + def check_tx(self): + return decode_string(self.reader) + + def commit(self): + return + + def add_listener(self): + # TODO + return + + def rm_listener(self): + # TODO + return diff --git a/abci/example/python/abci/reader.py b/abci/example/python/abci/reader.py new file mode 100644 index 000000000..6c0dad94e --- /dev/null +++ b/abci/example/python/abci/reader.py @@ -0,0 +1,56 @@ + +# Simple read() method around a bytearray + + +class BytesBuffer(): + + def __init__(self, b): + self.buf = b + self.readCount = 0 + + def count(self): + return self.readCount + + def reset_count(self): + self.readCount = 0 + + def size(self): + return len(self.buf) + + def peek(self): + return self.buf[0] + + def write(self, b): + # b should be castable to byte array + self.buf += bytearray(b) + + def read(self, n): + if len(self.buf) < n: + print "reader err: buf less than n" + # TODO: exception + return + self.readCount += n + r = self.buf[:n] + self.buf = self.buf[n:] + return r + +# Buffer 
bytes off a tcp connection and read them off in chunks + + +class ConnReader(): + + def __init__(self, conn): + self.conn = conn + self.buf = bytearray() + + # blocking + def read(self, n): + while n > len(self.buf): + moreBuf = self.conn.recv(1024) + if not moreBuf: + raise IOError("dead connection") + self.buf = self.buf + bytearray(moreBuf) + + r = self.buf[:n] + self.buf = self.buf[n:] + return r diff --git a/abci/example/python/abci/server.py b/abci/example/python/abci/server.py new file mode 100644 index 000000000..40d50896c --- /dev/null +++ b/abci/example/python/abci/server.py @@ -0,0 +1,202 @@ +import socket +import select +import sys + +from wire import decode_varint, encode +from reader import BytesBuffer +from msg import RequestDecoder, message_types + +# hold the asyncronous state of a connection +# ie. we may not get enough bytes on one read to decode the message + +class Connection(): + + def __init__(self, fd, app): + self.fd = fd + self.app = app + self.recBuf = BytesBuffer(bytearray()) + self.resBuf = BytesBuffer(bytearray()) + self.msgLength = 0 + self.decoder = RequestDecoder(self.recBuf) + self.inProgress = False # are we in the middle of a message + + def recv(this): + data = this.fd.recv(1024) + if not data: # what about len(data) == 0 + raise IOError("dead connection") + this.recBuf.write(data) + +# ABCI server responds to messges by calling methods on the app + +class ABCIServer(): + + def __init__(self, app, port=5410): + self.app = app + # map conn file descriptors to (app, reqBuf, resBuf, msgDecoder) + self.appMap = {} + + self.port = port + self.listen_backlog = 10 + + self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.listener.setblocking(0) + self.listener.bind(('', port)) + + self.listener.listen(self.listen_backlog) + + self.shutdown = False + + self.read_list = [self.listener] + self.write_list = [] + + def handle_new_connection(self, r): + new_fd, new_addr = r.accept() + new_fd.setblocking(0) # non-blocking + self.read_list.append(new_fd) + self.write_list.append(new_fd) + print 'new connection to', new_addr + + self.appMap[new_fd] = Connection(new_fd, self.app) + + def handle_conn_closed(self, r): + self.read_list.remove(r) + self.write_list.remove(r) + r.close() + print "connection closed" + + def handle_recv(self, r): + # app, recBuf, resBuf, conn + conn = self.appMap[r] + while True: + try: + print "recv loop" + # check if we need more data first + if conn.inProgress: + if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength): + conn.recv() + else: + if conn.recBuf.size() == 0: + conn.recv() + + conn.inProgress = True + + # see if we have enough to get the message length + if conn.msgLength == 0: + ll = conn.recBuf.peek() + if conn.recBuf.size() < 1 + ll: + # we don't have enough bytes to read the length yet + return + print "decoding msg length" + conn.msgLength = decode_varint(conn.recBuf) + + # see if we have enough to decode the message + if conn.recBuf.size() < conn.msgLength: + return + + # now we can decode the message + + # first read the request type and get the particular msg + # decoder + typeByte = conn.recBuf.read(1) + typeByte = int(typeByte[0]) + resTypeByte = typeByte + 0x10 + req_type = message_types[typeByte] + + if req_type == "flush": + # messages are length prefixed + conn.resBuf.write(encode(1)) + conn.resBuf.write([resTypeByte]) + conn.fd.send(str(conn.resBuf.buf)) + conn.msgLength = 0 + conn.inProgress = False + conn.resBuf = 
BytesBuffer(bytearray()) + return + + decoder = getattr(conn.decoder, req_type) + + print "decoding args" + req_args = decoder() + print "got args", req_args + + # done decoding message + conn.msgLength = 0 + conn.inProgress = False + + req_f = getattr(conn.app, req_type) + if req_args is None: + res = req_f() + elif isinstance(req_args, tuple): + res = req_f(*req_args) + else: + res = req_f(req_args) + + if isinstance(res, tuple): + res, ret_code = res + else: + ret_code = res + res = None + + print "called", req_type, "ret code:", ret_code + if ret_code != 0: + print "non-zero retcode:", ret_code + + if req_type in ("echo", "info"): # these dont return a ret code + enc = encode(res) + # messages are length prefixed + conn.resBuf.write(encode(len(enc) + 1)) + conn.resBuf.write([resTypeByte]) + conn.resBuf.write(enc) + else: + enc, encRet = encode(res), encode(ret_code) + # messages are length prefixed + conn.resBuf.write(encode(len(enc) + len(encRet) + 1)) + conn.resBuf.write([resTypeByte]) + conn.resBuf.write(encRet) + conn.resBuf.write(enc) + except TypeError as e: + print "TypeError on reading from connection:", e + self.handle_conn_closed(r) + return + except ValueError as e: + print "ValueError on reading from connection:", e + self.handle_conn_closed(r) + return + except IOError as e: + print "IOError on reading from connection:", e + self.handle_conn_closed(r) + return + except Exception as e: + # sys.exc_info()[0] # TODO better + print "error reading from connection", str(e) + self.handle_conn_closed(r) + return + + def main_loop(self): + while not self.shutdown: + r_list, w_list, _ = select.select( + self.read_list, self.write_list, [], 2.5) + + for r in r_list: + if (r == self.listener): + try: + self.handle_new_connection(r) + # undo adding to read list ... + except NameError as e: + print "Could not connect due to NameError:", e + except TypeError as e: + print "Could not connect due to TypeError:", e + except: + print "Could not connect due to unexpected error:", sys.exc_info()[0] + else: + self.handle_recv(r) + + def handle_shutdown(self): + for r in self.read_list: + r.close() + for w in self.write_list: + try: + w.close() + except Exception as e: + print(e) # TODO: add logging + self.shutdown = True diff --git a/abci/example/python/abci/wire.py b/abci/example/python/abci/wire.py new file mode 100644 index 000000000..1a07e89f1 --- /dev/null +++ b/abci/example/python/abci/wire.py @@ -0,0 +1,115 @@ + +# the decoder works off a reader +# the encoder returns bytearray + + +def hex2bytes(h): + return bytearray(h.decode('hex')) + + +def bytes2hex(b): + if type(b) in (str, unicode): + return "".join([hex(ord(c))[2:].zfill(2) for c in b]) + else: + return bytes2hex(b.decode()) + + +# expects uvarint64 (no crazy big nums!) 
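+# Worked examples, derived from the encoder below:
+#   encode_varint(0)     == bytearray([0x00])
+#   encode_varint(1000)  == bytearray([0x02, 0x03, 0xE8])  # size byte, then big-endian bytes
+#   encode_varint(-1000) == bytearray([0xF2, 0x03, 0xE8])  # 0xF0 set on the size byte for negatives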
+def uvarint_size(i): + if i == 0: + return 0 + for j in xrange(1, 8): + if i < 1 << j * 8: + return j + return 8 + +# expects i < 2**size + + +def encode_big_endian(i, size): + if size == 0: + return bytearray() + return encode_big_endian(i / 256, size - 1) + bytearray([i % 256]) + + +def decode_big_endian(reader, size): + if size == 0: + return 0 + firstByte = reader.read(1)[0] + return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1) + +# ints are max 16 bytes long + + +def encode_varint(i): + negate = False + if i < 0: + negate = True + i = -i + size = uvarint_size(i) + if size == 0: + return bytearray([0]) + big_end = encode_big_endian(i, size) + if negate: + size += 0xF0 + return bytearray([size]) + big_end + +# returns the int and whats left of the byte array + + +def decode_varint(reader): + size = reader.read(1)[0] + if size == 0: + return 0 + + negate = True if size > int(0xF0) else False + if negate: + size = size - 0xF0 + i = decode_big_endian(reader, size) + if negate: + i = i * (-1) + return i + + +def encode_string(s): + size = encode_varint(len(s)) + return size + bytearray(s) + + +def decode_string(reader): + length = decode_varint(reader) + return str(reader.read(length)) + + +def encode_list(s): + b = bytearray() + map(b.extend, map(encode, s)) + return encode_varint(len(s)) + b + + +def encode(s): + if s is None: + return bytearray() + if isinstance(s, int): + return encode_varint(s) + elif isinstance(s, str): + return encode_string(s) + elif isinstance(s, list): + return encode_list(s) + else: + print "UNSUPPORTED TYPE!", type(s), s + + +if __name__ == '__main__': + ns = [100, 100, 1000, 256] + ss = [2, 5, 5, 2] + bs = map(encode_big_endian, ns, ss) + ds = map(decode_big_endian, bs, ss) + print ns + print [i[0] for i in ds] + + ss = ["abc", "hi there jim", "ok now what"] + e = map(encode_string, ss) + d = map(decode_string, e) + print ss + print [i[0] for i in d] diff --git a/abci/example/python/app.py b/abci/example/python/app.py new file mode 100644 index 000000000..1c041be6c --- /dev/null +++ b/abci/example/python/app.py @@ -0,0 +1,82 @@ +import sys + +from abci.wire import hex2bytes, decode_big_endian, encode_big_endian +from abci.server import ABCIServer +from abci.reader import BytesBuffer + + +class CounterApplication(): + + def __init__(self): + sys.exit("The python example is out of date. 
Upgrading the Python examples is currently left as an exercise to you.") + self.hashCount = 0 + self.txCount = 0 + self.serial = False + + def echo(self, msg): + return msg, 0 + + def info(self): + return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0 + + def set_option(self, key, value): + if key == "serial" and value == "on": + self.serial = True + return 0 + + def deliver_tx(self, txBytes): + if self.serial: + txByteArray = bytearray(txBytes) + if len(txBytes) >= 2 and txBytes[:2] == "0x": + txByteArray = hex2bytes(txBytes[2:]) + txValue = decode_big_endian( + BytesBuffer(txByteArray), len(txBytes)) + if txValue != self.txCount: + return None, 6 + self.txCount += 1 + return None, 0 + + def check_tx(self, txBytes): + if self.serial: + txByteArray = bytearray(txBytes) + if len(txBytes) >= 2 and txBytes[:2] == "0x": + txByteArray = hex2bytes(txBytes[2:]) + txValue = decode_big_endian( + BytesBuffer(txByteArray), len(txBytes)) + if txValue < self.txCount: + return 6 + return 0 + + def commit(self): + self.hashCount += 1 + if self.txCount == 0: + return "", 0 + h = encode_big_endian(self.txCount, 8) + h.reverse() + return str(h), 0 + + def add_listener(self): + return 0 + + def rm_listener(self): + return 0 + + def event(self): + return + + +if __name__ == '__main__': + l = len(sys.argv) + if l == 1: + port = 26658 + elif l == 2: + port = int(sys.argv[1]) + else: + print "too many arguments" + quit() + + print 'ABCI Demo APP (Python)' + + app = CounterApplication() + server = ABCIServer(app, port) + server.main_loop() diff --git a/abci/example/python3/abci/__init__.py b/abci/example/python3/abci/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/abci/example/python3/abci/msg.py b/abci/example/python3/abci/msg.py new file mode 100644 index 000000000..807c4b6b0 --- /dev/null +++ b/abci/example/python3/abci/msg.py @@ -0,0 +1,50 @@ +from .wire import decode_string + +# map type_byte to message name +message_types = { + 0x01: "echo", + 0x02: "flush", + 0x03: "info", + 0x04: "set_option", + 0x21: "deliver_tx", + 0x22: "check_tx", + 0x23: "commit", + 0x24: "add_listener", + 0x25: "rm_listener", +} + +# return the decoded arguments of abci messages + +class RequestDecoder(): + + def __init__(self, reader): + self.reader = reader + + def echo(self): + return decode_string(self.reader) + + def flush(self): + return + + def info(self): + return + + def set_option(self): + return decode_string(self.reader), decode_string(self.reader) + + def deliver_tx(self): + return decode_string(self.reader) + + def check_tx(self): + return decode_string(self.reader) + + def commit(self): + return + + def add_listener(self): + # TODO + return + + def rm_listener(self): + # TODO + return diff --git a/abci/example/python3/abci/reader.py b/abci/example/python3/abci/reader.py new file mode 100644 index 000000000..c016ac604 --- /dev/null +++ b/abci/example/python3/abci/reader.py @@ -0,0 +1,56 @@ + +# Simple read() method around a bytearray + + +class BytesBuffer(): + + def __init__(self, b): + self.buf = b + self.readCount = 0 + + def count(self): + return self.readCount + + def reset_count(self): + self.readCount = 0 + + def size(self): + return len(self.buf) + + def peek(self): + return self.buf[0] + + def write(self, b): + # b should be castable to byte array + self.buf += bytearray(b) + + def read(self, n): + if len(self.buf) < n: + print("reader err: buf less than n") + # TODO: exception + return + self.readCount += n + r = self.buf[:n] + self.buf = self.buf[n:] + return r + +# Buffer 
bytes off a tcp connection and read them off in chunks + + +class ConnReader(): + + def __init__(self, conn): + self.conn = conn + self.buf = bytearray() + + # blocking + def read(self, n): + while n > len(self.buf): + moreBuf = self.conn.recv(1024) + if not moreBuf: + raise IOError("dead connection") + self.buf = self.buf + bytearray(moreBuf) + + r = self.buf[:n] + self.buf = self.buf[n:] + return r diff --git a/abci/example/python3/abci/server.py b/abci/example/python3/abci/server.py new file mode 100644 index 000000000..04063262d --- /dev/null +++ b/abci/example/python3/abci/server.py @@ -0,0 +1,196 @@ +import socket +import select +import sys +import logging + +from .wire import decode_varint, encode +from .reader import BytesBuffer +from .msg import RequestDecoder, message_types + +# hold the asyncronous state of a connection +# ie. we may not get enough bytes on one read to decode the message + +logger = logging.getLogger(__name__) + +class Connection(): + + def __init__(self, fd, app): + self.fd = fd + self.app = app + self.recBuf = BytesBuffer(bytearray()) + self.resBuf = BytesBuffer(bytearray()) + self.msgLength = 0 + self.decoder = RequestDecoder(self.recBuf) + self.inProgress = False # are we in the middle of a message + + def recv(this): + data = this.fd.recv(1024) + if not data: # what about len(data) == 0 + raise IOError("dead connection") + this.recBuf.write(data) + +# ABCI server responds to messges by calling methods on the app + +class ABCIServer(): + + def __init__(self, app, port=5410): + self.app = app + # map conn file descriptors to (app, reqBuf, resBuf, msgDecoder) + self.appMap = {} + + self.port = port + self.listen_backlog = 10 + + self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.listener.setblocking(0) + self.listener.bind(('', port)) + + self.listener.listen(self.listen_backlog) + + self.shutdown = False + + self.read_list = [self.listener] + self.write_list = [] + + def handle_new_connection(self, r): + new_fd, new_addr = r.accept() + new_fd.setblocking(0) # non-blocking + self.read_list.append(new_fd) + self.write_list.append(new_fd) + print('new connection to', new_addr) + + self.appMap[new_fd] = Connection(new_fd, self.app) + + def handle_conn_closed(self, r): + self.read_list.remove(r) + self.write_list.remove(r) + r.close() + print("connection closed") + + def handle_recv(self, r): + # app, recBuf, resBuf, conn + conn = self.appMap[r] + while True: + try: + print("recv loop") + # check if we need more data first + if conn.inProgress: + if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength): + conn.recv() + else: + if conn.recBuf.size() == 0: + conn.recv() + + conn.inProgress = True + + # see if we have enough to get the message length + if conn.msgLength == 0: + ll = conn.recBuf.peek() + if conn.recBuf.size() < 1 + ll: + # we don't have enough bytes to read the length yet + return + print("decoding msg length") + conn.msgLength = decode_varint(conn.recBuf) + + # see if we have enough to decode the message + if conn.recBuf.size() < conn.msgLength: + return + + # now we can decode the message + + # first read the request type and get the particular msg + # decoder + typeByte = conn.recBuf.read(1) + typeByte = int(typeByte[0]) + resTypeByte = typeByte + 0x10 + req_type = message_types[typeByte] + + if req_type == "flush": + # messages are length prefixed + conn.resBuf.write(encode(1)) + conn.resBuf.write([resTypeByte]) + conn.fd.send(conn.resBuf.buf) + 
conn.msgLength = 0 + conn.inProgress = False + conn.resBuf = BytesBuffer(bytearray()) + return + + decoder = getattr(conn.decoder, req_type) + + print("decoding args") + req_args = decoder() + print("got args", req_args) + + # done decoding message + conn.msgLength = 0 + conn.inProgress = False + + req_f = getattr(conn.app, req_type) + if req_args is None: + res = req_f() + elif isinstance(req_args, tuple): + res = req_f(*req_args) + else: + res = req_f(req_args) + + if isinstance(res, tuple): + res, ret_code = res + else: + ret_code = res + res = None + + print("called", req_type, "ret code:", ret_code, 'res:', res) + if ret_code != 0: + print("non-zero retcode:", ret_code) + + if req_type in ("echo", "info"): # these dont return a ret code + enc = encode(res) + # messages are length prefixed + conn.resBuf.write(encode(len(enc) + 1)) + conn.resBuf.write([resTypeByte]) + conn.resBuf.write(enc) + else: + enc, encRet = encode(res), encode(ret_code) + # messages are length prefixed + conn.resBuf.write(encode(len(enc) + len(encRet) + 1)) + conn.resBuf.write([resTypeByte]) + conn.resBuf.write(encRet) + conn.resBuf.write(enc) + except IOError as e: + print("IOError on reading from connection:", e) + self.handle_conn_closed(r) + return + except Exception as e: + logger.exception("error reading from connection") + self.handle_conn_closed(r) + return + + def main_loop(self): + while not self.shutdown: + r_list, w_list, _ = select.select( + self.read_list, self.write_list, [], 2.5) + + for r in r_list: + if (r == self.listener): + try: + self.handle_new_connection(r) + # undo adding to read list ... + except NameError as e: + print("Could not connect due to NameError:", e) + except TypeError as e: + print("Could not connect due to TypeError:", e) + except: + print("Could not connect due to unexpected error:", sys.exc_info()[0]) + else: + self.handle_recv(r) + + def handle_shutdown(self): + for r in self.read_list: + r.close() + for w in self.write_list: + try: + w.close() + except Exception as e: + print(e) # TODO: add logging + self.shutdown = True diff --git a/abci/example/python3/abci/wire.py b/abci/example/python3/abci/wire.py new file mode 100644 index 000000000..72f5fab8b --- /dev/null +++ b/abci/example/python3/abci/wire.py @@ -0,0 +1,119 @@ + +# the decoder works off a reader +# the encoder returns bytearray + + +def hex2bytes(h): + return bytearray(h.decode('hex')) + + +def bytes2hex(b): + if type(b) in (str, str): + return "".join([hex(ord(c))[2:].zfill(2) for c in b]) + else: + return bytes2hex(b.decode()) + + +# expects uvarint64 (no crazy big nums!) 
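+# Worked example, derived from the encoders below:
+#   encode_string("abc") == bytearray([0x01, 0x03]) + bytearray("abc", "utf8")
+#   i.e. a one-byte varint size, the length, then the raw utf8 bytes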
+def uvarint_size(i): + if i == 0: + return 0 + for j in range(1, 8): + if i < 1 << j * 8: + return j + return 8 + +# expects i < 2**size + + +def encode_big_endian(i, size): + if size == 0: + return bytearray() + return encode_big_endian(i // 256, size - 1) + bytearray([i % 256]) + + +def decode_big_endian(reader, size): + if size == 0: + return 0 + firstByte = reader.read(1)[0] + return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1) + +# ints are max 16 bytes long + + +def encode_varint(i): + negate = False + if i < 0: + negate = True + i = -i + size = uvarint_size(i) + if size == 0: + return bytearray([0]) + big_end = encode_big_endian(i, size) + if negate: + size += 0xF0 + return bytearray([size]) + big_end + +# returns the int and whats left of the byte array + + +def decode_varint(reader): + size = reader.read(1)[0] + if size == 0: + return 0 + + negate = True if size > int(0xF0) else False + if negate: + size = size - 0xF0 + i = decode_big_endian(reader, size) + if negate: + i = i * (-1) + return i + + +def encode_string(s): + size = encode_varint(len(s)) + return size + bytearray(s, 'utf8') + + +def decode_string(reader): + length = decode_varint(reader) + raw_data = reader.read(length) + return raw_data.decode() + + +def encode_list(s): + b = bytearray() + list(map(b.extend, list(map(encode, s)))) + return encode_varint(len(s)) + b + + +def encode(s): + print('encoding', repr(s)) + if s is None: + return bytearray() + if isinstance(s, int): + return encode_varint(s) + elif isinstance(s, str): + return encode_string(s) + elif isinstance(s, list): + return encode_list(s) + elif isinstance(s, bytearray): + return encode_string(s) + else: + print("UNSUPPORTED TYPE!", type(s), s) + + +if __name__ == '__main__': + ns = [100, 100, 1000, 256] + ss = [2, 5, 5, 2] + bs = list(map(encode_big_endian, ns, ss)) + ds = list(map(decode_big_endian, bs, ss)) + print(ns) + print([i[0] for i in ds]) + + ss = ["abc", "hi there jim", "ok now what"] + e = list(map(encode_string, ss)) + d = list(map(decode_string, e)) + print(ss) + print([i[0] for i in d]) diff --git a/abci/example/python3/app.py b/abci/example/python3/app.py new file mode 100644 index 000000000..9f051b1e2 --- /dev/null +++ b/abci/example/python3/app.py @@ -0,0 +1,82 @@ +import sys + +from abci.wire import hex2bytes, decode_big_endian, encode_big_endian +from abci.server import ABCIServer +from abci.reader import BytesBuffer + + +class CounterApplication(): + + def __init__(self): + sys.exit("The python example is out of date. 
Upgrading the Python examples is currently left as an exercise to you.") + self.hashCount = 0 + self.txCount = 0 + self.serial = False + + def echo(self, msg): + return msg, 0 + + def info(self): + return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0 + + def set_option(self, key, value): + if key == "serial" and value == "on": + self.serial = True + return 0 + + def deliver_tx(self, txBytes): + if self.serial: + txByteArray = bytearray(txBytes) + if len(txBytes) >= 2 and txBytes[:2] == "0x": + txByteArray = hex2bytes(txBytes[2:]) + txValue = decode_big_endian( + BytesBuffer(txByteArray), len(txBytes)) + if txValue != self.txCount: + return None, 6 + self.txCount += 1 + return None, 0 + + def check_tx(self, txBytes): + if self.serial: + txByteArray = bytearray(txBytes) + if len(txBytes) >= 2 and txBytes[:2] == "0x": + txByteArray = hex2bytes(txBytes[2:]) + txValue = decode_big_endian( + BytesBuffer(txByteArray), len(txBytes)) + if txValue < self.txCount: + return 6 + return 0 + + def commit(self): + self.hashCount += 1 + if self.txCount == 0: + return "", 0 + h = encode_big_endian(self.txCount, 8) + h.reverse() + return h.decode(), 0 + + def add_listener(self): + return 0 + + def rm_listener(self): + return 0 + + def event(self): + return + + +if __name__ == '__main__': + l = len(sys.argv) + if l == 1: + port = 26658 + elif l == 2: + port = int(sys.argv[1]) + else: + print("too many arguments") + quit() + + print('ABCI Demo APP (Python)') + + app = CounterApplication() + server = ABCIServer(app, port) + server.main_loop() diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go new file mode 100644 index 000000000..ccbe609cc --- /dev/null +++ b/abci/server/grpc_server.go @@ -0,0 +1,57 @@ +package server + +import ( + "net" + + "google.golang.org/grpc" + + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +type GRPCServer struct { + cmn.BaseService + + proto string + addr string + listener net.Listener + server *grpc.Server + + app types.ABCIApplicationServer +} + +// NewGRPCServer returns a new gRPC ABCI server +func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) cmn.Service { + proto, addr := cmn.ProtocolAndAddress(protoAddr) + s := &GRPCServer{ + proto: proto, + addr: addr, + listener: nil, + app: app, + } + s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s) + return s +} + +// OnStart starts the gRPC service +func (s *GRPCServer) OnStart() error { + if err := s.BaseService.OnStart(); err != nil { + return err + } + ln, err := net.Listen(s.proto, s.addr) + if err != nil { + return err + } + s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr) + s.listener = ln + s.server = grpc.NewServer() + types.RegisterABCIApplicationServer(s.server, s.app) + go s.server.Serve(s.listener) + return nil +} + +// OnStop stops the gRPC server +func (s *GRPCServer) OnStop() { + s.BaseService.OnStop() + s.server.Stop() +} diff --git a/abci/server/server.go b/abci/server/server.go new file mode 100644 index 000000000..ada514fa8 --- /dev/null +++ b/abci/server/server.go @@ -0,0 +1,31 @@ +/* +Package server is used to start a new ABCI server. 
+
+It contains two server implementations:
+ * gRPC server
+ * socket server
+
+*/
+
+package server
+
+import (
+	"fmt"
+
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+func NewServer(protoAddr, transport string, app types.Application) (cmn.Service, error) {
+	var s cmn.Service
+	var err error
+	switch transport {
+	case "socket":
+		s = NewSocketServer(protoAddr, app)
+	case "grpc":
+		s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
+	default:
+		err = fmt.Errorf("Unknown server type %s", transport)
+	}
+	return s, err
+}
diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go
new file mode 100644
index 000000000..4b92f04cf
--- /dev/null
+++ b/abci/server/socket_server.go
@@ -0,0 +1,226 @@
+package server
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// var maxNumberConnections = 2
+
+type SocketServer struct {
+	cmn.BaseService
+
+	proto    string
+	addr     string
+	listener net.Listener
+
+	connsMtx   sync.Mutex
+	conns      map[int]net.Conn
+	nextConnID int
+
+	appMtx sync.Mutex
+	app    types.Application
+}
+
+func NewSocketServer(protoAddr string, app types.Application) cmn.Service {
+	proto, addr := cmn.ProtocolAndAddress(protoAddr)
+	s := &SocketServer{
+		proto:    proto,
+		addr:     addr,
+		listener: nil,
+		app:      app,
+		conns:    make(map[int]net.Conn),
+	}
+	s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s)
+	return s
+}
+
+func (s *SocketServer) OnStart() error {
+	if err := s.BaseService.OnStart(); err != nil {
+		return err
+	}
+	ln, err := net.Listen(s.proto, s.addr)
+	if err != nil {
+		return err
+	}
+	s.listener = ln
+	go s.acceptConnectionsRoutine()
+	return nil
+}
+
+func (s *SocketServer) OnStop() {
+	s.BaseService.OnStop()
+	if err := s.listener.Close(); err != nil {
+		s.Logger.Error("Error closing listener", "err", err)
+	}
+
+	s.connsMtx.Lock()
+	defer s.connsMtx.Unlock()
+	for id, conn := range s.conns {
+		delete(s.conns, id)
+		if err := conn.Close(); err != nil {
+			s.Logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
+		}
+	}
+}
+
+func (s *SocketServer) addConn(conn net.Conn) int {
+	s.connsMtx.Lock()
+	defer s.connsMtx.Unlock()
+
+	connID := s.nextConnID
+	s.nextConnID++
+	s.conns[connID] = conn
+
+	return connID
+}
+
+// deletes conn even if close errs
+func (s *SocketServer) rmConn(connID int) error {
+	s.connsMtx.Lock()
+	defer s.connsMtx.Unlock()
+
+	conn, ok := s.conns[connID]
+	if !ok {
+		return fmt.Errorf("Connection %d does not exist", connID)
+	}
+
+	delete(s.conns, connID)
+	return conn.Close()
+}
+
+func (s *SocketServer) acceptConnectionsRoutine() {
+	for {
+		// Accept a connection
+		s.Logger.Info("Waiting for new connection...")
+		conn, err := s.listener.Accept()
+		if err != nil {
+			if !s.IsRunning() {
+				return // Ignore error from listener closing.
+			}
+			s.Logger.Error("Failed to accept connection: " + err.Error())
+			continue
+		}
+
+		s.Logger.Info("Accepted a new connection")
+
+		connID := s.addConn(conn)
+
+		closeConn := make(chan error, 2)              // Push to signal connection closed
+		responses := make(chan *types.Response, 1000) // A channel to buffer responses
+
+		// Read requests from conn and deal with them
+		go s.handleRequests(closeConn, conn, responses)
+		// Pull responses from 'responses' and write them to conn.
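+		// (requests are handled one at a time under appMtx, so responses enter
+		// this channel, and reach the client, in request order)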
+ go s.handleResponses(closeConn, conn, responses) + + // Wait until signal to close connection + go s.waitForClose(closeConn, connID) + } +} + +func (s *SocketServer) waitForClose(closeConn chan error, connID int) { + err := <-closeConn + if err == io.EOF { + s.Logger.Error("Connection was closed by client") + } else if err != nil { + s.Logger.Error("Connection error", "error", err) + } else { + // never happens + s.Logger.Error("Connection was closed.") + } + + // Close the connection + if err := s.rmConn(connID); err != nil { + s.Logger.Error("Error in closing connection", "error", err) + } +} + +// Read requests from conn and deal with them +func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, responses chan<- *types.Response) { + var count int + var bufReader = bufio.NewReader(conn) + for { + + var req = &types.Request{} + err := types.ReadMessage(bufReader, req) + if err != nil { + if err == io.EOF { + closeConn <- err + } else { + closeConn <- fmt.Errorf("Error reading message: %v", err.Error()) + } + return + } + s.appMtx.Lock() + count++ + s.handleRequest(req, responses) + s.appMtx.Unlock() + } +} + +func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) { + switch r := req.Value.(type) { + case *types.Request_Echo: + responses <- types.ToResponseEcho(r.Echo.Message) + case *types.Request_Flush: + responses <- types.ToResponseFlush() + case *types.Request_Info: + res := s.app.Info(*r.Info) + responses <- types.ToResponseInfo(res) + case *types.Request_SetOption: + res := s.app.SetOption(*r.SetOption) + responses <- types.ToResponseSetOption(res) + case *types.Request_DeliverTx: + res := s.app.DeliverTx(r.DeliverTx.Tx) + responses <- types.ToResponseDeliverTx(res) + case *types.Request_CheckTx: + res := s.app.CheckTx(r.CheckTx.Tx) + responses <- types.ToResponseCheckTx(res) + case *types.Request_Commit: + res := s.app.Commit() + responses <- types.ToResponseCommit(res) + case *types.Request_Query: + res := s.app.Query(*r.Query) + responses <- types.ToResponseQuery(res) + case *types.Request_InitChain: + res := s.app.InitChain(*r.InitChain) + responses <- types.ToResponseInitChain(res) + case *types.Request_BeginBlock: + res := s.app.BeginBlock(*r.BeginBlock) + responses <- types.ToResponseBeginBlock(res) + case *types.Request_EndBlock: + res := s.app.EndBlock(*r.EndBlock) + responses <- types.ToResponseEndBlock(res) + default: + responses <- types.ToResponseException("Unknown request") + } +} + +// Pull responses from 'responses' and write them to conn. 
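+// Writes are buffered and only flushed to the socket when a Flush response
+// comes through, mirroring the client's request/flush pattern.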
+func (s *SocketServer) handleResponses(closeConn chan error, conn net.Conn, responses <-chan *types.Response) { + var count int + var bufWriter = bufio.NewWriter(conn) + for { + var res = <-responses + err := types.WriteMessage(res, bufWriter) + if err != nil { + closeConn <- fmt.Errorf("Error writing message: %v", err.Error()) + return + } + if _, ok := res.Value.(*types.Response_Flush); ok { + err = bufWriter.Flush() + if err != nil { + closeConn <- fmt.Errorf("Error flushing write buffer: %v", err.Error()) + return + } + } + count++ + } +} diff --git a/abci/tests/benchmarks/blank.go b/abci/tests/benchmarks/blank.go new file mode 100644 index 000000000..20f08f14b --- /dev/null +++ b/abci/tests/benchmarks/blank.go @@ -0,0 +1 @@ +package benchmarks diff --git a/abci/tests/benchmarks/parallel/parallel.go b/abci/tests/benchmarks/parallel/parallel.go new file mode 100644 index 000000000..78b69ed12 --- /dev/null +++ b/abci/tests/benchmarks/parallel/parallel.go @@ -0,0 +1,55 @@ +package main + +import ( + "bufio" + "fmt" + "log" + + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func main() { + + conn, err := cmn.Connect("unix://test.sock") + if err != nil { + log.Fatal(err.Error()) + } + + // Read a bunch of responses + go func() { + counter := 0 + for { + var res = &types.Response{} + err := types.ReadMessage(conn, res) + if err != nil { + log.Fatal(err.Error()) + } + counter++ + if counter%1000 == 0 { + fmt.Println("Read", counter) + } + } + }() + + // Write a bunch of requests + counter := 0 + for i := 0; ; i++ { + var bufWriter = bufio.NewWriter(conn) + var req = types.ToRequestEcho("foobar") + + err := types.WriteMessage(req, bufWriter) + if err != nil { + log.Fatal(err.Error()) + } + err = bufWriter.Flush() + if err != nil { + log.Fatal(err.Error()) + } + + counter++ + if counter%1000 == 0 { + fmt.Println("Write", counter) + } + } +} diff --git a/abci/tests/benchmarks/simple/simple.go b/abci/tests/benchmarks/simple/simple.go new file mode 100644 index 000000000..b0819799b --- /dev/null +++ b/abci/tests/benchmarks/simple/simple.go @@ -0,0 +1,69 @@ +package main + +import ( + "bufio" + "fmt" + "log" + "net" + "reflect" + + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func main() { + + conn, err := cmn.Connect("unix://test.sock") + if err != nil { + log.Fatal(err.Error()) + } + + // Make a bunch of requests + counter := 0 + for i := 0; ; i++ { + req := types.ToRequestEcho("foobar") + _, err := makeRequest(conn, req) + if err != nil { + log.Fatal(err.Error()) + } + counter++ + if counter%1000 == 0 { + fmt.Println(counter) + } + } +} + +func makeRequest(conn net.Conn, req *types.Request) (*types.Response, error) { + var bufWriter = bufio.NewWriter(conn) + + // Write desired request + err := types.WriteMessage(req, bufWriter) + if err != nil { + return nil, err + } + err = types.WriteMessage(types.ToRequestFlush(), bufWriter) + if err != nil { + return nil, err + } + err = bufWriter.Flush() + if err != nil { + return nil, err + } + + // Read desired response + var res = &types.Response{} + err = types.ReadMessage(conn, res) + if err != nil { + return nil, err + } + var resFlush = &types.Response{} + err = types.ReadMessage(conn, resFlush) + if err != nil { + return nil, err + } + if _, ok := resFlush.Value.(*types.Response_Flush); !ok { + return nil, fmt.Errorf("Expected flush response but got something else: %v", reflect.TypeOf(resFlush)) + } + + return res, nil +} diff --git 
a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go new file mode 100644 index 000000000..f76c9baf1 --- /dev/null +++ b/abci/tests/client_server_test.go @@ -0,0 +1,27 @@ +package tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + abciserver "github.com/tendermint/tendermint/abci/server" +) + +func TestClientServerNoAddrPrefix(t *testing.T) { + addr := "localhost:26658" + transport := "socket" + app := kvstore.NewKVStoreApplication() + + server, err := abciserver.NewServer(addr, transport, app) + assert.NoError(t, err, "expected no error on NewServer") + err = server.Start() + assert.NoError(t, err, "expected no error on server.Start") + + client, err := abciclient.NewClient(addr, transport, true) + assert.NoError(t, err, "expected no error on NewClient") + err = client.Start() + assert.NoError(t, err, "expected no error on client.Start") +} diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go new file mode 100644 index 000000000..f67297cd7 --- /dev/null +++ b/abci/tests/server/client.go @@ -0,0 +1,96 @@ +package testsuite + +import ( + "bytes" + "errors" + "fmt" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func InitChain(client abcicli.Client) error { + total := 10 + vals := make([]types.Validator, total) + for i := 0; i < total; i++ { + pubkey := cmn.RandBytes(33) + power := cmn.RandInt() + vals[i] = types.Ed25519Validator(pubkey, int64(power)) + } + _, err := client.InitChainSync(types.RequestInitChain{ + Validators: vals, + }) + if err != nil { + fmt.Printf("Failed test: InitChain - %v\n", err) + return err + } + fmt.Println("Passed test: InitChain") + return nil +} + +func SetOption(client abcicli.Client, key, value string) error { + _, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value}) + if err != nil { + fmt.Println("Failed test: SetOption") + fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err) + return err + } + fmt.Println("Passed test: SetOption") + return nil +} + +func Commit(client abcicli.Client, hashExp []byte) error { + res, err := client.CommitSync() + data := res.Data + if err != nil { + fmt.Println("Failed test: Commit") + fmt.Printf("error while committing: %v\n", err) + return err + } + if !bytes.Equal(data, hashExp) { + fmt.Println("Failed test: Commit") + fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp) + return errors.New("CommitTx failed") + } + fmt.Println("Passed test: Commit") + return nil +} + +func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { + res, _ := client.DeliverTxSync(txBytes) + code, data, log := res.Code, res.Data, res.Log + if code != codeExp { + fmt.Println("Failed test: DeliverTx") + fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n", + code, codeExp, log) + return errors.New("DeliverTx error") + } + if !bytes.Equal(data, dataExp) { + fmt.Println("Failed test: DeliverTx") + fmt.Printf("DeliverTx response data was unexpected. 
Got %X expected %X\n", + data, dataExp) + return errors.New("DeliverTx error") + } + fmt.Println("Passed test: DeliverTx") + return nil +} + +func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { + res, _ := client.CheckTxSync(txBytes) + code, data, log := res.Code, res.Data, res.Log + if code != codeExp { + fmt.Println("Failed test: CheckTx") + fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n", + code, codeExp, log) + return errors.New("CheckTx") + } + if !bytes.Equal(data, dataExp) { + fmt.Println("Failed test: CheckTx") + fmt.Printf("CheckTx response data was unexpected. Got %X expected %X\n", + data, dataExp) + return errors.New("CheckTx") + } + fmt.Println("Passed test: CheckTx") + return nil +} diff --git a/abci/tests/test_app/app.go b/abci/tests/test_app/app.go new file mode 100644 index 000000000..a33f4ee9e --- /dev/null +++ b/abci/tests/test_app/app.go @@ -0,0 +1,78 @@ +package main + +import ( + "bytes" + "fmt" + "os" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" +) + +func startClient(abciType string) abcicli.Client { + // Start client + client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true) + if err != nil { + panic(err.Error()) + } + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + client.SetLogger(logger.With("module", "abcicli")) + if err := client.Start(); err != nil { + panicf("connecting to abci_app: %v", err.Error()) + } + + return client +} + +func setOption(client abcicli.Client, key, value string) { + _, err := client.SetOptionSync(types.RequestSetOption{key, value}) + if err != nil { + panicf("setting %v=%v: \nerr: %v", key, value, err) + } +} + +func commit(client abcicli.Client, hashExp []byte) { + res, err := client.CommitSync() + if err != nil { + panicf("client error: %v", err) + } + if !bytes.Equal(res.Data, hashExp) { + panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp) + } +} + +func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) { + res, err := client.DeliverTxSync(txBytes) + if err != nil { + panicf("client error: %v", err) + } + if res.Code != codeExp { + panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log) + } + if !bytes.Equal(res.Data, dataExp) { + panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp) + } +} + +/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) { + res, err := client.CheckTxSync(txBytes) + if err != nil { + panicf("client error: %v", err) + } + if res.IsErr() { + panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log) + } + if res.Code != codeExp { + panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v", + res.Code, codeExp, res.Log) + } + if !bytes.Equal(res.Data, dataExp) { + panicf("CheckTx response data was unexpected. 
Got %X expected %X", + res.Data, dataExp) + } +}*/ + +func panicf(format string, a ...interface{}) { + panic(fmt.Sprintf(format, a...)) +} diff --git a/abci/tests/test_app/main.go b/abci/tests/test_app/main.go new file mode 100644 index 000000000..8f45cec3c --- /dev/null +++ b/abci/tests/test_app/main.go @@ -0,0 +1,84 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/exec" + "time" + + "github.com/tendermint/tendermint/abci/example/code" + "github.com/tendermint/tendermint/abci/types" +) + +var abciType string + +func init() { + abciType = os.Getenv("ABCI") + if abciType == "" { + abciType = "socket" + } +} + +func main() { + testCounter() +} + +const ( + maxABCIConnectTries = 10 +) + +func ensureABCIIsUp(typ string, n int) error { + var err error + cmdString := "abci-cli echo hello" + if typ == "grpc" { + cmdString = "abci-cli --abci grpc echo hello" + } + + for i := 0; i < n; i++ { + cmd := exec.Command("bash", "-c", cmdString) // nolint: gas + _, err = cmd.CombinedOutput() + if err == nil { + break + } + <-time.After(500 * time.Millisecond) + } + return err +} + +func testCounter() { + abciApp := os.Getenv("ABCI_APP") + if abciApp == "" { + panic("No ABCI_APP specified") + } + + fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType) + cmd := exec.Command("bash", "-c", fmt.Sprintf("abci-cli %s", abciApp)) // nolint: gas + cmd.Stdout = os.Stdout + if err := cmd.Start(); err != nil { + log.Fatalf("starting %q err: %v", abciApp, err) + } + defer cmd.Wait() + defer cmd.Process.Kill() + + if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil { + log.Fatalf("echo failed: %v", err) + } + + client := startClient(abciType) + defer client.Stop() + + setOption(client, "serial", "on") + commit(client, nil) + deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) + commit(client, nil) + deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil) + commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) + deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) + deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil) + deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil) + deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil) + deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil) + deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) + commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) +} diff --git a/abci/tests/test_app/test.sh b/abci/tests/test_app/test.sh new file mode 100755 index 000000000..230c94163 --- /dev/null +++ b/abci/tests/test_app/test.sh @@ -0,0 +1,27 @@ +#! /bin/bash +set -e + +# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it + +# Get the directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +# Change into that dir because we expect that. 
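+# ABCI_APP picks the app (and flags) for abci-cli to run; ABCI picks the
+# transport ("socket" by default, or "grpc") used by the Go test client.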
+cd "$DIR" + +echo "RUN COUNTER OVER SOCKET" +# test golang counter +ABCI_APP="counter" go run ./*.go +echo "----------------------" + + +echo "RUN COUNTER OVER GRPC" +# test golang counter via grpc +ABCI_APP="counter --abci=grpc" ABCI="grpc" go run ./*.go +echo "----------------------" + +# test nodejs counter +# TODO: fix node app +#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci new file mode 100644 index 000000000..e909266ec --- /dev/null +++ b/abci/tests/test_cli/ex1.abci @@ -0,0 +1,10 @@ +echo hello +info +commit +deliver_tx "abc" +info +commit +query "abc" +deliver_tx "def=xyz" +commit +query "def" diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out new file mode 100644 index 000000000..5d4c196dc --- /dev/null +++ b/abci/tests/test_cli/ex1.abci.out @@ -0,0 +1,47 @@ +> echo hello +-> code: OK +-> data: hello +-> data.hex: 0x68656C6C6F + +> info +-> code: OK +-> data: {"size":0} +-> data.hex: 0x7B2273697A65223A307D + +> commit +-> code: OK +-> data.hex: 0x0000000000000000 + +> deliver_tx "abc" +-> code: OK + +> info +-> code: OK +-> data: {"size":1} +-> data.hex: 0x7B2273697A65223A317D + +> commit +-> code: OK +-> data.hex: 0x0200000000000000 + +> query "abc" +-> code: OK +-> log: exists +-> height: 0 +-> value: abc +-> value.hex: 616263 + +> deliver_tx "def=xyz" +-> code: OK + +> commit +-> code: OK +-> data.hex: 0x0400000000000000 + +> query "def" +-> code: OK +-> log: exists +-> height: 0 +-> value: xyz +-> value.hex: 78797A + diff --git a/abci/tests/test_cli/ex2.abci b/abci/tests/test_cli/ex2.abci new file mode 100644 index 000000000..3b435f22a --- /dev/null +++ b/abci/tests/test_cli/ex2.abci @@ -0,0 +1,8 @@ +set_option serial on +check_tx 0x00 +check_tx 0xff +deliver_tx 0x00 +check_tx 0x00 +deliver_tx 0x01 +deliver_tx 0x04 +info diff --git a/abci/tests/test_cli/ex2.abci.out b/abci/tests/test_cli/ex2.abci.out new file mode 100644 index 000000000..5bceb85d8 --- /dev/null +++ b/abci/tests/test_cli/ex2.abci.out @@ -0,0 +1,29 @@ +> set_option serial on +-> code: OK +-> log: OK (SetOption doesn't return anything.) + +> check_tx 0x00 +-> code: OK + +> check_tx 0xff +-> code: OK + +> deliver_tx 0x00 +-> code: OK + +> check_tx 0x00 +-> code: 2 +-> log: Invalid nonce. Expected >= 1, got 0 + +> deliver_tx 0x01 +-> code: OK + +> deliver_tx 0x04 +-> code: 2 +-> log: Invalid nonce. Expected 2, got 4 + +> info +-> code: OK +-> data: {"hashes":0,"txs":2} +-> data.hex: 0x7B22686173686573223A302C22747873223A327D + diff --git a/abci/tests/test_cli/test.sh b/abci/tests/test_cli/test.sh new file mode 100755 index 000000000..ce074f513 --- /dev/null +++ b/abci/tests/test_cli/test.sh @@ -0,0 +1,42 @@ +#! /bin/bash +set -e + +# Get the root directory. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )" + +# Change into that dir because we expect that. 
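+# Each example pipes a .abci batch file through abci-cli and shasum-compares
+# the fresh output against the checked-in .out file to catch regressions.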
+cd "$DIR" || exit + +function testExample() { + N=$1 + INPUT=$2 + APP="$3 $4" + + echo "Example $N: $APP" + $APP &> /dev/null & + sleep 2 + abci-cli --log_level=error --verbose batch < "$INPUT" > "${INPUT}.out.new" + killall "$3" + + pre=$(shasum < "${INPUT}.out") + post=$(shasum < "${INPUT}.out.new") + + if [[ "$pre" != "$post" ]]; then + echo "You broke the tutorial" + echo "Got:" + cat "${INPUT}.out.new" + echo "Expected:" + cat "${INPUT}.out" + exit 1 + fi + + rm "${INPUT}".out.new +} + +testExample 1 tests/test_cli/ex1.abci abci-cli kvstore +testExample 2 tests/test_cli/ex2.abci abci-cli counter + +echo "" +echo "PASS" diff --git a/abci/tests/tests.go b/abci/tests/tests.go new file mode 100644 index 000000000..ca8701d29 --- /dev/null +++ b/abci/tests/tests.go @@ -0,0 +1 @@ +package tests diff --git a/abci/types/application.go b/abci/types/application.go new file mode 100644 index 000000000..ef1bc92e5 --- /dev/null +++ b/abci/types/application.go @@ -0,0 +1,138 @@ +package types // nolint: goimports + +import ( + context "golang.org/x/net/context" +) + +// Application is an interface that enables any finite, deterministic state machine +// to be driven by a blockchain-based replication engine via the ABCI. +// All methods take a RequestXxx argument and return a ResponseXxx argument, +// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing. +type Application interface { + // Info/Query Connection + Info(RequestInfo) ResponseInfo // Return application info + SetOption(RequestSetOption) ResponseSetOption // Set application option + Query(RequestQuery) ResponseQuery // Query for state + + // Mempool Connection + CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool + + // Consensus Connection + InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore + BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block + DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing + EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set + Commit() ResponseCommit // Commit the state and return the application Merkle root hash +} + +//------------------------------------------------------- +// BaseApplication is a base form of Application + +var _ Application = (*BaseApplication)(nil) + +type BaseApplication struct { +} + +func NewBaseApplication() *BaseApplication { + return &BaseApplication{} +} + +func (BaseApplication) Info(req RequestInfo) ResponseInfo { + return ResponseInfo{} +} + +func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption { + return ResponseSetOption{} +} + +func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx { + return ResponseDeliverTx{Code: CodeTypeOK} +} + +func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx { + return ResponseCheckTx{Code: CodeTypeOK} +} + +func (BaseApplication) Commit() ResponseCommit { + return ResponseCommit{} +} + +func (BaseApplication) Query(req RequestQuery) ResponseQuery { + return ResponseQuery{Code: CodeTypeOK} +} + +func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain { + return ResponseInitChain{} +} + +func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock { + return ResponseBeginBlock{} +} + +func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock { + return ResponseEndBlock{} +} + +//------------------------------------------------------- + +// 
GRPCApplication is a GRPC wrapper for Application +type GRPCApplication struct { + app Application +} + +func NewGRPCApplication(app Application) *GRPCApplication { + return &GRPCApplication{app} +} + +func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) { + return &ResponseEcho{req.Message}, nil +} + +func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) { + return &ResponseFlush{}, nil +} + +func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { + res := app.app.Info(*req) + return &res, nil +} + +func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) { + res := app.app.SetOption(*req) + return &res, nil +} + +func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { + res := app.app.DeliverTx(req.Tx) + return &res, nil +} + +func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { + res := app.app.CheckTx(req.Tx) + return &res, nil +} + +func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) { + res := app.app.Query(*req) + return &res, nil +} + +func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) { + res := app.app.Commit() + return &res, nil +} + +func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { + res := app.app.InitChain(*req) + return &res, nil +} + +func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) { + res := app.app.BeginBlock(*req) + return &res, nil +} + +func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { + res := app.app.EndBlock(*req) + return &res, nil +} diff --git a/abci/types/messages.go b/abci/types/messages.go new file mode 100644 index 000000000..52e4b6758 --- /dev/null +++ b/abci/types/messages.go @@ -0,0 +1,210 @@ +package types + +import ( + "bufio" + "encoding/binary" + "io" + + "github.com/gogo/protobuf/proto" +) + +const ( + maxMsgSize = 104857600 // 100MB +) + +// WriteMessage writes a varint length-delimited protobuf message. +func WriteMessage(msg proto.Message, w io.Writer) error { + bz, err := proto.Marshal(msg) + if err != nil { + return err + } + return encodeByteSlice(w, bz) +} + +// ReadMessage reads a varint length-delimited protobuf message. +func ReadMessage(r io.Reader, msg proto.Message) error { + return readProtoMsg(r, msg, maxMsgSize) +} + +func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error { + // binary.ReadVarint takes an io.ByteReader, eg. 
a bufio.Reader + reader, ok := r.(*bufio.Reader) + if !ok { + reader = bufio.NewReader(r) + } + length64, err := binary.ReadVarint(reader) + if err != nil { + return err + } + length := int(length64) + if length < 0 || length > maxSize { + return io.ErrShortBuffer + } + buf := make([]byte, length) + if _, err := io.ReadFull(reader, buf); err != nil { + return err + } + return proto.Unmarshal(buf, msg) +} + +//----------------------------------------------------------------------- +// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keep +// go-wire as a dep + +func encodeByteSlice(w io.Writer, bz []byte) (err error) { + err = encodeVarint(w, int64(len(bz))) + if err != nil { + return + } + _, err = w.Write(bz) + return +} + +func encodeVarint(w io.Writer, i int64) (err error) { + var buf [10]byte + n := binary.PutVarint(buf[:], i) + _, err = w.Write(buf[0:n]) + return +} + +//---------------------------------------- + +func ToRequestEcho(message string) *Request { + return &Request{ + Value: &Request_Echo{&RequestEcho{message}}, + } +} + +func ToRequestFlush() *Request { + return &Request{ + Value: &Request_Flush{&RequestFlush{}}, + } +} + +func ToRequestInfo(req RequestInfo) *Request { + return &Request{ + Value: &Request_Info{&req}, + } +} + +func ToRequestSetOption(req RequestSetOption) *Request { + return &Request{ + Value: &Request_SetOption{&req}, + } +} + +func ToRequestDeliverTx(tx []byte) *Request { + return &Request{ + Value: &Request_DeliverTx{&RequestDeliverTx{tx}}, + } +} + +func ToRequestCheckTx(tx []byte) *Request { + return &Request{ + Value: &Request_CheckTx{&RequestCheckTx{tx}}, + } +} + +func ToRequestCommit() *Request { + return &Request{ + Value: &Request_Commit{&RequestCommit{}}, + } +} + +func ToRequestQuery(req RequestQuery) *Request { + return &Request{ + Value: &Request_Query{&req}, + } +} + +func ToRequestInitChain(req RequestInitChain) *Request { + return &Request{ + Value: &Request_InitChain{&req}, + } +} + +func ToRequestBeginBlock(req RequestBeginBlock) *Request { + return &Request{ + Value: &Request_BeginBlock{&req}, + } +} + +func ToRequestEndBlock(req RequestEndBlock) *Request { + return &Request{ + Value: &Request_EndBlock{&req}, + } +} + +//---------------------------------------- + +func ToResponseException(errStr string) *Response { + return &Response{ + Value: &Response_Exception{&ResponseException{errStr}}, + } +} + +func ToResponseEcho(message string) *Response { + return &Response{ + Value: &Response_Echo{&ResponseEcho{message}}, + } +} + +func ToResponseFlush() *Response { + return &Response{ + Value: &Response_Flush{&ResponseFlush{}}, + } +} + +func ToResponseInfo(res ResponseInfo) *Response { + return &Response{ + Value: &Response_Info{&res}, + } +} + +func ToResponseSetOption(res ResponseSetOption) *Response { + return &Response{ + Value: &Response_SetOption{&res}, + } +} + +func ToResponseDeliverTx(res ResponseDeliverTx) *Response { + return &Response{ + Value: &Response_DeliverTx{&res}, + } +} + +func ToResponseCheckTx(res ResponseCheckTx) *Response { + return &Response{ + Value: &Response_CheckTx{&res}, + } +} + +func ToResponseCommit(res ResponseCommit) *Response { + return &Response{ + Value: &Response_Commit{&res}, + } +} + +func ToResponseQuery(res ResponseQuery) *Response { + return &Response{ + Value: &Response_Query{&res}, + } +} + +func ToResponseInitChain(res ResponseInitChain) *Response { + return &Response{ + Value: &Response_InitChain{&res}, + } +} + +func ToResponseBeginBlock(res ResponseBeginBlock) *Response { + 
return &Response{ + Value: &Response_BeginBlock{&res}, + } +} + +func ToResponseEndBlock(res ResponseEndBlock) *Response { + return &Response{ + Value: &Response_EndBlock{&res}, + } +} diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go new file mode 100644 index 000000000..da6595a46 --- /dev/null +++ b/abci/types/messages_test.go @@ -0,0 +1,104 @@ +package types + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestMarshalJSON(t *testing.T) { + b, err := json.Marshal(&ResponseDeliverTx{}) + assert.Nil(t, err) + // Do not include empty fields. + assert.False(t, strings.Contains(string(b), "code")) + + r1 := ResponseCheckTx{ + Code: 1, + Data: []byte("hello"), + GasWanted: 43, + Tags: []cmn.KVPair{ + {[]byte("pho"), []byte("bo")}, + }, + } + b, err = json.Marshal(&r1) + assert.Nil(t, err) + + var r2 ResponseCheckTx + err = json.Unmarshal(b, &r2) + assert.Nil(t, err) + assert.Equal(t, r1, r2) +} + +func TestWriteReadMessageSimple(t *testing.T) { + cases := []proto.Message{ + &RequestEcho{ + Message: "Hello", + }, + } + + for _, c := range cases { + buf := new(bytes.Buffer) + err := WriteMessage(c, buf) + assert.Nil(t, err) + + msg := new(RequestEcho) + err = ReadMessage(buf, msg) + assert.Nil(t, err) + + assert.Equal(t, c, msg) + } +} + +func TestWriteReadMessage(t *testing.T) { + cases := []proto.Message{ + &Header{ + NumTxs: 4, + }, + // TODO: add the rest + } + + for _, c := range cases { + buf := new(bytes.Buffer) + err := WriteMessage(c, buf) + assert.Nil(t, err) + + msg := new(Header) + err = ReadMessage(buf, msg) + assert.Nil(t, err) + + assert.Equal(t, c, msg) + } +} + +func TestWriteReadMessage2(t *testing.T) { + phrase := "hello-world" + cases := []proto.Message{ + &ResponseCheckTx{ + Data: []byte(phrase), + Log: phrase, + GasWanted: 10, + Tags: []cmn.KVPair{ + cmn.KVPair{[]byte("abc"), []byte("def")}, + }, + // Fee: cmn.KI64Pair{ + }, + // TODO: add the rest + } + + for _, c := range cases { + buf := new(bytes.Buffer) + err := WriteMessage(c, buf) + assert.Nil(t, err) + + msg := new(ResponseCheckTx) + err = ReadMessage(buf, msg) + assert.Nil(t, err) + + assert.Equal(t, c, msg) + } +} diff --git a/abci/types/protoreplace/protoreplace.go b/abci/types/protoreplace/protoreplace.go new file mode 100644 index 000000000..3ea0c73da --- /dev/null +++ b/abci/types/protoreplace/protoreplace.go @@ -0,0 +1,55 @@ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" +) + +// This script replaces most `[]byte` with `data.Bytes` in a `.pb.go` file. +// It was written before we realized we could use `gogo/protobuf` to achieve +// this more natively. So it's here for safe keeping in case we ever need to +// abandon `gogo/protobuf`. 
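The `WriteMessage`/`ReadMessage` pair defined in `messages.go` above gives a complete varint-framed round trip over any `io.Writer`/`io.Reader`, and the `ToRequest*` constructors wrap payloads into the `Request` oneof. A standalone sketch using only functions from this diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	buf := new(bytes.Buffer)
	// Varint length-prefix a RequestEcho and write it out.
	if err := types.WriteMessage(types.ToRequestEcho("hello"), buf); err != nil {
		panic(err)
	}
	// Read it back into a fresh Request and pull the echo out of the oneof.
	req := new(types.Request)
	if err := types.ReadMessage(buf, req); err != nil {
		panic(err)
	}
	fmt.Println(req.GetEcho().Message) // hello
}
```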
+
+func main() {
+	bytePattern := regexp.MustCompile("[[][]]byte")
+	const oldPath = "types/types.pb.go"
+	const tmpPath = "types/types.pb.new"
+	content, err := ioutil.ReadFile(oldPath)
+	if err != nil {
+		panic("cannot read " + oldPath)
+	}
+	lines := bytes.Split(content, []byte("\n"))
+	outFile, err := os.Create(tmpPath)
+	if err != nil {
+		panic("cannot create " + tmpPath)
+	}
+	wroteImport := false
+	for _, lineBytes := range lines {
+		line := string(lineBytes)
+		gotPackageLine := strings.HasPrefix(line, "package ")
+		writeImportTime := strings.HasPrefix(line, "import ")
+		containsDescriptor := strings.Contains(line, "Descriptor")
+		containsByteArray := strings.Contains(line, "[]byte")
+		if containsByteArray && !containsDescriptor {
+			line = string(bytePattern.ReplaceAll([]byte(line), []byte("data.Bytes")))
+		}
+		if writeImportTime && !wroteImport {
+			wroteImport = true
+			fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n")
+		}
+		if gotPackageLine {
+			fmt.Fprintf(outFile, "%s\n", "//nolint: gas")
+		}
+		fmt.Fprintf(outFile, "%s\n", line)
+	}
+	outFile.Close()
+	os.Remove(oldPath)
+	os.Rename(tmpPath, oldPath)
+	// exec.Command alone only builds the command; Run actually executes it.
+	if err := exec.Command("goimports", "-w", oldPath).Run(); err != nil {
+		panic("goimports failed: " + err.Error())
+	}
+}
diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go
new file mode 100644
index 000000000..e5cd5fbf3
--- /dev/null
+++ b/abci/types/pubkey.go
@@ -0,0 +1,16 @@
+package types
+
+const (
+	PubKeyEd25519 = "ed25519"
+)
+
+func Ed25519Validator(pubkey []byte, power int64) Validator {
+	return Validator{
+		// Address:
+		PubKey: PubKey{
+			Type: PubKeyEd25519,
+			Data: pubkey,
+		},
+		Power: power,
+	}
+}
diff --git a/abci/types/result.go b/abci/types/result.go
new file mode 100644
index 000000000..dbf409f4c
--- /dev/null
+++ b/abci/types/result.go
@@ -0,0 +1,121 @@
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+
+	"github.com/gogo/protobuf/jsonpb"
+)
+
+const (
+	CodeTypeOK uint32 = 0
+)
+
+// IsOK returns true if Code is OK.
+func (r ResponseCheckTx) IsOK() bool {
+	return r.Code == CodeTypeOK
+}
+
+// IsErr returns true if Code is something other than OK.
+func (r ResponseCheckTx) IsErr() bool {
+	return r.Code != CodeTypeOK
+}
+
+// IsOK returns true if Code is OK.
+func (r ResponseDeliverTx) IsOK() bool {
+	return r.Code == CodeTypeOK
+}
+
+// IsErr returns true if Code is something other than OK.
+func (r ResponseDeliverTx) IsErr() bool {
+	return r.Code != CodeTypeOK
+}
+
+// IsOK returns true if Code is OK.
+func (r ResponseQuery) IsOK() bool {
+	return r.Code == CodeTypeOK
+}
+
+// IsErr returns true if Code is something other than OK.
+func (r ResponseQuery) IsErr() bool {
+	return r.Code != CodeTypeOK
+}
+
+//---------------------------------------------------------------------------
+// override JSON marshalling so we don't emit defaults (i.e. disable omitempty)
+// note we need Unmarshal functions too because protobuf had the bright idea
+// to marshal int64->string. cool.
cool, cool, cool: https://developers.google.com/protocol-buffers/docs/proto3#json + +var ( + jsonpbMarshaller = jsonpb.Marshaler{ + EnumsAsInts: true, + EmitDefaults: false, + } + jsonpbUnmarshaller = jsonpb.Unmarshaler{} +) + +func (r *ResponseSetOption) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseSetOption) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *ResponseDeliverTx) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseDeliverTx) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *ResponseQuery) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseQuery) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *ResponseCommit) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseCommit) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +// Some compile time assertions to ensure we don't +// have accidental runtime surprises later on. + +// jsonEncodingRoundTripper ensures that asserted +// interfaces implement both MarshalJSON and UnmarshalJSON +type jsonRoundTripper interface { + json.Marshaler + json.Unmarshaler +} + +var _ jsonRoundTripper = (*ResponseCommit)(nil) +var _ jsonRoundTripper = (*ResponseQuery)(nil) +var _ jsonRoundTripper = (*ResponseDeliverTx)(nil) +var _ jsonRoundTripper = (*ResponseCheckTx)(nil) +var _ jsonRoundTripper = (*ResponseSetOption)(nil) diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go new file mode 100644 index 000000000..8135db50f --- /dev/null +++ b/abci/types/types.pb.go @@ -0,0 +1,2455 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types/types.proto + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + types/types.proto + +It has these top-level messages: + Request + RequestEcho + RequestFlush + RequestInfo + RequestSetOption + RequestInitChain + RequestQuery + RequestBeginBlock + RequestCheckTx + RequestDeliverTx + RequestEndBlock + RequestCommit + Response + ResponseException + ResponseEcho + ResponseFlush + ResponseInfo + ResponseSetOption + ResponseInitChain + ResponseQuery + ResponseBeginBlock + ResponseCheckTx + ResponseDeliverTx + ResponseEndBlock + ResponseCommit + ConsensusParams + BlockSize + TxSize + BlockGossip + Header + Validator + SigningValidator + PubKey + Evidence +*/ +//nolint: gas +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import common "github.com/tendermint/tendermint/libs/common" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +// Reference imports to suppress errors if they are not otherwise used. 
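The custom `MarshalJSON` methods in `result.go` above exist precisely so that zero-valued fields are dropped from the output (`EmitDefaults: false`). A sketch grounded in `TestMarshalJSON` from this diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Code is zero, so no "code" key appears in the JSON at all.
	b, err := json.Marshal(&types.ResponseCheckTx{Log: "ok"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"log":"ok"}
}
```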
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Request struct { + // Types that are valid to be assigned to Value: + // *Request_Echo + // *Request_Flush + // *Request_Info + // *Request_SetOption + // *Request_InitChain + // *Request_Query + // *Request_BeginBlock + // *Request_CheckTx + // *Request_DeliverTx + // *Request_EndBlock + // *Request_Commit + Value isRequest_Value `protobuf_oneof:"value"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +type isRequest_Value interface { + isRequest_Value() +} + +type Request_Echo struct { + Echo *RequestEcho `protobuf:"bytes,2,opt,name=echo,oneof"` +} +type Request_Flush struct { + Flush *RequestFlush `protobuf:"bytes,3,opt,name=flush,oneof"` +} +type Request_Info struct { + Info *RequestInfo `protobuf:"bytes,4,opt,name=info,oneof"` +} +type Request_SetOption struct { + SetOption *RequestSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,oneof"` +} +type Request_InitChain struct { + InitChain *RequestInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,oneof"` +} +type Request_Query struct { + Query *RequestQuery `protobuf:"bytes,7,opt,name=query,oneof"` +} +type Request_BeginBlock struct { + BeginBlock *RequestBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,oneof"` +} +type Request_CheckTx struct { + CheckTx *RequestCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,oneof"` +} +type Request_DeliverTx struct { + DeliverTx *RequestDeliverTx `protobuf:"bytes,19,opt,name=deliver_tx,json=deliverTx,oneof"` +} +type Request_EndBlock struct { + EndBlock *RequestEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,oneof"` +} +type Request_Commit struct { + Commit *RequestCommit `protobuf:"bytes,12,opt,name=commit,oneof"` +} + +func (*Request_Echo) isRequest_Value() {} +func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_SetOption) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_BeginBlock) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_DeliverTx) isRequest_Value() {} +func (*Request_EndBlock) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} + +func (m *Request) GetValue() isRequest_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Request) GetEcho() *RequestEcho { + if x, ok := m.GetValue().(*Request_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Request) GetFlush() *RequestFlush { + if x, ok := m.GetValue().(*Request_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Request) GetInfo() *RequestInfo { + if x, ok := m.GetValue().(*Request_Info); ok { + return x.Info + } + return nil +} + +func (m *Request) GetSetOption() *RequestSetOption { + if x, ok := m.GetValue().(*Request_SetOption); ok { + return x.SetOption + } + return nil +} + +func (m *Request) GetInitChain() *RequestInitChain { + if x, ok := 
m.GetValue().(*Request_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Request) GetQuery() *RequestQuery { + if x, ok := m.GetValue().(*Request_Query); ok { + return x.Query + } + return nil +} + +func (m *Request) GetBeginBlock() *RequestBeginBlock { + if x, ok := m.GetValue().(*Request_BeginBlock); ok { + return x.BeginBlock + } + return nil +} + +func (m *Request) GetCheckTx() *RequestCheckTx { + if x, ok := m.GetValue().(*Request_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Request) GetDeliverTx() *RequestDeliverTx { + if x, ok := m.GetValue().(*Request_DeliverTx); ok { + return x.DeliverTx + } + return nil +} + +func (m *Request) GetEndBlock() *RequestEndBlock { + if x, ok := m.GetValue().(*Request_EndBlock); ok { + return x.EndBlock + } + return nil +} + +func (m *Request) GetCommit() *RequestCommit { + if x, ok := m.GetValue().(*Request_Commit); ok { + return x.Commit + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Request) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Request_OneofMarshaler, _Request_OneofUnmarshaler, _Request_OneofSizer, []interface{}{ + (*Request_Echo)(nil), + (*Request_Flush)(nil), + (*Request_Info)(nil), + (*Request_SetOption)(nil), + (*Request_InitChain)(nil), + (*Request_Query)(nil), + (*Request_BeginBlock)(nil), + (*Request_CheckTx)(nil), + (*Request_DeliverTx)(nil), + (*Request_EndBlock)(nil), + (*Request_Commit)(nil), + } +} + +func _Request_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Request) + // value + switch x := m.Value.(type) { + case *Request_Echo: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Echo); err != nil { + return err + } + case *Request_Flush: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Flush); err != nil { + return err + } + case *Request_Info: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Info); err != nil { + return err + } + case *Request_SetOption: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SetOption); err != nil { + return err + } + case *Request_InitChain: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitChain); err != nil { + return err + } + case *Request_Query: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *Request_BeginBlock: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BeginBlock); err != nil { + return err + } + case *Request_CheckTx: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CheckTx); err != nil { + return err + } + case *Request_DeliverTx: + _ = b.EncodeVarint(19<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeliverTx); err != nil { + return err + } + case *Request_EndBlock: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndBlock); err != nil { + return err + } + case *Request_Commit: + _ = b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Commit); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Request.Value has unexpected type %T", x) + } + return nil +} + +func _Request_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Request) + switch tag { + case 
2: // value.echo + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestEcho) + err := b.DecodeMessage(msg) + m.Value = &Request_Echo{msg} + return true, err + case 3: // value.flush + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestFlush) + err := b.DecodeMessage(msg) + m.Value = &Request_Flush{msg} + return true, err + case 4: // value.info + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestInfo) + err := b.DecodeMessage(msg) + m.Value = &Request_Info{msg} + return true, err + case 5: // value.set_option + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestSetOption) + err := b.DecodeMessage(msg) + m.Value = &Request_SetOption{msg} + return true, err + case 6: // value.init_chain + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestInitChain) + err := b.DecodeMessage(msg) + m.Value = &Request_InitChain{msg} + return true, err + case 7: // value.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestQuery) + err := b.DecodeMessage(msg) + m.Value = &Request_Query{msg} + return true, err + case 8: // value.begin_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestBeginBlock) + err := b.DecodeMessage(msg) + m.Value = &Request_BeginBlock{msg} + return true, err + case 9: // value.check_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestCheckTx) + err := b.DecodeMessage(msg) + m.Value = &Request_CheckTx{msg} + return true, err + case 19: // value.deliver_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestDeliverTx) + err := b.DecodeMessage(msg) + m.Value = &Request_DeliverTx{msg} + return true, err + case 11: // value.end_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestEndBlock) + err := b.DecodeMessage(msg) + m.Value = &Request_EndBlock{msg} + return true, err + case 12: // value.commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestCommit) + err := b.DecodeMessage(msg) + m.Value = &Request_Commit{msg} + return true, err + default: + return false, nil + } +} + +func _Request_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Request) + // value + switch x := m.Value.(type) { + case *Request_Echo: + s := proto.Size(x.Echo) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Flush: + s := proto.Size(x.Flush) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Info: + s := proto.Size(x.Info) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_SetOption: + s := proto.Size(x.SetOption) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_InitChain: + s := proto.Size(x.InitChain) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Query: + s := proto.Size(x.Query) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_BeginBlock: + s := proto.Size(x.BeginBlock) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *Request_CheckTx: + s := proto.Size(x.CheckTx) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_DeliverTx: + s := proto.Size(x.DeliverTx) + n += proto.SizeVarint(19<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_EndBlock: + s := proto.Size(x.EndBlock) + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Commit: + s := proto.Size(x.Commit) + n += proto.SizeVarint(12<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type RequestEcho struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *RequestEcho) Reset() { *m = RequestEcho{} } +func (m *RequestEcho) String() string { return proto.CompactTextString(m) } +func (*RequestEcho) ProtoMessage() {} +func (*RequestEcho) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +func (m *RequestEcho) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type RequestFlush struct { +} + +func (m *RequestFlush) Reset() { *m = RequestFlush{} } +func (m *RequestFlush) String() string { return proto.CompactTextString(m) } +func (*RequestFlush) ProtoMessage() {} +func (*RequestFlush) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +type RequestInfo struct { + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *RequestInfo) Reset() { *m = RequestInfo{} } +func (m *RequestInfo) String() string { return proto.CompactTextString(m) } +func (*RequestInfo) ProtoMessage() {} +func (*RequestInfo) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } + +func (m *RequestInfo) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +// nondeterministic +type RequestSetOption struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } +func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } +func (*RequestSetOption) ProtoMessage() {} +func (*RequestSetOption) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} } + +func (m *RequestSetOption) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *RequestSetOption) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type RequestInitChain struct { + Time int64 `protobuf:"varint,1,opt,name=time,proto3" json:"time,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + Validators []Validator `protobuf:"bytes,4,rep,name=validators" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` +} + +func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } +func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } +func (*RequestInitChain) ProtoMessage() {} +func (*RequestInitChain) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} } 
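`RequestInitChain` carries the genesis data Tendermint hands to the application; combined with the `Ed25519Validator` helper from `pubkey.go`, constructing one looks roughly like the sketch below. `pubKeyBytes` is a placeholder for a real 32-byte ed25519 public key:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	pubKeyBytes := make([]byte, 32) // placeholder; use a real ed25519 pubkey
	req := types.RequestInitChain{
		Time:    1531224000, // genesis time as unix seconds
		ChainId: "test-chain",
		Validators: []types.Validator{
			types.Ed25519Validator(pubKeyBytes, 10),
		},
	}
	fmt.Println(req.GetChainId(), len(req.GetValidators()))
}
```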
+ +func (m *RequestInitChain) GetTime() int64 { + if m != nil { + return m.Time + } + return 0 +} + +func (m *RequestInitChain) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *RequestInitChain) GetConsensusParams() *ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *RequestInitChain) GetValidators() []Validator { + if m != nil { + return m.Validators + } + return nil +} + +func (m *RequestInitChain) GetAppStateBytes() []byte { + if m != nil { + return m.AppStateBytes + } + return nil +} + +type RequestQuery struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Prove bool `protobuf:"varint,4,opt,name=prove,proto3" json:"prove,omitempty"` +} + +func (m *RequestQuery) Reset() { *m = RequestQuery{} } +func (m *RequestQuery) String() string { return proto.CompactTextString(m) } +func (*RequestQuery) ProtoMessage() {} +func (*RequestQuery) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{6} } + +func (m *RequestQuery) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *RequestQuery) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *RequestQuery) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestQuery) GetProve() bool { + if m != nil { + return m.Prove + } + return false +} + +type RequestBeginBlock struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header Header `protobuf:"bytes,2,opt,name=header" json:"header"` + Validators []SigningValidator `protobuf:"bytes,3,rep,name=validators" json:"validators"` + ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators" json:"byzantine_validators"` +} + +func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } +func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } +func (*RequestBeginBlock) ProtoMessage() {} +func (*RequestBeginBlock) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{7} } + +func (m *RequestBeginBlock) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestBeginBlock) GetHeader() Header { + if m != nil { + return m.Header + } + return Header{} +} + +func (m *RequestBeginBlock) GetValidators() []SigningValidator { + if m != nil { + return m.Validators + } + return nil +} + +func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { + if m != nil { + return m.ByzantineValidators + } + return nil +} + +type RequestCheckTx struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } +func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } +func (*RequestCheckTx) ProtoMessage() {} +func (*RequestCheckTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{8} } + +func (m *RequestCheckTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +type RequestDeliverTx struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } +func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } +func (*RequestDeliverTx) ProtoMessage() {} +func 
(*RequestDeliverTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9} } + +func (m *RequestDeliverTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +type RequestEndBlock struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } +func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } +func (*RequestEndBlock) ProtoMessage() {} +func (*RequestEndBlock) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{10} } + +func (m *RequestEndBlock) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +type RequestCommit struct { +} + +func (m *RequestCommit) Reset() { *m = RequestCommit{} } +func (m *RequestCommit) String() string { return proto.CompactTextString(m) } +func (*RequestCommit) ProtoMessage() {} +func (*RequestCommit) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11} } + +type Response struct { + // Types that are valid to be assigned to Value: + // *Response_Exception + // *Response_Echo + // *Response_Flush + // *Response_Info + // *Response_SetOption + // *Response_InitChain + // *Response_Query + // *Response_BeginBlock + // *Response_CheckTx + // *Response_DeliverTx + // *Response_EndBlock + // *Response_Commit + Value isResponse_Value `protobuf_oneof:"value"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{12} } + +type isResponse_Value interface { + isResponse_Value() +} + +type Response_Exception struct { + Exception *ResponseException `protobuf:"bytes,1,opt,name=exception,oneof"` +} +type Response_Echo struct { + Echo *ResponseEcho `protobuf:"bytes,2,opt,name=echo,oneof"` +} +type Response_Flush struct { + Flush *ResponseFlush `protobuf:"bytes,3,opt,name=flush,oneof"` +} +type Response_Info struct { + Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,oneof"` +} +type Response_SetOption struct { + SetOption *ResponseSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,oneof"` +} +type Response_InitChain struct { + InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,oneof"` +} +type Response_Query struct { + Query *ResponseQuery `protobuf:"bytes,7,opt,name=query,oneof"` +} +type Response_BeginBlock struct { + BeginBlock *ResponseBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,oneof"` +} +type Response_CheckTx struct { + CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,oneof"` +} +type Response_DeliverTx struct { + DeliverTx *ResponseDeliverTx `protobuf:"bytes,10,opt,name=deliver_tx,json=deliverTx,oneof"` +} +type Response_EndBlock struct { + EndBlock *ResponseEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,oneof"` +} +type Response_Commit struct { + Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,oneof"` +} + +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_SetOption) isResponse_Value() {} +func (*Response_InitChain) isResponse_Value() {} +func (*Response_Query) isResponse_Value() {} +func (*Response_BeginBlock) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_DeliverTx) 
isResponse_Value() {} +func (*Response_EndBlock) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} + +func (m *Response) GetValue() isResponse_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Response) GetException() *ResponseException { + if x, ok := m.GetValue().(*Response_Exception); ok { + return x.Exception + } + return nil +} + +func (m *Response) GetEcho() *ResponseEcho { + if x, ok := m.GetValue().(*Response_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Response) GetFlush() *ResponseFlush { + if x, ok := m.GetValue().(*Response_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Response) GetInfo() *ResponseInfo { + if x, ok := m.GetValue().(*Response_Info); ok { + return x.Info + } + return nil +} + +func (m *Response) GetSetOption() *ResponseSetOption { + if x, ok := m.GetValue().(*Response_SetOption); ok { + return x.SetOption + } + return nil +} + +func (m *Response) GetInitChain() *ResponseInitChain { + if x, ok := m.GetValue().(*Response_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Response) GetQuery() *ResponseQuery { + if x, ok := m.GetValue().(*Response_Query); ok { + return x.Query + } + return nil +} + +func (m *Response) GetBeginBlock() *ResponseBeginBlock { + if x, ok := m.GetValue().(*Response_BeginBlock); ok { + return x.BeginBlock + } + return nil +} + +func (m *Response) GetCheckTx() *ResponseCheckTx { + if x, ok := m.GetValue().(*Response_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Response) GetDeliverTx() *ResponseDeliverTx { + if x, ok := m.GetValue().(*Response_DeliverTx); ok { + return x.DeliverTx + } + return nil +} + +func (m *Response) GetEndBlock() *ResponseEndBlock { + if x, ok := m.GetValue().(*Response_EndBlock); ok { + return x.EndBlock + } + return nil +} + +func (m *Response) GetCommit() *ResponseCommit { + if x, ok := m.GetValue().(*Response_Commit); ok { + return x.Commit + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Response) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Response_OneofMarshaler, _Response_OneofUnmarshaler, _Response_OneofSizer, []interface{}{ + (*Response_Exception)(nil), + (*Response_Echo)(nil), + (*Response_Flush)(nil), + (*Response_Info)(nil), + (*Response_SetOption)(nil), + (*Response_InitChain)(nil), + (*Response_Query)(nil), + (*Response_BeginBlock)(nil), + (*Response_CheckTx)(nil), + (*Response_DeliverTx)(nil), + (*Response_EndBlock)(nil), + (*Response_Commit)(nil), + } +} + +func _Response_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Response) + // value + switch x := m.Value.(type) { + case *Response_Exception: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Exception); err != nil { + return err + } + case *Response_Echo: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Echo); err != nil { + return err + } + case *Response_Flush: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Flush); err != nil { + return err + } + case *Response_Info: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Info); err != nil { + return err + } + case *Response_SetOption: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SetOption); err != nil { + return err + } + case *Response_InitChain: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitChain); err != nil { + return err + } + case *Response_Query: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *Response_BeginBlock: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BeginBlock); err != nil { + return err + } + case *Response_CheckTx: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CheckTx); err != nil { + return err + } + case *Response_DeliverTx: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeliverTx); err != nil { + return err + } + case *Response_EndBlock: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndBlock); err != nil { + return err + } + case *Response_Commit: + _ = b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Commit); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Response.Value has unexpected type %T", x) + } + return nil +} + +func _Response_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Response) + switch tag { + case 1: // value.exception + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseException) + err := b.DecodeMessage(msg) + m.Value = &Response_Exception{msg} + return true, err + case 2: // value.echo + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseEcho) + err := b.DecodeMessage(msg) + m.Value = &Response_Echo{msg} + return true, err + case 3: // value.flush + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseFlush) + err := b.DecodeMessage(msg) + m.Value = &Response_Flush{msg} + return true, err + case 4: // value.info + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseInfo) + err := b.DecodeMessage(msg) + 
m.Value = &Response_Info{msg} + return true, err + case 5: // value.set_option + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseSetOption) + err := b.DecodeMessage(msg) + m.Value = &Response_SetOption{msg} + return true, err + case 6: // value.init_chain + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseInitChain) + err := b.DecodeMessage(msg) + m.Value = &Response_InitChain{msg} + return true, err + case 7: // value.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseQuery) + err := b.DecodeMessage(msg) + m.Value = &Response_Query{msg} + return true, err + case 8: // value.begin_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseBeginBlock) + err := b.DecodeMessage(msg) + m.Value = &Response_BeginBlock{msg} + return true, err + case 9: // value.check_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseCheckTx) + err := b.DecodeMessage(msg) + m.Value = &Response_CheckTx{msg} + return true, err + case 10: // value.deliver_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseDeliverTx) + err := b.DecodeMessage(msg) + m.Value = &Response_DeliverTx{msg} + return true, err + case 11: // value.end_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseEndBlock) + err := b.DecodeMessage(msg) + m.Value = &Response_EndBlock{msg} + return true, err + case 12: // value.commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseCommit) + err := b.DecodeMessage(msg) + m.Value = &Response_Commit{msg} + return true, err + default: + return false, nil + } +} + +func _Response_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Response) + // value + switch x := m.Value.(type) { + case *Response_Exception: + s := proto.Size(x.Exception) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Echo: + s := proto.Size(x.Echo) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Flush: + s := proto.Size(x.Flush) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Info: + s := proto.Size(x.Info) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_SetOption: + s := proto.Size(x.SetOption) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_InitChain: + s := proto.Size(x.InitChain) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Query: + s := proto.Size(x.Query) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_BeginBlock: + s := proto.Size(x.BeginBlock) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_CheckTx: + s := proto.Size(x.CheckTx) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_DeliverTx: + s := proto.Size(x.DeliverTx) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_EndBlock: + s := proto.Size(x.EndBlock) + n += 
proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Commit: + s := proto.Size(x.Commit) + n += proto.SizeVarint(12<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// nondeterministic +type ResponseException struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *ResponseException) Reset() { *m = ResponseException{} } +func (m *ResponseException) String() string { return proto.CompactTextString(m) } +func (*ResponseException) ProtoMessage() {} +func (*ResponseException) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{13} } + +func (m *ResponseException) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type ResponseEcho struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } +func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } +func (*ResponseEcho) ProtoMessage() {} +func (*ResponseEcho) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14} } + +func (m *ResponseEcho) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type ResponseFlush struct { +} + +func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } +func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } +func (*ResponseFlush) ProtoMessage() {} +func (*ResponseFlush) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{15} } + +type ResponseInfo struct { + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,4,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` +} + +func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } +func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } +func (*ResponseInfo) ProtoMessage() {} +func (*ResponseInfo) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16} } + +func (m *ResponseInfo) GetData() string { + if m != nil { + return m.Data + } + return "" +} + +func (m *ResponseInfo) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *ResponseInfo) GetLastBlockHeight() int64 { + if m != nil { + return m.LastBlockHeight + } + return 0 +} + +func (m *ResponseInfo) GetLastBlockAppHash() []byte { + if m != nil { + return m.LastBlockAppHash + } + return nil +} + +// nondeterministic +type ResponseSetOption struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // bytes data = 2; + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` +} + +func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } +func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } +func (*ResponseSetOption) ProtoMessage() {} +func (*ResponseSetOption) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17} } + +func (m *ResponseSetOption) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 
+} + +func (m *ResponseSetOption) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseSetOption) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +type ResponseInitChain struct { + ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + Validators []Validator `protobuf:"bytes,2,rep,name=validators" json:"validators"` +} + +func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } +func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } +func (*ResponseInitChain) ProtoMessage() {} +func (*ResponseInitChain) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} } + +func (m *ResponseInitChain) GetConsensusParams() *ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *ResponseInitChain) GetValidators() []Validator { + if m != nil { + return m.Validators + } + return nil +} + +type ResponseQuery struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // bytes data = 2; // use "value" instead. + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + Proof []byte `protobuf:"bytes,8,opt,name=proof,proto3" json:"proof,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } +func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } +func (*ResponseQuery) ProtoMessage() {} +func (*ResponseQuery) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19} } + +func (m *ResponseQuery) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseQuery) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseQuery) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseQuery) GetIndex() int64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ResponseQuery) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ResponseQuery) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *ResponseQuery) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *ResponseQuery) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +type ResponseBeginBlock struct { + Tags []common.KVPair `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` +} + +func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } +func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseBeginBlock) ProtoMessage() {} +func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} } + +func (m *ResponseBeginBlock) GetTags() []common.KVPair { + if m != nil { + return m.Tags + } + return nil +} + +type ResponseCheckTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" 
json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` + Fee common.KI64Pair `protobuf:"bytes,8,opt,name=fee" json:"fee"` +} + +func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } +func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } +func (*ResponseCheckTx) ProtoMessage() {} +func (*ResponseCheckTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{21} } + +func (m *ResponseCheckTx) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseCheckTx) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseCheckTx) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseCheckTx) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseCheckTx) GetGasWanted() int64 { + if m != nil { + return m.GasWanted + } + return 0 +} + +func (m *ResponseCheckTx) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ResponseCheckTx) GetTags() []common.KVPair { + if m != nil { + return m.Tags + } + return nil +} + +func (m *ResponseCheckTx) GetFee() common.KI64Pair { + if m != nil { + return m.Fee + } + return common.KI64Pair{} +} + +type ResponseDeliverTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` + Fee common.KI64Pair `protobuf:"bytes,8,opt,name=fee" json:"fee"` +} + +func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } +func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } +func (*ResponseDeliverTx) ProtoMessage() {} +func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22} } + +func (m *ResponseDeliverTx) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseDeliverTx) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseDeliverTx) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseDeliverTx) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseDeliverTx) GetGasWanted() int64 { + if m != nil { + return m.GasWanted + } + return 0 +} + +func (m *ResponseDeliverTx) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ResponseDeliverTx) GetTags() []common.KVPair { + if m != nil { + return m.Tags + } + return nil +} + +func (m *ResponseDeliverTx) GetFee() common.KI64Pair { + if m != nil { + return m.Fee + } + return common.KI64Pair{} +} + +type ResponseEndBlock struct { + ValidatorUpdates []Validator 
`protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"`
+	ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"`
+	Tags                  []common.KVPair  `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"`
+}
+
+func (m *ResponseEndBlock) Reset()                    { *m = ResponseEndBlock{} }
+func (m *ResponseEndBlock) String() string            { return proto.CompactTextString(m) }
+func (*ResponseEndBlock) ProtoMessage()               {}
+func (*ResponseEndBlock) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} }
+
+func (m *ResponseEndBlock) GetValidatorUpdates() []Validator {
+	if m != nil {
+		return m.ValidatorUpdates
+	}
+	return nil
+}
+
+func (m *ResponseEndBlock) GetConsensusParamUpdates() *ConsensusParams {
+	if m != nil {
+		return m.ConsensusParamUpdates
+	}
+	return nil
+}
+
+func (m *ResponseEndBlock) GetTags() []common.KVPair {
+	if m != nil {
+		return m.Tags
+	}
+	return nil
+}
+
+type ResponseCommit struct {
+	// reserve 1
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *ResponseCommit) Reset()                    { *m = ResponseCommit{} }
+func (m *ResponseCommit) String() string            { return proto.CompactTextString(m) }
+func (*ResponseCommit) ProtoMessage()               {}
+func (*ResponseCommit) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
+
+func (m *ResponseCommit) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// ConsensusParams contains all consensus-relevant parameters
+// that can be adjusted by the abci app
+type ConsensusParams struct {
+	BlockSize   *BlockSize   `protobuf:"bytes,1,opt,name=block_size,json=blockSize" json:"block_size,omitempty"`
+	TxSize      *TxSize      `protobuf:"bytes,2,opt,name=tx_size,json=txSize" json:"tx_size,omitempty"`
+	BlockGossip *BlockGossip `protobuf:"bytes,3,opt,name=block_gossip,json=blockGossip" json:"block_gossip,omitempty"`
+}
+
+func (m *ConsensusParams) Reset()                    { *m = ConsensusParams{} }
+func (m *ConsensusParams) String() string            { return proto.CompactTextString(m) }
+func (*ConsensusParams) ProtoMessage()               {}
+func (*ConsensusParams) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25} }
+
+func (m *ConsensusParams) GetBlockSize() *BlockSize {
+	if m != nil {
+		return m.BlockSize
+	}
+	return nil
+}
+
+func (m *ConsensusParams) GetTxSize() *TxSize {
+	if m != nil {
+		return m.TxSize
+	}
+	return nil
+}
+
+func (m *ConsensusParams) GetBlockGossip() *BlockGossip {
+	if m != nil {
+		return m.BlockGossip
+	}
+	return nil
+}
+
+// BlockSize contains limits on the block size.
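+//
+// A minimal, illustrative sketch (the values are arbitrary examples, not
+// documented defaults) of an application adjusting these limits through
+// ConsensusParamUpdates in ResponseEndBlock:
+//
+//	params := &ConsensusParams{
+//		BlockSize:   &BlockSize{MaxBytes: 1048576, MaxTxs: 10000},
+//		BlockGossip: &BlockGossip{BlockPartSizeBytes: 65536},
+//	}
+//	res := ResponseEndBlock{ConsensusParamUpdates: params}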
+type BlockSize struct {
+	MaxBytes int32 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"`
+	MaxTxs   int32 `protobuf:"varint,2,opt,name=max_txs,json=maxTxs,proto3" json:"max_txs,omitempty"`
+	MaxGas   int64 `protobuf:"varint,3,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"`
+}
+
+func (m *BlockSize) Reset()                    { *m = BlockSize{} }
+func (m *BlockSize) String() string            { return proto.CompactTextString(m) }
+func (*BlockSize) ProtoMessage()               {}
+func (*BlockSize) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{26} }
+
+func (m *BlockSize) GetMaxBytes() int32 {
+	if m != nil {
+		return m.MaxBytes
+	}
+	return 0
+}
+
+func (m *BlockSize) GetMaxTxs() int32 {
+	if m != nil {
+		return m.MaxTxs
+	}
+	return 0
+}
+
+func (m *BlockSize) GetMaxGas() int64 {
+	if m != nil {
+		return m.MaxGas
+	}
+	return 0
+}
+
+// TxSize contains limits on the tx size.
+type TxSize struct {
+	MaxBytes int32 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"`
+	MaxGas   int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"`
+}
+
+func (m *TxSize) Reset()                    { *m = TxSize{} }
+func (m *TxSize) String() string            { return proto.CompactTextString(m) }
+func (*TxSize) ProtoMessage()               {}
+func (*TxSize) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{27} }
+
+func (m *TxSize) GetMaxBytes() int32 {
+	if m != nil {
+		return m.MaxBytes
+	}
+	return 0
+}
+
+func (m *TxSize) GetMaxGas() int64 {
+	if m != nil {
+		return m.MaxGas
+	}
+	return 0
+}
+
+// BlockGossip determines consensus-critical
+// elements of how blocks are gossiped
+type BlockGossip struct {
+	// Note: must not be 0
+	BlockPartSizeBytes int32 `protobuf:"varint,1,opt,name=block_part_size_bytes,json=blockPartSizeBytes,proto3" json:"block_part_size_bytes,omitempty"`
+}
+
+func (m *BlockGossip) Reset()                    { *m = BlockGossip{} }
+func (m *BlockGossip) String() string            { return proto.CompactTextString(m) }
+func (*BlockGossip) ProtoMessage()               {}
+func (*BlockGossip) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{28} }
+
+func (m *BlockGossip) GetBlockPartSizeBytes() int32 {
+	if m != nil {
+		return m.BlockPartSizeBytes
+	}
+	return 0
+}
+
+// just the minimum the app might need
+type Header struct {
+	// basics
+	ChainID string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	Height  int64  `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
+	Time    int64  `protobuf:"varint,3,opt,name=time,proto3" json:"time,omitempty"`
+	// txs
+	NumTxs   int32 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"`
+	TotalTxs int64 `protobuf:"varint,5,opt,name=total_txs,json=totalTxs,proto3" json:"total_txs,omitempty"`
+	// hashes
+	LastBlockHash  []byte `protobuf:"bytes,6,opt,name=last_block_hash,json=lastBlockHash,proto3" json:"last_block_hash,omitempty"`
+	ValidatorsHash []byte `protobuf:"bytes,7,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"`
+	AppHash        []byte `protobuf:"bytes,8,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"`
+	// consensus
+	Proposer Validator `protobuf:"bytes,9,opt,name=proposer" json:"proposer"`
+}
+
+func (m *Header) Reset()                    { *m = Header{} }
+func (m *Header) String() string            { return proto.CompactTextString(m) }
+func (*Header) ProtoMessage()               {}
+func (*Header) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{29} }
+
+func (m *Header) GetChainID() string {
+	if 
m != nil { + return m.ChainID + } + return "" +} + +func (m *Header) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Header) GetTime() int64 { + if m != nil { + return m.Time + } + return 0 +} + +func (m *Header) GetNumTxs() int32 { + if m != nil { + return m.NumTxs + } + return 0 +} + +func (m *Header) GetTotalTxs() int64 { + if m != nil { + return m.TotalTxs + } + return 0 +} + +func (m *Header) GetLastBlockHash() []byte { + if m != nil { + return m.LastBlockHash + } + return nil +} + +func (m *Header) GetValidatorsHash() []byte { + if m != nil { + return m.ValidatorsHash + } + return nil +} + +func (m *Header) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func (m *Header) GetProposer() Validator { + if m != nil { + return m.Proposer + } + return Validator{} +} + +// Validator +type Validator struct { + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + PubKey PubKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey" json:"pub_key"` + Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` +} + +func (m *Validator) Reset() { *m = Validator{} } +func (m *Validator) String() string { return proto.CompactTextString(m) } +func (*Validator) ProtoMessage() {} +func (*Validator) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30} } + +func (m *Validator) GetAddress() []byte { + if m != nil { + return m.Address + } + return nil +} + +func (m *Validator) GetPubKey() PubKey { + if m != nil { + return m.PubKey + } + return PubKey{} +} + +func (m *Validator) GetPower() int64 { + if m != nil { + return m.Power + } + return 0 +} + +// Validator with an extra bool +type SigningValidator struct { + Validator Validator `protobuf:"bytes,1,opt,name=validator" json:"validator"` + SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` +} + +func (m *SigningValidator) Reset() { *m = SigningValidator{} } +func (m *SigningValidator) String() string { return proto.CompactTextString(m) } +func (*SigningValidator) ProtoMessage() {} +func (*SigningValidator) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{31} } + +func (m *SigningValidator) GetValidator() Validator { + if m != nil { + return m.Validator + } + return Validator{} +} + +func (m *SigningValidator) GetSignedLastBlock() bool { + if m != nil { + return m.SignedLastBlock + } + return false +} + +type PubKey struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *PubKey) Reset() { *m = PubKey{} } +func (m *PubKey) String() string { return proto.CompactTextString(m) } +func (*PubKey) ProtoMessage() {} +func (*PubKey) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{32} } + +func (m *PubKey) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PubKey) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type Evidence struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Validator Validator `protobuf:"bytes,2,opt,name=validator" json:"validator"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Time int64 `protobuf:"varint,4,opt,name=time,proto3" json:"time,omitempty"` + TotalVotingPower int64 
`protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` +} + +func (m *Evidence) Reset() { *m = Evidence{} } +func (m *Evidence) String() string { return proto.CompactTextString(m) } +func (*Evidence) ProtoMessage() {} +func (*Evidence) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{33} } + +func (m *Evidence) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Evidence) GetValidator() Validator { + if m != nil { + return m.Validator + } + return Validator{} +} + +func (m *Evidence) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Evidence) GetTime() int64 { + if m != nil { + return m.Time + } + return 0 +} + +func (m *Evidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func init() { + proto.RegisterType((*Request)(nil), "types.Request") + proto.RegisterType((*RequestEcho)(nil), "types.RequestEcho") + proto.RegisterType((*RequestFlush)(nil), "types.RequestFlush") + proto.RegisterType((*RequestInfo)(nil), "types.RequestInfo") + proto.RegisterType((*RequestSetOption)(nil), "types.RequestSetOption") + proto.RegisterType((*RequestInitChain)(nil), "types.RequestInitChain") + proto.RegisterType((*RequestQuery)(nil), "types.RequestQuery") + proto.RegisterType((*RequestBeginBlock)(nil), "types.RequestBeginBlock") + proto.RegisterType((*RequestCheckTx)(nil), "types.RequestCheckTx") + proto.RegisterType((*RequestDeliverTx)(nil), "types.RequestDeliverTx") + proto.RegisterType((*RequestEndBlock)(nil), "types.RequestEndBlock") + proto.RegisterType((*RequestCommit)(nil), "types.RequestCommit") + proto.RegisterType((*Response)(nil), "types.Response") + proto.RegisterType((*ResponseException)(nil), "types.ResponseException") + proto.RegisterType((*ResponseEcho)(nil), "types.ResponseEcho") + proto.RegisterType((*ResponseFlush)(nil), "types.ResponseFlush") + proto.RegisterType((*ResponseInfo)(nil), "types.ResponseInfo") + proto.RegisterType((*ResponseSetOption)(nil), "types.ResponseSetOption") + proto.RegisterType((*ResponseInitChain)(nil), "types.ResponseInitChain") + proto.RegisterType((*ResponseQuery)(nil), "types.ResponseQuery") + proto.RegisterType((*ResponseBeginBlock)(nil), "types.ResponseBeginBlock") + proto.RegisterType((*ResponseCheckTx)(nil), "types.ResponseCheckTx") + proto.RegisterType((*ResponseDeliverTx)(nil), "types.ResponseDeliverTx") + proto.RegisterType((*ResponseEndBlock)(nil), "types.ResponseEndBlock") + proto.RegisterType((*ResponseCommit)(nil), "types.ResponseCommit") + proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") + proto.RegisterType((*BlockSize)(nil), "types.BlockSize") + proto.RegisterType((*TxSize)(nil), "types.TxSize") + proto.RegisterType((*BlockGossip)(nil), "types.BlockGossip") + proto.RegisterType((*Header)(nil), "types.Header") + proto.RegisterType((*Validator)(nil), "types.Validator") + proto.RegisterType((*SigningValidator)(nil), "types.SigningValidator") + proto.RegisterType((*PubKey)(nil), "types.PubKey") + proto.RegisterType((*Evidence)(nil), "types.Evidence") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
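+//
+// A hedged usage sketch for the generated gRPC client defined below. The
+// dial address and echo message are illustrative, not documented defaults:
+//
+//	conn, err := grpc.Dial("127.0.0.1:26658", grpc.WithInsecure())
+//	if err != nil {
+//		// handle dial error
+//	}
+//	defer conn.Close()
+//	client := NewABCIApplicationClient(conn)
+//	res, err := client.Echo(context.Background(), &RequestEcho{Message: "ping"})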
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for ABCIApplication service + +type ABCIApplicationClient interface { + Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) + Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) + Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) + SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) + DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) + CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) + Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) + Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) + InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) + BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) + EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) +} + +type aBCIApplicationClient struct { + cc *grpc.ClientConn +} + +func NewABCIApplicationClient(cc *grpc.ClientConn) ABCIApplicationClient { + return &aBCIApplicationClient{cc} +} + +func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { + out := new(ResponseEcho) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Echo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { + out := new(ResponseFlush) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Flush", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { + out := new(ResponseInfo) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Info", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) { + out := new(ResponseSetOption) + err := grpc.Invoke(ctx, "/types.ABCIApplication/SetOption", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { + out := new(ResponseDeliverTx) + err := grpc.Invoke(ctx, "/types.ABCIApplication/DeliverTx", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { + out := new(ResponseCheckTx) + err := grpc.Invoke(ctx, "/types.ABCIApplication/CheckTx", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { + out := new(ResponseQuery) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Query", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { + out := new(ResponseCommit) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Commit", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { + out := new(ResponseInitChain) + err := grpc.Invoke(ctx, "/types.ABCIApplication/InitChain", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { + out := new(ResponseBeginBlock) + err := grpc.Invoke(ctx, "/types.ABCIApplication/BeginBlock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { + out := new(ResponseEndBlock) + err := grpc.Invoke(ctx, "/types.ABCIApplication/EndBlock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ABCIApplication service + +type ABCIApplicationServer interface { + Echo(context.Context, *RequestEcho) (*ResponseEcho, error) + Flush(context.Context, *RequestFlush) (*ResponseFlush, error) + Info(context.Context, *RequestInfo) (*ResponseInfo, error) + SetOption(context.Context, *RequestSetOption) (*ResponseSetOption, error) + DeliverTx(context.Context, *RequestDeliverTx) (*ResponseDeliverTx, error) + CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) + Query(context.Context, *RequestQuery) (*ResponseQuery, error) + Commit(context.Context, *RequestCommit) (*ResponseCommit, error) + InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) + BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) + EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) +} + +func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { + s.RegisterService(&_ABCIApplication_serviceDesc, srv) +} + +func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestEcho) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Echo(ctx, req.(*RequestEcho)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFlush) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Flush(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Flush", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Flush(ctx, req.(*RequestFlush)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_SetOption_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestSetOption) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).SetOption(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/SetOption", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).SetOption(ctx, req.(*RequestSetOption)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestDeliverTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).DeliverTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/DeliverTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*RequestDeliverTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCheckTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).CheckTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/CheckTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Query(ctx, req.(*RequestQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCommit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ABCIApplicationServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Commit(ctx, req.(*RequestCommit)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInitChain) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).InitChain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/InitChain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBeginBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).BeginBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/BeginBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestEndBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).EndBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/EndBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*RequestEndBlock)) + } + return interceptor(ctx, in, info, handler) +} + +var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ + ServiceName: "types.ABCIApplication", + HandlerType: (*ABCIApplicationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _ABCIApplication_Echo_Handler, + }, + { + MethodName: "Flush", + Handler: _ABCIApplication_Flush_Handler, + }, + { + MethodName: "Info", + Handler: _ABCIApplication_Info_Handler, + }, + { + MethodName: "SetOption", + Handler: _ABCIApplication_SetOption_Handler, + }, + { + MethodName: "DeliverTx", + Handler: _ABCIApplication_DeliverTx_Handler, + }, + { + MethodName: "CheckTx", + Handler: _ABCIApplication_CheckTx_Handler, + }, + { + MethodName: "Query", + Handler: _ABCIApplication_Query_Handler, + }, + { + MethodName: "Commit", + Handler: _ABCIApplication_Commit_Handler, + }, + { + MethodName: "InitChain", + Handler: _ABCIApplication_InitChain_Handler, + }, + { + MethodName: "BeginBlock", + Handler: _ABCIApplication_BeginBlock_Handler, + }, + { + MethodName: "EndBlock", + Handler: _ABCIApplication_EndBlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "types/types.proto", +} + +func init() { proto.RegisterFile("types/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 
1846 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0xcd, 0x6e, 0x1b, 0xc9, + 0x11, 0x16, 0xff, 0x39, 0xa5, 0x1f, 0xd2, 0x2d, 0xdb, 0xa2, 0xb9, 0x08, 0x6c, 0x0c, 0x02, 0xaf, + 0x9c, 0xd5, 0x8a, 0x89, 0x76, 0x6d, 0xd8, 0xbb, 0xc9, 0x22, 0x92, 0xd6, 0x59, 0x0a, 0x9b, 0x1f, + 0x65, 0xec, 0x75, 0x80, 0x5c, 0x88, 0x26, 0xa7, 0x45, 0x0e, 0x4c, 0xce, 0xcc, 0x4e, 0x37, 0xb5, + 0x94, 0x6f, 0xb9, 0x2f, 0x72, 0xcd, 0x39, 0x2f, 0x90, 0x43, 0x80, 0xbc, 0x42, 0x90, 0x97, 0x88, + 0x0f, 0x49, 0x4e, 0x79, 0x89, 0x04, 0x55, 0xdd, 0xf3, 0xab, 0xa1, 0xe1, 0x38, 0xc7, 0xbd, 0x48, + 0x5d, 0x5d, 0x55, 0x3d, 0x5d, 0xc5, 0xaa, 0xaf, 0xaa, 0x1a, 0x6e, 0xa8, 0xab, 0x50, 0xc8, 0x01, + 0xfd, 0x3d, 0x0c, 0xa3, 0x40, 0x05, 0xac, 0x41, 0x44, 0xff, 0xc3, 0xa9, 0xa7, 0x66, 0xcb, 0xf1, + 0xe1, 0x24, 0x58, 0x0c, 0xa6, 0xc1, 0x34, 0x18, 0x10, 0x77, 0xbc, 0xbc, 0x20, 0x8a, 0x08, 0x5a, + 0x69, 0xad, 0xfe, 0x20, 0x23, 0xae, 0x84, 0xef, 0x8a, 0x68, 0xe1, 0xf9, 0x6a, 0xa0, 0x16, 0x73, + 0x6f, 0x2c, 0x07, 0x93, 0x60, 0xb1, 0x08, 0xfc, 0xec, 0x67, 0xec, 0xbf, 0xd6, 0xa1, 0xe5, 0x88, + 0xaf, 0x97, 0x42, 0x2a, 0xb6, 0x0f, 0x75, 0x31, 0x99, 0x05, 0xbd, 0xea, 0xbd, 0xca, 0xfe, 0xe6, + 0x11, 0x3b, 0xd4, 0x72, 0x86, 0xfb, 0x74, 0x32, 0x0b, 0x86, 0x1b, 0x0e, 0x49, 0xb0, 0x0f, 0xa0, + 0x71, 0x31, 0x5f, 0xca, 0x59, 0xaf, 0x46, 0xa2, 0xbb, 0x79, 0xd1, 0x9f, 0x21, 0x6b, 0xb8, 0xe1, + 0x68, 0x19, 0x3c, 0xd6, 0xf3, 0x2f, 0x82, 0x5e, 0xbd, 0xec, 0xd8, 0x33, 0xff, 0x82, 0x8e, 0x45, + 0x09, 0xf6, 0x18, 0x40, 0x0a, 0x35, 0x0a, 0x42, 0xe5, 0x05, 0x7e, 0xaf, 0x41, 0xf2, 0x7b, 0x79, + 0xf9, 0x67, 0x42, 0xfd, 0x8a, 0xd8, 0xc3, 0x0d, 0xc7, 0x92, 0x31, 0x81, 0x9a, 0x9e, 0xef, 0xa9, + 0xd1, 0x64, 0xc6, 0x3d, 0xbf, 0xd7, 0x2c, 0xd3, 0x3c, 0xf3, 0x3d, 0x75, 0x8a, 0x6c, 0xd4, 0xf4, + 0x62, 0x02, 0x4d, 0xf9, 0x7a, 0x29, 0xa2, 0xab, 0x5e, 0xab, 0xcc, 0x94, 0x5f, 0x23, 0x0b, 0x4d, + 0x21, 0x19, 0xf6, 0x29, 0x6c, 0x8e, 0xc5, 0xd4, 0xf3, 0x47, 0xe3, 0x79, 0x30, 0x79, 0xd9, 0x6b, + 0x93, 0x4a, 0x2f, 0xaf, 0x72, 0x82, 0x02, 0x27, 0xc8, 0x1f, 0x6e, 0x38, 0x30, 0x4e, 0x28, 0x76, + 0x04, 0xed, 0xc9, 0x4c, 0x4c, 0x5e, 0x8e, 0xd4, 0xaa, 0x67, 0x91, 0xe6, 0xad, 0xbc, 0xe6, 0x29, + 0x72, 0x9f, 0xaf, 0x86, 0x1b, 0x4e, 0x6b, 0xa2, 0x97, 0x68, 0x97, 0x2b, 0xe6, 0xde, 0xa5, 0x88, + 0x50, 0x6b, 0xb7, 0xcc, 0xae, 0xcf, 0x35, 0x9f, 0xf4, 0x2c, 0x37, 0x26, 0xd8, 0x43, 0xb0, 0x84, + 0xef, 0x9a, 0x8b, 0x6e, 0x92, 0xe2, 0xed, 0xc2, 0x2f, 0xea, 0xbb, 0xf1, 0x35, 0xdb, 0xc2, 0xac, + 0xd9, 0x21, 0x34, 0x31, 0x4a, 0x3c, 0xd5, 0xdb, 0x22, 0x9d, 0x9b, 0x85, 0x2b, 0x12, 0x6f, 0xb8, + 0xe1, 0x18, 0xa9, 0x93, 0x16, 0x34, 0x2e, 0xf9, 0x7c, 0x29, 0xec, 0xf7, 0x61, 0x33, 0x13, 0x29, + 0xac, 0x07, 0xad, 0x85, 0x90, 0x92, 0x4f, 0x45, 0xaf, 0x72, 0xaf, 0xb2, 0x6f, 0x39, 0x31, 0x69, + 0xef, 0xc0, 0x56, 0x36, 0x4e, 0x32, 0x8a, 0x18, 0x0b, 0xa8, 0x78, 0x29, 0x22, 0x89, 0x01, 0x60, + 0x14, 0x0d, 0x69, 0x7f, 0x02, 0xdd, 0x62, 0x10, 0xb0, 0x2e, 0xd4, 0x5e, 0x8a, 0x2b, 0x23, 0x89, + 0x4b, 0x76, 0xd3, 0x5c, 0x88, 0xa2, 0xd8, 0x72, 0xcc, 0xed, 0xfe, 0x55, 0x49, 0x94, 0x93, 0x38, + 0x60, 0x0c, 0xea, 0xca, 0x5b, 0xe8, 0x0b, 0xd6, 0x1c, 0x5a, 0xb3, 0x3b, 0xf8, 0x23, 0x71, 0xcf, + 0x1f, 0x79, 0xae, 0x39, 0xa1, 0x45, 0xf4, 0x99, 0xcb, 0x8e, 0xa1, 0x3b, 0x09, 0x7c, 0x29, 0x7c, + 0xb9, 0x94, 0xa3, 0x90, 0x47, 0x7c, 0x21, 0x4d, 0xfc, 0xc7, 0x8e, 0x3d, 0x8d, 0xd9, 0xe7, 0xc4, + 0x75, 0x3a, 0x93, 0xfc, 0x06, 0x7b, 0x04, 0x70, 0xc9, 0xe7, 0x9e, 0xcb, 0x55, 0x10, 0xc9, 0x5e, + 0xfd, 0x5e, 0x6d, 0x7f, 0xf3, 0xa8, 0x6b, 0x94, 0x5f, 0xc4, 0x8c, 0x93, 0xfa, 
0xdf, 0x5e, 0xdf, + 0xdd, 0x70, 0x32, 0x92, 0xec, 0x3e, 0x74, 0x78, 0x18, 0x8e, 0xa4, 0xe2, 0x4a, 0x8c, 0xc6, 0x57, + 0x4a, 0x48, 0xca, 0x8e, 0x2d, 0x67, 0x9b, 0x87, 0xe1, 0x33, 0xdc, 0x3d, 0xc1, 0x4d, 0xdb, 0x4d, + 0x7c, 0x4b, 0x81, 0x8b, 0x16, 0xba, 0x5c, 0x71, 0xb2, 0x70, 0xcb, 0xa1, 0x35, 0xee, 0x85, 0x5c, + 0xcd, 0x8c, 0x75, 0xb4, 0x66, 0xb7, 0xa1, 0x39, 0x13, 0xde, 0x74, 0xa6, 0xc8, 0xa0, 0x9a, 0x63, + 0x28, 0x74, 0x66, 0x18, 0x05, 0x97, 0x82, 0x72, 0xb7, 0xed, 0x68, 0xc2, 0xfe, 0x7b, 0x05, 0x6e, + 0x5c, 0x0b, 0x76, 0x3c, 0x77, 0xc6, 0xe5, 0x2c, 0xfe, 0x16, 0xae, 0xd9, 0x07, 0x78, 0x2e, 0x77, + 0x45, 0x64, 0x30, 0x65, 0xdb, 0xd8, 0x3a, 0xa4, 0x4d, 0x63, 0xa8, 0x11, 0x61, 0x3f, 0xc9, 0x39, + 0xa7, 0x46, 0xce, 0x89, 0x63, 0xfd, 0x99, 0x37, 0xf5, 0x3d, 0x7f, 0xfa, 0x26, 0x1f, 0x0d, 0xe1, + 0xe6, 0xf8, 0xea, 0x15, 0xf7, 0x95, 0xe7, 0x8b, 0xd1, 0x35, 0x2f, 0x77, 0xcc, 0x41, 0x4f, 0x2f, + 0x3d, 0x57, 0xf8, 0x13, 0x61, 0x0e, 0xd8, 0x4d, 0x54, 0x92, 0xa3, 0xa5, 0x7d, 0x0f, 0x76, 0xf2, + 0x19, 0xc9, 0x76, 0xa0, 0xaa, 0x56, 0xc6, 0xb2, 0xaa, 0x5a, 0xd9, 0x76, 0x12, 0x4d, 0x49, 0xf6, + 0x5d, 0x93, 0x79, 0x00, 0x9d, 0x42, 0xa2, 0x65, 0xdc, 0x5c, 0xc9, 0xba, 0xd9, 0xee, 0xc0, 0x76, + 0x2e, 0xbf, 0xec, 0x6f, 0x1b, 0xd0, 0x76, 0x84, 0x0c, 0x31, 0x7c, 0xd8, 0x63, 0xb0, 0xc4, 0x6a, + 0x22, 0x34, 0x28, 0x56, 0x0a, 0x90, 0xa3, 0x65, 0x9e, 0xc6, 0x7c, 0xc4, 0x80, 0x44, 0x98, 0x3d, + 0xc8, 0x01, 0xfa, 0x6e, 0x51, 0x29, 0x8b, 0xe8, 0x07, 0x79, 0x44, 0xbf, 0x59, 0x90, 0x2d, 0x40, + 0xfa, 0x83, 0x1c, 0xa4, 0x17, 0x0f, 0xce, 0x61, 0xfa, 0x93, 0x12, 0x4c, 0x2f, 0x5e, 0x7f, 0x0d, + 0xa8, 0x3f, 0x29, 0x01, 0xf5, 0xde, 0xb5, 0x6f, 0x95, 0xa2, 0xfa, 0x41, 0x1e, 0xd5, 0x8b, 0xe6, + 0x14, 0x60, 0xfd, 0xc7, 0x65, 0xb0, 0x7e, 0xa7, 0xa0, 0xb3, 0x16, 0xd7, 0x3f, 0xba, 0x86, 0xeb, + 0xb7, 0x0b, 0xaa, 0x25, 0xc0, 0xfe, 0x24, 0x07, 0xec, 0x50, 0x6a, 0xdb, 0x1a, 0x64, 0x7f, 0x74, + 0x1d, 0xd9, 0xf7, 0x8a, 0x3f, 0x6d, 0x19, 0xb4, 0x0f, 0x0a, 0xd0, 0x7e, 0xab, 0x78, 0xcb, 0xb5, + 0xd8, 0xfe, 0x00, 0xf3, 0xbd, 0x10, 0x69, 0x88, 0x0d, 0x22, 0x8a, 0x82, 0xc8, 0x80, 0xaf, 0x26, + 0xec, 0x7d, 0x44, 0xa0, 0x34, 0xbe, 0xde, 0x50, 0x07, 0x28, 0xe8, 0x33, 0xd1, 0x65, 0xff, 0xa1, + 0x92, 0xea, 0x52, 0x29, 0xc8, 0xa2, 0x97, 0x65, 0xd0, 0x2b, 0x53, 0x1e, 0xaa, 0xb9, 0xf2, 0xc0, + 0x7e, 0x00, 0x37, 0xe6, 0x5c, 0x2a, 0xed, 0x97, 0x51, 0x0e, 0xce, 0x3a, 0xc8, 0xd0, 0x0e, 0xd1, + 0xb8, 0xf6, 0x21, 0xec, 0x66, 0x64, 0x11, 0x5a, 0x09, 0xba, 0xea, 0x94, 0xbc, 0xdd, 0x44, 0xfa, + 0x38, 0x0c, 0x87, 0x5c, 0xce, 0xec, 0x5f, 0xa4, 0xf6, 0xa7, 0xa5, 0x87, 0x41, 0x7d, 0x12, 0xb8, + 0xda, 0xac, 0x6d, 0x87, 0xd6, 0x58, 0x8e, 0xe6, 0xc1, 0x94, 0xbe, 0x6a, 0x39, 0xb8, 0x44, 0xa9, + 0x24, 0x53, 0x2c, 0x9d, 0x12, 0xf6, 0xef, 0x2b, 0xe9, 0x79, 0x69, 0x35, 0x2a, 0x2b, 0x2f, 0x95, + 0xff, 0xa7, 0xbc, 0x54, 0xdf, 0xb6, 0xbc, 0xd8, 0x7f, 0xa9, 0xa4, 0xbf, 0x45, 0x52, 0x38, 0xde, + 0xcd, 0x38, 0x0c, 0x0b, 0xcf, 0x77, 0xc5, 0x8a, 0x52, 0xbd, 0xe6, 0x68, 0x22, 0xae, 0xd3, 0x4d, + 0x72, 0x70, 0xbe, 0x4e, 0xb7, 0x68, 0x4f, 0x13, 0xa6, 0xe0, 0x04, 0x17, 0x94, 0x83, 0x5b, 0x8e, + 0x26, 0x32, 0xb8, 0x69, 0xe5, 0x70, 0xf3, 0x1c, 0xd8, 0xf5, 0xec, 0x64, 0x9f, 0x40, 0x5d, 0xf1, + 0x29, 0x3a, 0x0f, 0xed, 0xdf, 0x39, 0xd4, 0x5d, 0xef, 0xe1, 0x97, 0x2f, 0xce, 0xb9, 0x17, 0x9d, + 0xdc, 0x46, 0xeb, 0xff, 0xfd, 0xfa, 0xee, 0x0e, 0xca, 0x1c, 0x04, 0x0b, 0x4f, 0x89, 0x45, 0xa8, + 0xae, 0x1c, 0xd2, 0xb1, 0xff, 0x53, 0x41, 0xd4, 0xce, 0x65, 0x6d, 0xa9, 0x2f, 0xe2, 0xd0, 0xac, + 0x66, 0x0a, 0xeb, 0xdb, 0xf9, 0xe7, 0x7b, 0x00, 0x53, 0x2e, 0x47, 0xdf, 0x70, 0x5f, 0x09, 0xd7, + 0x38, 
0xc9, 0x9a, 0x72, 0xf9, 0x1b, 0xda, 0xc0, 0xfe, 0x03, 0xd9, 0x4b, 0x29, 0x5c, 0xf2, 0x56, + 0xcd, 0x69, 0x4d, 0xb9, 0xfc, 0x4a, 0x0a, 0x37, 0xb1, 0xab, 0xf5, 0xbf, 0xdb, 0xc5, 0xf6, 0xa1, + 0x76, 0x21, 0x84, 0x41, 0xb6, 0x6e, 0xa2, 0x7a, 0xf6, 0xe8, 0x63, 0x52, 0xd6, 0x21, 0x81, 0x22, + 0xf6, 0xef, 0xaa, 0x69, 0x70, 0xa6, 0xc5, 0xed, 0xbb, 0xe5, 0x83, 0x7f, 0x52, 0xb7, 0x98, 0x87, + 0x52, 0x76, 0x0a, 0x37, 0x92, 0x94, 0x19, 0x2d, 0x43, 0x97, 0x63, 0x17, 0x56, 0x79, 0x63, 0x8e, + 0x75, 0x13, 0x85, 0xaf, 0xb4, 0x3c, 0xfb, 0x25, 0xec, 0x15, 0x92, 0x3c, 0x39, 0xaa, 0xfa, 0xc6, + 0x5c, 0xbf, 0x95, 0xcf, 0xf5, 0xf8, 0xbc, 0xd8, 0x1f, 0xb5, 0x77, 0x88, 0xf5, 0xef, 0x63, 0x9b, + 0x93, 0x85, 0xfe, 0xb2, 0x5f, 0xd4, 0xfe, 0x63, 0x05, 0x3a, 0x85, 0xcb, 0xb0, 0x01, 0x80, 0x46, + 0x4e, 0xe9, 0xbd, 0x12, 0x06, 0xa4, 0x62, 0x1f, 0x90, 0xb3, 0x9e, 0x79, 0xaf, 0x84, 0x63, 0x8d, + 0xe3, 0x25, 0xbb, 0x0f, 0x2d, 0xb5, 0xd2, 0xd2, 0xf9, 0x46, 0xf0, 0xf9, 0x8a, 0x44, 0x9b, 0x8a, + 0xfe, 0xb3, 0x87, 0xb0, 0xa5, 0x0f, 0x9e, 0x06, 0x52, 0x7a, 0xa1, 0x69, 0x46, 0x58, 0xf6, 0xe8, + 0x2f, 0x88, 0xe3, 0x6c, 0x8e, 0x53, 0xc2, 0xfe, 0x2d, 0x58, 0xc9, 0x67, 0xd9, 0x7b, 0x60, 0x2d, + 0xf8, 0xca, 0x74, 0xc9, 0x78, 0xb7, 0x86, 0xd3, 0x5e, 0xf0, 0x15, 0x35, 0xc8, 0x6c, 0x0f, 0x5a, + 0xc8, 0x54, 0x2b, 0xed, 0xef, 0x86, 0xd3, 0x5c, 0xf0, 0xd5, 0xf3, 0x55, 0xc2, 0x98, 0x72, 0x19, + 0xb7, 0xc0, 0x0b, 0xbe, 0xfa, 0x82, 0x4b, 0xfb, 0x33, 0x68, 0xea, 0x4b, 0xbe, 0xd5, 0xc1, 0xa8, + 0x5f, 0xcd, 0xe9, 0xff, 0x14, 0x36, 0x33, 0xf7, 0x66, 0x3f, 0x82, 0x5b, 0xda, 0xc2, 0x90, 0x47, + 0x8a, 0x3c, 0x92, 0x3b, 0x90, 0x11, 0xf3, 0x9c, 0x47, 0x0a, 0x3f, 0xa9, 0x9b, 0xfa, 0x3f, 0x57, + 0xa1, 0xa9, 0x1b, 0x66, 0x76, 0x3f, 0x33, 0x9d, 0x50, 0x55, 0x3c, 0xd9, 0xfc, 0xc7, 0xeb, 0xbb, + 0x2d, 0x2a, 0x20, 0x67, 0x9f, 0xa7, 0xa3, 0x4a, 0x0a, 0x98, 0xd5, 0x5c, 0x3f, 0x1f, 0x4f, 0x3c, + 0xb5, 0xcc, 0xc4, 0xb3, 0x07, 0x2d, 0x7f, 0xb9, 0x20, 0x97, 0xd4, 0xb5, 0x4b, 0xfc, 0xe5, 0x02, + 0x5d, 0xf2, 0x1e, 0x58, 0x2a, 0x50, 0x7c, 0x4e, 0x2c, 0x9d, 0xa4, 0x6d, 0xda, 0x40, 0xe6, 0x7d, + 0xe8, 0x64, 0xab, 0x2d, 0x56, 0x4f, 0x0d, 0xee, 0xdb, 0x69, 0xad, 0xc5, 0x09, 0xe0, 0x7d, 0xe8, + 0xa4, 0x85, 0x46, 0xcb, 0x69, 0xc0, 0xdf, 0x49, 0xb7, 0x49, 0xf0, 0x0e, 0xb4, 0x93, 0x3a, 0xac, + 0xc1, 0xbf, 0xc5, 0x75, 0xf9, 0xc5, 0xc1, 0x39, 0x8c, 0x82, 0x30, 0x90, 0x22, 0x32, 0x0d, 0xd6, + 0xba, 0x84, 0x4b, 0xe4, 0x6c, 0x0f, 0xac, 0x84, 0x89, 0x4d, 0x03, 0x77, 0xdd, 0x48, 0x48, 0x69, + 0xfa, 0xf3, 0x98, 0x64, 0x07, 0xd0, 0x0a, 0x97, 0xe3, 0x11, 0xd6, 0xa6, 0x7c, 0x60, 0x9e, 0x2f, + 0xc7, 0x5f, 0x8a, 0xab, 0x78, 0x42, 0x09, 0x89, 0xa2, 0xea, 0x14, 0x7c, 0x23, 0x22, 0xe3, 0x3f, + 0x4d, 0xd8, 0x0a, 0xba, 0xc5, 0xf1, 0x84, 0x7d, 0x0c, 0x56, 0x62, 0x5f, 0x21, 0x41, 0x8a, 0x77, + 0x4e, 0x05, 0xb1, 0x85, 0x91, 0xde, 0xd4, 0x17, 0xee, 0x28, 0xf5, 0x2d, 0xdd, 0xab, 0xed, 0x74, + 0x34, 0xe3, 0xe7, 0xb1, 0x73, 0xed, 0x1f, 0x42, 0x53, 0xdf, 0x91, 0x7e, 0xd4, 0xab, 0x30, 0xee, + 0xaf, 0x68, 0x5d, 0x9a, 0xc9, 0x7f, 0xaa, 0x40, 0x3b, 0x1e, 0x7f, 0x4a, 0x95, 0x72, 0x97, 0xae, + 0xbe, 0xed, 0xa5, 0xd7, 0xcd, 0x8e, 0x71, 0xac, 0xd5, 0x33, 0xb1, 0x76, 0x00, 0x4c, 0x87, 0xd4, + 0x65, 0xa0, 0x3c, 0x7f, 0x3a, 0xd2, 0xde, 0xd4, 0xb1, 0xd5, 0x25, 0xce, 0x0b, 0x62, 0x9c, 0xe3, + 0xfe, 0xd1, 0xb7, 0x0d, 0xe8, 0x1c, 0x9f, 0x9c, 0x9e, 0x1d, 0x87, 0xe1, 0xdc, 0x9b, 0x70, 0xea, + 0xba, 0x06, 0x50, 0xa7, 0xbe, 0xb2, 0xe4, 0x75, 0xaa, 0x5f, 0x36, 0xe0, 0xb0, 0x23, 0x68, 0x50, + 0x7b, 0xc9, 0xca, 0x1e, 0xa9, 0xfa, 0xa5, 0x73, 0x0e, 0x7e, 0x44, 0x37, 0xa0, 0xd7, 0xdf, 0xaa, + 0xfa, 0x65, 0xc3, 0x0e, 0xfb, 
0x0c, 0xac, 0xb4, 0x31, 0x5c, 0xf7, 0x62, 0xd5, 0x5f, 0x3b, 0xf6, + 0xa0, 0x7e, 0x5a, 0x6b, 0xd7, 0xbd, 0xef, 0xf4, 0xd7, 0xce, 0x07, 0xec, 0x31, 0xb4, 0xe2, 0x6e, + 0xa5, 0xfc, 0x4d, 0xa9, 0xbf, 0x66, 0x24, 0x41, 0xf7, 0xe8, 0x8e, 0xaf, 0xec, 0xe1, 0xab, 0x5f, + 0x3a, 0x37, 0xb1, 0x87, 0xd0, 0x34, 0x05, 0xa3, 0xf4, 0x75, 0xa8, 0x5f, 0x3e, 0x58, 0xa0, 0x91, + 0x69, 0xb7, 0xbb, 0xee, 0x71, 0xae, 0xbf, 0x76, 0xc0, 0x63, 0xc7, 0x00, 0x99, 0x2e, 0x6f, 0xed, + 0xab, 0x5b, 0x7f, 0xfd, 0xe0, 0xc6, 0x3e, 0x85, 0x76, 0x3a, 0x8c, 0x97, 0xbf, 0x86, 0xf5, 0xd7, + 0xcd, 0x52, 0xe3, 0x26, 0xbd, 0x98, 0x7e, 0xf4, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe5, 0xf3, + 0xb2, 0x34, 0xad, 0x15, 0x00, 0x00, +} diff --git a/abci/types/types.proto b/abci/types/types.proto new file mode 100644 index 000000000..b4f4b2aa6 --- /dev/null +++ b/abci/types/types.proto @@ -0,0 +1,282 @@ +syntax = "proto3"; +package types; + +// For more information on gogo.proto, see: +// https://github.com/gogo/protobuf/blob/master/extensions.md +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "github.com/tendermint/tmlibs/common/types.proto"; + +// This file is copied from http://github.com/tendermint/abci +// NOTE: When using custom types, mind the warnings. +// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues + +//---------------------------------------- +// Request types + +message Request { + oneof value { + RequestEcho echo = 2; + RequestFlush flush = 3; + RequestInfo info = 4; + RequestSetOption set_option = 5; + RequestInitChain init_chain = 6; + RequestQuery query = 7; + RequestBeginBlock begin_block = 8; + RequestCheckTx check_tx = 9; + RequestDeliverTx deliver_tx = 19; + RequestEndBlock end_block = 11; + RequestCommit commit = 12; + } +} + +message RequestEcho { + string message = 1; +} + +message RequestFlush { +} + +message RequestInfo { + string version = 1; +} + +// nondeterministic +message RequestSetOption { + string key = 1; + string value = 2; +} + +message RequestInitChain { + int64 time = 1; + string chain_id = 2; + ConsensusParams consensus_params = 3; + repeated Validator validators = 4 [(gogoproto.nullable)=false]; + bytes app_state_bytes = 5; +} + +message RequestQuery { + bytes data = 1; + string path = 2; + int64 height = 3; + bool prove = 4; +} + +message RequestBeginBlock { + bytes hash = 1; + Header header = 2 [(gogoproto.nullable)=false]; + repeated SigningValidator validators = 3 [(gogoproto.nullable)=false]; + repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false]; +} + +message RequestCheckTx { + bytes tx = 1; +} + +message RequestDeliverTx { + bytes tx = 1; +} + +message RequestEndBlock { + int64 height = 1; +} + +message RequestCommit { +} + +//---------------------------------------- +// Response types + +message Response { + oneof value { + ResponseException exception = 1; + ResponseEcho echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseSetOption set_option = 5; + ResponseInitChain init_chain = 6; + ResponseQuery query = 7; + ResponseBeginBlock begin_block = 8; + ResponseCheckTx check_tx = 9; + ResponseDeliverTx deliver_tx = 10; + ResponseEndBlock end_block = 11; + ResponseCommit commit = 12; + } +} + +// nondeterministic +message ResponseException { + string error = 1; +} + +message ResponseEcho { + string message = 1; +} + +message ResponseFlush { +} + +message ResponseInfo { + string data = 1; + string version = 2; + int64 last_block_height = 3; + bytes last_block_app_hash = 4; +} + +// nondeterministic +message 
ResponseSetOption {
+  uint32 code = 1;
+  // bytes data = 2;
+  string log = 3;
+  string info = 4;
+}
+
+message ResponseInitChain {
+  ConsensusParams consensus_params = 1;
+  repeated Validator validators = 2 [(gogoproto.nullable)=false];
+}
+
+message ResponseQuery {
+  uint32 code = 1;
+  // bytes data = 2; // use "value" instead.
+  string log = 3; // nondeterministic
+  string info = 4; // nondeterministic
+  int64 index = 5;
+  bytes key = 6;
+  bytes value = 7;
+  bytes proof = 8;
+  int64 height = 9;
+}
+
+message ResponseBeginBlock {
+  repeated common.KVPair tags = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+}
+
+message ResponseCheckTx {
+  uint32 code = 1;
+  bytes data = 2;
+  string log = 3; // nondeterministic
+  string info = 4; // nondeterministic
+  int64 gas_wanted = 5;
+  int64 gas_used = 6;
+  repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
+}
+
+message ResponseDeliverTx {
+  uint32 code = 1;
+  bytes data = 2;
+  string log = 3; // nondeterministic
+  string info = 4; // nondeterministic
+  int64 gas_wanted = 5;
+  int64 gas_used = 6;
+  repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
+}
+
+message ResponseEndBlock {
+  repeated Validator validator_updates = 1 [(gogoproto.nullable)=false];
+  ConsensusParams consensus_param_updates = 2;
+  repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+}
+
+message ResponseCommit {
+  // reserve 1
+  bytes data = 2;
+}
+
+//----------------------------------------
+// Misc.
+
+// ConsensusParams contains all consensus-relevant parameters
+// that can be adjusted by the abci app
+message ConsensusParams {
+  BlockSize block_size = 1;
+  TxSize tx_size = 2;
+  BlockGossip block_gossip = 3;
+}
+
+// BlockSize contains limits on the block size.
+message BlockSize {
+  int32 max_bytes = 1;
+  int32 max_txs = 2;
+  int64 max_gas = 3;
+}
+
+// TxSize contains limits on the tx size.
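+//
+// Note on [(gogoproto.nullable)=false], used throughout this file: it makes
+// gogoproto generate value (non-pointer) Go fields, e.g.
+//
+//   repeated Validator validators = 2 [(gogoproto.nullable)=false];
+//
+// becomes `Validators []Validator` rather than `Validators []*Validator`
+// in types.pb.go above.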
+message TxSize {
+  int32 max_bytes = 1;
+  int64 max_gas = 2;
+}
+
+// BlockGossip determines consensus-critical
+// elements of how blocks are gossiped
+message BlockGossip {
+  // Note: must not be 0
+  int32 block_part_size_bytes = 1;
+}
+
+//----------------------------------------
+// Blockchain Types
+
+// just the minimum the app might need
+message Header {
+  // basics
+  string chain_id = 1 [(gogoproto.customname)="ChainID"];
+  int64 height = 2;
+  int64 time = 3;
+
+  // txs
+  int32 num_txs = 4;
+  int64 total_txs = 5;
+
+  // hashes
+  bytes last_block_hash = 6;
+  bytes validators_hash = 7;
+  bytes app_hash = 8;
+
+  // consensus
+  Validator proposer = 9 [(gogoproto.nullable)=false];
+}
+
+// Validator
+message Validator {
+  bytes address = 1;
+  PubKey pub_key = 2 [(gogoproto.nullable)=false];
+  int64 power = 3;
+}
+
+// Validator with an extra bool
+message SigningValidator {
+  Validator validator = 1 [(gogoproto.nullable)=false];
+  bool signed_last_block = 2;
+}
+
+message PubKey {
+  string type = 1;
+  bytes data = 2;
+}
+
+message Evidence {
+  string type = 1;
+  Validator validator = 2 [(gogoproto.nullable)=false];
+  int64 height = 3;
+  int64 time = 4;
+  int64 total_voting_power = 5;
+}
+
+//----------------------------------------
+// Service Definition
+
+service ABCIApplication {
+  rpc Echo(RequestEcho) returns (ResponseEcho);
+  rpc Flush(RequestFlush) returns (ResponseFlush);
+  rpc Info(RequestInfo) returns (ResponseInfo);
+  rpc SetOption(RequestSetOption) returns (ResponseSetOption);
+  rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx);
+  rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx);
+  rpc Query(RequestQuery) returns (ResponseQuery);
+  rpc Commit(RequestCommit) returns (ResponseCommit);
+  rpc InitChain(RequestInitChain) returns (ResponseInitChain);
+  rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock);
+  rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock);
+}
diff --git a/abci/types/types_test.go b/abci/types/types_test.go
new file mode 100644
index 000000000..baa8155cd
--- /dev/null
+++ b/abci/types/types_test.go
@@ -0,0 +1,31 @@
+package types
+
+import (
+	"testing"
+
+	asrt "github.com/stretchr/testify/assert"
+)
+
+func TestConsensusParams(t *testing.T) {
+	assert := asrt.New(t)
+
+	params := &ConsensusParams{
+		BlockSize:   &BlockSize{MaxGas: 12345},
+		BlockGossip: &BlockGossip{BlockPartSizeBytes: 54321},
+	}
+	var noParams *ConsensusParams // nil
+
+	// no error with nil fields
+	assert.Nil(noParams.GetBlockSize())
+	assert.EqualValues(noParams.GetBlockSize().GetMaxGas(), 0)
+
+	// get values with real fields
+	assert.NotNil(params.GetBlockSize())
+	assert.EqualValues(params.GetBlockSize().GetMaxTxs(), 0)
+	assert.EqualValues(params.GetBlockSize().GetMaxGas(), 12345)
+	assert.NotNil(params.GetBlockGossip())
+	assert.EqualValues(params.GetBlockGossip().GetBlockPartSizeBytes(), 54321)
+	assert.Nil(params.GetTxSize())
+	assert.EqualValues(params.GetTxSize().GetMaxBytes(), 0)
+
+}
diff --git a/abci/types/util.go b/abci/types/util.go
new file mode 100644
index 000000000..458024c58
--- /dev/null
+++ b/abci/types/util.go
@@ -0,0 +1,59 @@
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"sort"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+//------------------------------------------------------------------------------
+
+// Validators is a list of validators that implements sort.Interface
+type Validators []Validator
+
+var _ sort.Interface = (Validators)(nil)
+
+// All these methods for Validators:
+// Len, Less and Swap
+// are for Validators to implement sort.Interface +// which will be used by the sort package. +// See Issue https://github.com/tendermint/abci/issues/212 + +func (v Validators) Len() int { + return len(v) +} + +// XXX: doesn't distinguish same validator with different power +func (v Validators) Less(i, j int) bool { + return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0 +} + +func (v Validators) Swap(i, j int) { + v1 := v[i] + v[i] = v[j] + v[j] = v1 +} + +func ValidatorsString(vs Validators) string { + s := make([]validatorPretty, len(vs)) + for i, v := range vs { + s[i] = validatorPretty{ + Address: v.Address, + PubKey: v.PubKey.Data, + Power: v.Power, + } + } + b, err := json.Marshal(s) + if err != nil { + panic(err.Error()) + } + return string(b) +} + +type validatorPretty struct { + Address cmn.HexBytes `json:"address"` + PubKey []byte `json:"pub_key"` + Power int64 `json:"power"` +} diff --git a/abci/version/version.go b/abci/version/version.go new file mode 100644 index 000000000..7223a86ad --- /dev/null +++ b/abci/version/version.go @@ -0,0 +1,9 @@ +package version + +// NOTE: we should probably be versioning the ABCI and the abci-cli separately + +const Maj = "0" +const Min = "12" +const Fix = "0" + +const Version = "0.12.0" diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 000000000..1ddf8fdd2 --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,13 @@ +version: 1.0.{build} +configuration: Release +platform: +- x64 +- x86 +clone_folder: c:\go\path\src\github.com\tendermint\tendermint +before_build: +- cmd: set GOPATH=%GOROOT%\path +- cmd: set PATH=%GOPATH%\bin;%PATH% +- cmd: make get_vendor_deps +build_script: +- cmd: make test +test: off diff --git a/benchmarks/atomic_test.go b/benchmarks/atomic_test.go new file mode 100644 index 000000000..5fe4832df --- /dev/null +++ b/benchmarks/atomic_test.go @@ -0,0 +1,29 @@ +package benchmarks + +import ( + "sync/atomic" + "testing" + "unsafe" +) + +func BenchmarkAtomicUintPtr(b *testing.B) { + b.StopTimer() + pointers := make([]uintptr, 1000) + b.Log(unsafe.Sizeof(pointers[0])) + b.StartTimer() + + for j := 0; j < b.N; j++ { + atomic.StoreUintptr(&pointers[j%1000], uintptr(j)) + } +} + +func BenchmarkAtomicPointer(b *testing.B) { + b.StopTimer() + pointers := make([]unsafe.Pointer, 1000) + b.Log(unsafe.Sizeof(pointers[0])) + b.StartTimer() + + for j := 0; j < b.N; j++ { + atomic.StorePointer(&pointers[j%1000], unsafe.Pointer(uintptr(j))) + } +} diff --git a/benchmarks/blockchain/.gitignore b/benchmarks/blockchain/.gitignore new file mode 100644 index 000000000..9e67bd47d --- /dev/null +++ b/benchmarks/blockchain/.gitignore @@ -0,0 +1,2 @@ +data + diff --git a/benchmarks/blockchain/localsync.sh b/benchmarks/blockchain/localsync.sh new file mode 100755 index 000000000..2dc3e49c0 --- /dev/null +++ b/benchmarks/blockchain/localsync.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +DATA=$GOPATH/src/github.com/tendermint/tendermint/benchmarks/blockchain/data +if [ ! -d $DATA ]; then + echo "no data found, generating a chain... (this only has to happen once)" + + tendermint init --home $DATA + cp $DATA/config.toml $DATA/config2.toml + echo " + [consensus] + timeout_commit = 0 + " >> $DATA/config.toml + + echo "starting node" + tendermint node \ + --home $DATA \ + --proxy_app kvstore \ + --p2p.laddr tcp://127.0.0.1:56656 \ + --rpc.laddr tcp://127.0.0.1:56657 \ + --log_level error & + + echo "making blocks for 60s" + sleep 60 + + mv $DATA/config2.toml $DATA/config.toml + + kill %1 + + echo "done generating chain." 
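+  # The generated chain is cached in $DATA, so later runs reuse it and skip
+  # this block entirely.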
+fi + +# validator node +HOME1=$TMPDIR$RANDOM$RANDOM +cp -R $DATA $HOME1 +echo "starting validator node" +tendermint node \ + --home $HOME1 \ + --proxy_app kvstore \ + --p2p.laddr tcp://127.0.0.1:56656 \ + --rpc.laddr tcp://127.0.0.1:56657 \ + --log_level error & +sleep 1 + +# downloader node +HOME2=$TMPDIR$RANDOM$RANDOM +tendermint init --home $HOME2 +cp $HOME1/genesis.json $HOME2 +printf "starting downloader node" +tendermint node \ + --home $HOME2 \ + --proxy_app kvstore \ + --p2p.laddr tcp://127.0.0.1:56666 \ + --rpc.laddr tcp://127.0.0.1:56667 \ + --p2p.persistent_peers 127.0.0.1:56656 \ + --log_level error & + +# wait for node to start up so we only count time where we are actually syncing +sleep 0.5 +while curl localhost:56667/status 2> /dev/null | grep "\"latest_block_height\": 0," > /dev/null +do + printf '.' + sleep 0.2 +done +echo + +echo "syncing blockchain for 10s" +for i in {1..10} +do + sleep 1 + HEIGHT="$(curl localhost:56667/status 2> /dev/null \ + | grep 'latest_block_height' \ + | grep -o ' [0-9]*' \ + | xargs)" + let 'RATE = HEIGHT / i' + echo "height: $HEIGHT, blocks/sec: $RATE" +done + +kill %1 +kill %2 +rm -rf $HOME1 $HOME2 diff --git a/benchmarks/chan_test.go b/benchmarks/chan_test.go new file mode 100644 index 000000000..78b70c9be --- /dev/null +++ b/benchmarks/chan_test.go @@ -0,0 +1,19 @@ +package benchmarks + +import ( + "testing" +) + +func BenchmarkChanMakeClose(b *testing.B) { + b.StopTimer() + b.StartTimer() + + for j := 0; j < b.N; j++ { + foo := make(chan struct{}) + close(foo) + something, ok := <-foo + if ok { + b.Error(something, ok) + } + } +} diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go new file mode 100644 index 000000000..53cbf632c --- /dev/null +++ b/benchmarks/codec_test.go @@ -0,0 +1,129 @@ +package benchmarks + +import ( + "testing" + "time" + + "github.com/tendermint/go-amino" + + proto "github.com/tendermint/tendermint/benchmarks/proto" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/p2p" + ctypes "github.com/tendermint/tendermint/rpc/core/types" +) + +func BenchmarkEncodeStatusWire(b *testing.B) { + b.StopTimer() + cdc := amino.NewCodec() + ctypes.RegisterAmino(cdc) + nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} + status := &ctypes.ResultStatus{ + NodeInfo: p2p.NodeInfo{ + ID: nodeKey.ID(), + Moniker: "SOMENAME", + Network: "SOMENAME", + ListenAddr: "SOMEADDR", + Version: "SOMEVER", + Other: []string{"SOMESTRING", "OTHERSTRING"}, + }, + SyncInfo: ctypes.SyncInfo{ + LatestBlockHash: []byte("SOMEBYTES"), + LatestBlockHeight: 123, + LatestBlockTime: time.Unix(0, 1234), + }, + ValidatorInfo: ctypes.ValidatorInfo{ + PubKey: nodeKey.PubKey(), + }, + } + b.StartTimer() + + counter := 0 + for i := 0; i < b.N; i++ { + jsonBytes, err := cdc.MarshalJSON(status) + if err != nil { + panic(err) + } + counter += len(jsonBytes) + } + +} + +func BenchmarkEncodeNodeInfoWire(b *testing.B) { + b.StopTimer() + cdc := amino.NewCodec() + ctypes.RegisterAmino(cdc) + nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} + nodeInfo := p2p.NodeInfo{ + ID: nodeKey.ID(), + Moniker: "SOMENAME", + Network: "SOMENAME", + ListenAddr: "SOMEADDR", + Version: "SOMEVER", + Other: []string{"SOMESTRING", "OTHERSTRING"}, + } + b.StartTimer() + + counter := 0 + for i := 0; i < b.N; i++ { + jsonBytes, err := cdc.MarshalJSON(nodeInfo) + if err != nil { + panic(err) + } + counter += len(jsonBytes) + } +} + +func BenchmarkEncodeNodeInfoBinary(b *testing.B) { + b.StopTimer() + cdc := amino.NewCodec() + 
ctypes.RegisterAmino(cdc) + nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} + nodeInfo := p2p.NodeInfo{ + ID: nodeKey.ID(), + Moniker: "SOMENAME", + Network: "SOMENAME", + ListenAddr: "SOMEADDR", + Version: "SOMEVER", + Other: []string{"SOMESTRING", "OTHERSTRING"}, + } + b.StartTimer() + + counter := 0 + for i := 0; i < b.N; i++ { + jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo) + counter += len(jsonBytes) + } + +} + +func BenchmarkEncodeNodeInfoProto(b *testing.B) { + b.StopTimer() + nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} + nodeID := string(nodeKey.ID()) + someName := "SOMENAME" + someAddr := "SOMEADDR" + someVer := "SOMEVER" + someString := "SOMESTRING" + otherString := "OTHERSTRING" + nodeInfo := proto.NodeInfo{ + Id: &proto.ID{Id: &nodeID}, + Moniker: &someName, + Network: &someName, + ListenAddr: &someAddr, + Version: &someVer, + Other: []string{someString, otherString}, + } + b.StartTimer() + + counter := 0 + for i := 0; i < b.N; i++ { + bytes, err := nodeInfo.Marshal() + if err != nil { + b.Fatal(err) + return + } + //jsonBytes := wire.JSONBytes(nodeInfo) + counter += len(bytes) + } + +} diff --git a/benchmarks/empty.go b/benchmarks/empty.go new file mode 100644 index 000000000..20f08f14b --- /dev/null +++ b/benchmarks/empty.go @@ -0,0 +1 @@ +package benchmarks diff --git a/benchmarks/map_test.go b/benchmarks/map_test.go new file mode 100644 index 000000000..d13a19edf --- /dev/null +++ b/benchmarks/map_test.go @@ -0,0 +1,35 @@ +package benchmarks + +import ( + "testing" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func BenchmarkSomething(b *testing.B) { + b.StopTimer() + numItems := 100000 + numChecks := 100000 + keys := make([]string, numItems) + for i := 0; i < numItems; i++ { + keys[i] = cmn.RandStr(100) + } + txs := make([]string, numChecks) + for i := 0; i < numChecks; i++ { + txs[i] = cmn.RandStr(100) + } + b.StartTimer() + + counter := 0 + for j := 0; j < b.N; j++ { + foo := make(map[string]string) + for _, key := range keys { + foo[key] = key + } + for _, tx := range txs { + if _, ok := foo[tx]; ok { + counter++ + } + } + } +} diff --git a/benchmarks/os_test.go b/benchmarks/os_test.go new file mode 100644 index 000000000..406038b9d --- /dev/null +++ b/benchmarks/os_test.go @@ -0,0 +1,33 @@ +package benchmarks + +import ( + "os" + "testing" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func BenchmarkFileWrite(b *testing.B) { + b.StopTimer() + file, err := os.OpenFile("benchmark_file_write.out", + os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + b.Error(err) + } + testString := cmn.RandStr(200) + "\n" + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := file.Write([]byte(testString)) + if err != nil { + b.Error(err) + } + } + + if err := file.Close(); err != nil { + b.Error(err) + } + if err := os.Remove("benchmark_file_write.out"); err != nil { + b.Error(err) + } +} diff --git a/benchmarks/proto/README b/benchmarks/proto/README new file mode 100644 index 000000000..87ece2576 --- /dev/null +++ b/benchmarks/proto/README @@ -0,0 +1,2 @@ +Doing some protobuf tests here. +Using gogoprotobuf. diff --git a/benchmarks/proto/test.pb.go b/benchmarks/proto/test.pb.go new file mode 100644 index 000000000..d430eeb08 --- /dev/null +++ b/benchmarks/proto/test.pb.go @@ -0,0 +1,1456 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: test.proto + +/* + Package test is a generated protocol buffer package. 
+ + It is generated from these files: + test.proto + + It has these top-level messages: + ResultStatus + NodeInfo + ID + PubKey + PubKeyEd25519 +*/ +package test + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ResultStatus struct { + NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo" json:"nodeInfo,omitempty"` + PubKey *PubKey `protobuf:"bytes,2,req,name=pubKey" json:"pubKey,omitempty"` + LatestBlockHash []byte `protobuf:"bytes,3,req,name=latestBlockHash" json:"latestBlockHash,omitempty"` + LatestBlockHeight *int64 `protobuf:"varint,4,req,name=latestBlockHeight" json:"latestBlockHeight,omitempty"` + LatestBlocktime *int64 `protobuf:"varint,5,req,name=latestBlocktime" json:"latestBlocktime,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResultStatus) Reset() { *m = ResultStatus{} } +func (m *ResultStatus) String() string { return proto.CompactTextString(m) } +func (*ResultStatus) ProtoMessage() {} +func (*ResultStatus) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{0} } + +func (m *ResultStatus) GetNodeInfo() *NodeInfo { + if m != nil { + return m.NodeInfo + } + return nil +} + +func (m *ResultStatus) GetPubKey() *PubKey { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *ResultStatus) GetLatestBlockHash() []byte { + if m != nil { + return m.LatestBlockHash + } + return nil +} + +func (m *ResultStatus) GetLatestBlockHeight() int64 { + if m != nil && m.LatestBlockHeight != nil { + return *m.LatestBlockHeight + } + return 0 +} + +func (m *ResultStatus) GetLatestBlocktime() int64 { + if m != nil && m.LatestBlocktime != nil { + return *m.LatestBlocktime + } + return 0 +} + +type NodeInfo struct { + Id *ID `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` + Moniker *string `protobuf:"bytes,2,req,name=moniker" json:"moniker,omitempty"` + Network *string `protobuf:"bytes,3,req,name=network" json:"network,omitempty"` + RemoteAddr *string `protobuf:"bytes,4,req,name=remoteAddr" json:"remoteAddr,omitempty"` + ListenAddr *string `protobuf:"bytes,5,req,name=listenAddr" json:"listenAddr,omitempty"` + Version *string `protobuf:"bytes,6,req,name=version" json:"version,omitempty"` + Other []string `protobuf:"bytes,7,rep,name=other" json:"other,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NodeInfo) Reset() { *m = NodeInfo{} } +func (m *NodeInfo) String() string { return proto.CompactTextString(m) } +func (*NodeInfo) ProtoMessage() {} +func (*NodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{1} } + +func (m *NodeInfo) GetId() *ID { + if m != nil { + return m.Id + } + return nil +} + +func (m *NodeInfo) GetMoniker() string { + if m != nil && m.Moniker != nil { + return *m.Moniker + } + return "" +} + +func (m *NodeInfo) GetNetwork() string { + if m != nil && m.Network != nil { + return *m.Network + } + return "" +} + +func (m *NodeInfo) GetRemoteAddr() string { + if m != nil && m.RemoteAddr != nil { + return *m.RemoteAddr + } + return "" +} + +func (m *NodeInfo) GetListenAddr() string { + 
if m != nil && m.ListenAddr != nil { + return *m.ListenAddr + } + return "" +} + +func (m *NodeInfo) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *NodeInfo) GetOther() []string { + if m != nil { + return m.Other + } + return nil +} + +type ID struct { + Id *string `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ID) Reset() { *m = ID{} } +func (m *ID) String() string { return proto.CompactTextString(m) } +func (*ID) ProtoMessage() {} +func (*ID) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{2} } + +func (m *ID) GetId() string { + if m != nil && m.Id != nil { + return *m.Id + } + return "" +} + +type PubKey struct { + Ed25519 *PubKeyEd25519 `protobuf:"bytes,1,opt,name=ed25519" json:"ed25519,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PubKey) Reset() { *m = PubKey{} } +func (m *PubKey) String() string { return proto.CompactTextString(m) } +func (*PubKey) ProtoMessage() {} +func (*PubKey) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{3} } + +func (m *PubKey) GetEd25519() *PubKeyEd25519 { + if m != nil { + return m.Ed25519 + } + return nil +} + +type PubKeyEd25519 struct { + Bytes []byte `protobuf:"bytes,1,req,name=bytes" json:"bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PubKeyEd25519) Reset() { *m = PubKeyEd25519{} } +func (m *PubKeyEd25519) String() string { return proto.CompactTextString(m) } +func (*PubKeyEd25519) ProtoMessage() {} +func (*PubKeyEd25519) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{4} } + +func (m *PubKeyEd25519) GetBytes() []byte { + if m != nil { + return m.Bytes + } + return nil +} + +func init() { + proto.RegisterType((*ResultStatus)(nil), "ResultStatus") + proto.RegisterType((*NodeInfo)(nil), "NodeInfo") + proto.RegisterType((*ID)(nil), "ID") + proto.RegisterType((*PubKey)(nil), "PubKey") + proto.RegisterType((*PubKeyEd25519)(nil), "PubKeyEd25519") +} +func (m *ResultStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResultStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeInfo != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTest(dAtA, i, uint64(m.NodeInfo.Size())) + n1, err := m.NodeInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PubKey == nil { + return 0, proto.NewRequiredNotSetError("pubKey") + } else { + dAtA[i] = 0x12 + i++ + i = encodeVarintTest(dAtA, i, uint64(m.PubKey.Size())) + n2, err := m.PubKey.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.LatestBlockHash == nil { + return 0, proto.NewRequiredNotSetError("latestBlockHash") + } else { + dAtA[i] = 0x1a + i++ + i = encodeVarintTest(dAtA, i, uint64(len(m.LatestBlockHash))) + i += copy(dAtA[i:], m.LatestBlockHash) + } + if m.LatestBlockHeight == nil { + return 0, proto.NewRequiredNotSetError("latestBlockHeight") + } else { + dAtA[i] = 0x20 + i++ + i = encodeVarintTest(dAtA, i, uint64(*m.LatestBlockHeight)) + } + if m.LatestBlocktime == nil { + return 0, proto.NewRequiredNotSetError("latestBlocktime") + } else { + dAtA[i] = 0x28 + i++ + i = encodeVarintTest(dAtA, i, uint64(*m.LatestBlocktime)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *NodeInfo) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id == nil { + return 0, proto.NewRequiredNotSetError("id") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintTest(dAtA, i, uint64(m.Id.Size())) + n3, err := m.Id.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Moniker == nil { + return 0, proto.NewRequiredNotSetError("moniker") + } else { + dAtA[i] = 0x12 + i++ + i = encodeVarintTest(dAtA, i, uint64(len(*m.Moniker))) + i += copy(dAtA[i:], *m.Moniker) + } + if m.Network == nil { + return 0, proto.NewRequiredNotSetError("network") + } else { + dAtA[i] = 0x1a + i++ + i = encodeVarintTest(dAtA, i, uint64(len(*m.Network))) + i += copy(dAtA[i:], *m.Network) + } + if m.RemoteAddr == nil { + return 0, proto.NewRequiredNotSetError("remoteAddr") + } else { + dAtA[i] = 0x22 + i++ + i = encodeVarintTest(dAtA, i, uint64(len(*m.RemoteAddr))) + i += copy(dAtA[i:], *m.RemoteAddr) + } + if m.ListenAddr == nil { + return 0, proto.NewRequiredNotSetError("listenAddr") + } else { + dAtA[i] = 0x2a + i++ + i = encodeVarintTest(dAtA, i, uint64(len(*m.ListenAddr))) + i += copy(dAtA[i:], *m.ListenAddr) + } + if m.Version == nil { + return 0, proto.NewRequiredNotSetError("version") + } else { + dAtA[i] = 0x32 + i++ + i = encodeVarintTest(dAtA, i, uint64(len(*m.Version))) + i += copy(dAtA[i:], *m.Version) + } + if len(m.Other) > 0 { + for _, s := range m.Other { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id == nil { + return 0, proto.NewRequiredNotSetError("id") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintTest(dAtA, i, uint64(len(*m.Id))) + i += copy(dAtA[i:], *m.Id) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PubKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Ed25519 != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTest(dAtA, i, uint64(m.Ed25519.Size())) + n4, err := m.Ed25519.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PubKeyEd25519) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKeyEd25519) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Bytes == nil { + return 0, proto.NewRequiredNotSetError("bytes") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintTest(dAtA, i, uint64(len(m.Bytes))) + i += copy(dAtA[i:], m.Bytes) + } 
+ if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintTest(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ResultStatus) Size() (n int) { + var l int + _ = l + if m.NodeInfo != nil { + l = m.NodeInfo.Size() + n += 1 + l + sovTest(uint64(l)) + } + if m.PubKey != nil { + l = m.PubKey.Size() + n += 1 + l + sovTest(uint64(l)) + } + if m.LatestBlockHash != nil { + l = len(m.LatestBlockHash) + n += 1 + l + sovTest(uint64(l)) + } + if m.LatestBlockHeight != nil { + n += 1 + sovTest(uint64(*m.LatestBlockHeight)) + } + if m.LatestBlocktime != nil { + n += 1 + sovTest(uint64(*m.LatestBlocktime)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *NodeInfo) Size() (n int) { + var l int + _ = l + if m.Id != nil { + l = m.Id.Size() + n += 1 + l + sovTest(uint64(l)) + } + if m.Moniker != nil { + l = len(*m.Moniker) + n += 1 + l + sovTest(uint64(l)) + } + if m.Network != nil { + l = len(*m.Network) + n += 1 + l + sovTest(uint64(l)) + } + if m.RemoteAddr != nil { + l = len(*m.RemoteAddr) + n += 1 + l + sovTest(uint64(l)) + } + if m.ListenAddr != nil { + l = len(*m.ListenAddr) + n += 1 + l + sovTest(uint64(l)) + } + if m.Version != nil { + l = len(*m.Version) + n += 1 + l + sovTest(uint64(l)) + } + if len(m.Other) > 0 { + for _, s := range m.Other { + l = len(s) + n += 1 + l + sovTest(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ID) Size() (n int) { + var l int + _ = l + if m.Id != nil { + l = len(*m.Id) + n += 1 + l + sovTest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PubKey) Size() (n int) { + var l int + _ = l + if m.Ed25519 != nil { + l = m.Ed25519.Size() + n += 1 + l + sovTest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PubKeyEd25519) Size() (n int) { + var l int + _ = l + if m.Bytes != nil { + l = len(m.Bytes) + n += 1 + l + sovTest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTest(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +func (m *ResultStatus) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResultStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResultStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + msglen 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeInfo == nil { + m.NodeInfo = &NodeInfo{} + } + if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PubKey == nil { + m.PubKey = &PubKey{} + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LatestBlockHash = append(m.LatestBlockHash[:0], dAtA[iNdEx:postIndex]...) + if m.LatestBlockHash == nil { + m.LatestBlockHash = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHeight", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LatestBlockHeight = &v + hasFields[0] |= uint64(0x00000004) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlocktime", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LatestBlocktime = &v + hasFields[0] |= uint64(0x00000008) + default: + iNdEx = preIndex + skippy, err := skipTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return proto.NewRequiredNotSetError("pubKey") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return proto.NewRequiredNotSetError("latestBlockHash") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return proto.NewRequiredNotSetError("latestBlockHeight") + } + if hasFields[0]&uint64(0x00000008) == 0 { + return proto.NewRequiredNotSetError("latestBlocktime") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Id == nil { + m.Id = &ID{} + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Moniker = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Network = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoteAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RemoteAddr = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000008) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListenAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ListenAddr = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000010) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Version = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000020) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Other", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Other = append(m.Other, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return proto.NewRequiredNotSetError("id") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return proto.NewRequiredNotSetError("moniker") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return proto.NewRequiredNotSetError("network") + } + if hasFields[0]&uint64(0x00000008) == 0 { + return proto.NewRequiredNotSetError("remoteAddr") + } + if hasFields[0]&uint64(0x00000010) == 0 { + return proto.NewRequiredNotSetError("listenAddr") + } + if hasFields[0]&uint64(0x00000020) == 0 { + return proto.NewRequiredNotSetError("version") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Id = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return proto.NewRequiredNotSetError("id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ed25519", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ed25519 == nil { + m.Ed25519 = &PubKeyEd25519{} + } + if err := m.Ed25519.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKeyEd25519) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKeyEd25519: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKeyEd25519: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTest + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Bytes == nil { + m.Bytes = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return proto.NewRequiredNotSetError("bytes") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTest(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTest + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTest(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTest = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTest = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("test.proto", fileDescriptorTest) } + +var fileDescriptorTest = []byte{ + // 342 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x41, 0x4b, 0xf3, 0x30, + 0x1c, 0xc6, 0x49, 0xfb, 0x6e, 0x5d, 0xff, 0xeb, 0x3b, 0x31, 0xee, 0x90, 0x53, 0x2d, 0x85, 0x41, + 0x0f, 0x52, 0xb0, 0xb0, 0x83, 0x47, 0xc7, 0x04, 0x87, 0x20, 0x23, 0x7e, 0x82, 0xcd, 0xfe, 0x75, + 0x65, 0x5d, 0x33, 0x92, 0x4c, 0xd9, 0xe7, 0xf3, 0xe2, 0xd1, 0xa3, 0x47, 0xd9, 0x27, 0x91, 0xa6, + 0xed, 0x9c, 0xf3, 0xf8, 0xfc, 0x7e, 0xe5, 0xc9, 0xd3, 0x04, 0x40, 0xa3, 0xd2, 0xf1, 0x5a, 0x0a, + 0x2d, 0xc2, 0x4f, 0x02, 0x1e, 0x47, 0xb5, 0xc9, 0xf5, 0x83, 0x9e, 0xe9, 0x8d, 0xa2, 0x03, 0xe8, + 0x14, 0x22, 0xc5, 0x49, 0xf1, 0x24, 0x18, 0x09, 0x48, 0xd4, 0x4d, 0xdc, 0xf8, 0xbe, 0x06, 0x7c, + 0xaf, 0xe8, 0x39, 0xb4, 0xd7, 0x9b, 0xf9, 0x1d, 0x6e, 0x99, 0x15, 0x58, 0x51, 0x37, 0x71, 0xe2, + 0xa9, 0x89, 0xbc, 0xc6, 0x34, 0x82, 0x93, 0x7c, 0x56, 0x1e, 0x34, 0xca, 0xc5, 0xe3, 0xf2, 0x76, + 0xa6, 0x16, 
0xcc, 0x0e, 0xac, 0xc8, 0xe3, 0xc7, 0x98, 0x5e, 0xc0, 0xe9, 0x21, 0xc2, 0xec, 0x79, + 0xa1, 0xd9, 0xbf, 0xc0, 0x8a, 0x6c, 0xfe, 0x57, 0x1c, 0xf5, 0xea, 0x6c, 0x85, 0xac, 0x65, 0xbe, + 0x3d, 0xc6, 0xe1, 0x1b, 0x81, 0x4e, 0xb3, 0x9c, 0x9e, 0x81, 0x95, 0xa5, 0x8c, 0x98, 0xad, 0x76, + 0x3c, 0x19, 0x73, 0x2b, 0x4b, 0x29, 0x03, 0x67, 0x25, 0x8a, 0x6c, 0x89, 0xd2, 0xfc, 0x85, 0xcb, + 0x9b, 0x58, 0x9a, 0x02, 0xf5, 0xab, 0x90, 0x4b, 0xb3, 0xda, 0xe5, 0x4d, 0xa4, 0x3e, 0x80, 0xc4, + 0x95, 0xd0, 0x78, 0x9d, 0xa6, 0xd2, 0xcc, 0x74, 0xf9, 0x01, 0x29, 0x7d, 0x9e, 0x29, 0x8d, 0x85, + 0xf1, 0xad, 0xca, 0xff, 0x90, 0xb2, 0xf9, 0x05, 0xa5, 0xca, 0x44, 0xc1, 0xda, 0x55, 0x73, 0x1d, + 0x69, 0x1f, 0x5a, 0x42, 0x2f, 0x50, 0x32, 0x27, 0xb0, 0x23, 0x97, 0x57, 0x21, 0xec, 0x83, 0x35, + 0x19, 0xd3, 0xde, 0x7e, 0xbe, 0x5b, 0x2e, 0x0f, 0x13, 0x68, 0x4f, 0x9b, 0x7b, 0x76, 0x30, 0x4d, + 0x86, 0xc3, 0xcb, 0xab, 0xfa, 0xb9, 0x7a, 0xf5, 0x4b, 0xdc, 0x54, 0x94, 0x37, 0x3a, 0x1c, 0xc0, + 0xff, 0x5f, 0xa6, 0x3c, 0x70, 0xbe, 0xd5, 0xa8, 0x4c, 0xaf, 0xc7, 0xab, 0x30, 0xf2, 0xde, 0x77, + 0x3e, 0xf9, 0xd8, 0xf9, 0xe4, 0x6b, 0xe7, 0x93, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xee, + 0x6b, 0xdd, 0x2c, 0x02, 0x00, 0x00, +} diff --git a/benchmarks/proto/test.proto b/benchmarks/proto/test.proto new file mode 100644 index 000000000..6d770d98a --- /dev/null +++ b/benchmarks/proto/test.proto @@ -0,0 +1,29 @@ +message ResultStatus { + optional NodeInfo nodeInfo = 1; + required PubKey pubKey = 2; + required bytes latestBlockHash = 3; + required int64 latestBlockHeight = 4; + required int64 latestBlocktime = 5; +} + +message NodeInfo { + required ID id = 1; + required string moniker = 2; + required string network = 3; + required string remoteAddr = 4; + required string listenAddr = 5; + required string version = 6; + repeated string other = 7; +} + +message ID { + required string id = 1; +} + +message PubKey { + optional PubKeyEd25519 ed25519 = 1; +} + +message PubKeyEd25519 { + required bytes bytes = 1; +} diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go new file mode 100644 index 000000000..b7d2c4d63 --- /dev/null +++ b/benchmarks/simu/counter.go @@ -0,0 +1,47 @@ +package main + +import ( + "context" + "encoding/binary" + "fmt" + "time" + + rpcclient "github.com/tendermint/tendermint/rpc/lib/client" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func main() { + wsc := rpcclient.NewWSClient("127.0.0.1:26657", "/websocket") + err := wsc.Start() + if err != nil { + cmn.Exit(err.Error()) + } + defer wsc.Stop() + + // Read a bunch of responses + go func() { + for { + _, ok := <-wsc.ResponsesCh + if !ok { + break + } + //fmt.Println("Received response", string(wire.JSONBytes(res))) + } + }() + + // Make a bunch of requests + buf := make([]byte, 32) + for i := 0; ; i++ { + binary.BigEndian.PutUint64(buf, uint64(i)) + //txBytes := hex.EncodeToString(buf[:n]) + fmt.Print(".") + err = wsc.Call(context.TODO(), "broadcast_tx", map[string]interface{}{"tx": buf[:8]}) + if err != nil { + cmn.Exit(err.Error()) + } + if i%1000 == 0 { + fmt.Println(i) + } + time.Sleep(time.Microsecond * 1000) + } +} diff --git a/blockchain/pool.go b/blockchain/pool.go new file mode 100644 index 000000000..e379d846a --- /dev/null +++ b/blockchain/pool.go @@ -0,0 +1,587 @@ +package blockchain + +import ( + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + flow "github.com/tendermint/tendermint/libs/flowrate" + "github.com/tendermint/tendermint/libs/log" + + 
"github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +/* +eg, L = latency = 0.1s + P = num peers = 10 + FN = num full nodes + BS = 1kB block size + CB = 1 Mbit/s = 128 kB/s + CB/P = 12.8 kB + B/S = CB/P/BS = 12.8 blocks/s + + 12.8 * 0.1 = 1.28 blocks on conn +*/ + +const ( + requestIntervalMS = 100 + maxTotalRequesters = 1000 + maxPendingRequests = maxTotalRequesters + maxPendingRequestsPerPeer = 50 + + // Minimum recv rate to ensure we're receiving blocks from a peer fast + // enough. If a peer is not sending us data at at least that rate, we + // consider them to have timedout and we disconnect. + // + // Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s, + // sending data across atlantic ~ 7.5 KB/s. + minRecvRate = 7680 + + // Maximum difference between current and new block's height. + maxDiffBetweenCurrentAndReceivedBlockHeight = 100 +) + +var peerTimeout = 15 * time.Second // not const so we can override with tests + +/* + Peers self report their heights when we join the block pool. + Starting from our latest pool.height, we request blocks + in sequence from peers that reported higher heights than ours. + Every so often we ask peers what height they're on so we can keep going. + + Requests are continuously made for blocks of higher heights until + the limit is reached. If most of the requests have no available peers, and we + are not at peer limits, we can probably switch to consensus reactor +*/ + +type BlockPool struct { + cmn.BaseService + startTime time.Time + + mtx sync.Mutex + // block requests + requesters map[int64]*bpRequester + height int64 // the lowest key in requesters. + // peers + peers map[p2p.ID]*bpPeer + maxPeerHeight int64 + + // atomic + numPending int32 // number of requests pending assignment or block response + + requestsCh chan<- BlockRequest + errorsCh chan<- peerError +} + +func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { + bp := &BlockPool{ + peers: make(map[p2p.ID]*bpPeer), + + requesters: make(map[int64]*bpRequester), + height: start, + numPending: 0, + + requestsCh: requestsCh, + errorsCh: errorsCh, + } + bp.BaseService = *cmn.NewBaseService(nil, "BlockPool", bp) + return bp +} + +func (pool *BlockPool) OnStart() error { + go pool.makeRequestersRoutine() + pool.startTime = time.Now() + return nil +} + +func (pool *BlockPool) OnStop() {} + +// Run spawns requesters as needed. +func (pool *BlockPool) makeRequestersRoutine() { + for { + if !pool.IsRunning() { + break + } + + _, numPending, lenRequesters := pool.GetStatus() + if numPending >= maxPendingRequests { + // sleep for a bit. + time.Sleep(requestIntervalMS * time.Millisecond) + // check for timed out peers + pool.removeTimedoutPeers() + } else if lenRequesters >= maxTotalRequesters { + // sleep for a bit. + time.Sleep(requestIntervalMS * time.Millisecond) + // check for timed out peers + pool.removeTimedoutPeers() + } else { + // request for more blocks. 
+			pool.makeNextRequester()
+		}
+	}
+}
+
+func (pool *BlockPool) removeTimedoutPeers() {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	for _, peer := range pool.peers {
+		if !peer.didTimeout && peer.numPending > 0 {
+			curRate := peer.recvMonitor.Status().CurRate
+			// curRate can be 0 on start
+			if curRate != 0 && curRate < minRecvRate {
+				err := errors.New("peer is not sending us data fast enough")
+				pool.sendError(err, peer.id)
+				pool.Logger.Error("SendTimeout", "peer", peer.id,
+					"reason", err,
+					"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
+					"minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024))
+				peer.didTimeout = true
+			}
+		}
+		if peer.didTimeout {
+			pool.removePeer(peer.id)
+		}
+	}
+}
+
+func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
+}
+
+// TODO: relax conditions, prevent abuse.
+func (pool *BlockPool) IsCaughtUp() bool {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	// Need at least 1 peer to be considered caught up.
+	if len(pool.peers) == 0 {
+		pool.Logger.Debug("Blockpool has no peers")
+		return false
+	}
+
+	// some conditions to determine if we're caught up
+	receivedBlockOrTimedOut := (pool.height > 0 || time.Since(pool.startTime) > 5*time.Second)
+	ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
+	isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
+	return isCaughtUp
+}
+
+// We need to see the second block's Commit to validate the first block.
+// So we peek two blocks at a time.
+// The caller will verify the commit.
+func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	if r := pool.requesters[pool.height]; r != nil {
+		first = r.getBlock()
+	}
+	if r := pool.requesters[pool.height+1]; r != nil {
+		second = r.getBlock()
+	}
+	return
+}
+
+// Pops the first block at pool.height.
+// It must have been validated by the second block's Commit from PeekTwoBlocks().
+func (pool *BlockPool) PopRequest() {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	if r := pool.requesters[pool.height]; r != nil {
+		/*  The block can disappear at any time, due to removePeer().
+		if r := pool.requesters[pool.height]; r == nil || r.block == nil {
+			PanicSanity("PopRequest() requires a valid block")
+		}
+		*/
+		r.Stop()
+		delete(pool.requesters, pool.height)
+		pool.height++
+	} else {
+		panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
+	}
+}
+
+// Invalidates the block at pool.height, removes the peer,
+// and redoes the request from other peers.
+// Returns the ID of the removed peer.
+func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	request := pool.requesters[height]
+
+	if request.block == nil {
+		panic("Expected block to be non-nil")
+	}
+
+	// RemovePeer will redo all requesters associated with this peer.
+	pool.removePeer(request.peerID)
+	return request.peerID
+}
+
+// TODO: ensure that blocks come in order for each peer.
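+//
+// AddBlock hands a block received from a peer to the requester for that
+// height. A sketch of how a consumer drives the pool, mirroring
+// BlockchainReactor.Receive and poolRoutine later in this package
+// (abbreviated, illustrative names):
+//
+//	pool := NewBlockPool(store.Height()+1, requestsCh, errorsCh)
+//	pool.Start()
+//	// on bcStatusResponseMessage: pool.SetPeerHeight(src.ID(), msg.Height)
+//	// on bcBlockResponseMessage:  pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
+//	// sync loop:
+//	first, second := pool.PeekTwoBlocks()
+//	// ...verify first against second.LastCommit, then...
+//	pool.PopRequest()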
+func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + requester := pool.requesters[block.Height] + if requester == nil { + pool.Logger.Info("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height) + diff := pool.height - block.Height + if diff < 0 { + diff *= -1 + } + if diff > maxDiffBetweenCurrentAndReceivedBlockHeight { + pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID) + } + return + } + + if requester.setBlock(block, peerID) { + atomic.AddInt32(&pool.numPending, -1) + peer := pool.peers[peerID] + if peer != nil { + peer.decrPending(blockSize) + } + } else { + // Bad peer? + } +} + +// MaxPeerHeight returns the highest height reported by a peer. +func (pool *BlockPool) MaxPeerHeight() int64 { + pool.mtx.Lock() + defer pool.mtx.Unlock() + return pool.maxPeerHeight +} + +// Sets the peer's alleged blockchain height. +func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + peer := pool.peers[peerID] + if peer != nil { + peer.height = height + } else { + peer = newBPPeer(pool, peerID, height) + peer.setLogger(pool.Logger.With("peer", peerID)) + pool.peers[peerID] = peer + } + + if height > pool.maxPeerHeight { + pool.maxPeerHeight = height + } +} + +func (pool *BlockPool) RemovePeer(peerID p2p.ID) { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + pool.removePeer(peerID) +} + +func (pool *BlockPool) removePeer(peerID p2p.ID) { + for _, requester := range pool.requesters { + if requester.getPeerID() == peerID { + requester.redo() + } + } + delete(pool.peers, peerID) +} + +// Pick an available peer with at least the given minHeight. +// If no peers are available, returns nil. 
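+// As a side effect the chosen peer's pending counter is incremented
+// (the "Incr" in the name); the first outstanding request also starts the
+// peer's recv-rate monitor and timeout timer (see incrPending).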
+func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + for _, peer := range pool.peers { + if peer.didTimeout { + pool.removePeer(peer.id) + continue + } + if peer.numPending >= maxPendingRequestsPerPeer { + continue + } + if peer.height < minHeight { + continue + } + peer.incrPending() + return peer + } + return nil +} + +func (pool *BlockPool) makeNextRequester() { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + nextHeight := pool.height + pool.requestersLen() + request := newBPRequester(pool, nextHeight) + // request.SetLogger(pool.Logger.With("height", nextHeight)) + + pool.requesters[nextHeight] = request + atomic.AddInt32(&pool.numPending, 1) + + err := request.Start() + if err != nil { + request.Logger.Error("Error starting request", "err", err) + } +} + +func (pool *BlockPool) requestersLen() int64 { + return int64(len(pool.requesters)) +} + +func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) { + if !pool.IsRunning() { + return + } + pool.requestsCh <- BlockRequest{height, peerID} +} + +func (pool *BlockPool) sendError(err error, peerID p2p.ID) { + if !pool.IsRunning() { + return + } + pool.errorsCh <- peerError{err, peerID} +} + +// unused by tendermint; left for debugging purposes +func (pool *BlockPool) debug() string { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + str := "" + nextHeight := pool.height + pool.requestersLen() + for h := pool.height; h < nextHeight; h++ { + if pool.requesters[h] == nil { + str += cmn.Fmt("H(%v):X ", h) + } else { + str += cmn.Fmt("H(%v):", h) + str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil) + } + } + return str +} + +//------------------------------------- + +type bpPeer struct { + pool *BlockPool + id p2p.ID + recvMonitor *flow.Monitor + + height int64 + numPending int32 + timeout *time.Timer + didTimeout bool + + logger log.Logger +} + +func newBPPeer(pool *BlockPool, peerID p2p.ID, height int64) *bpPeer { + peer := &bpPeer{ + pool: pool, + id: peerID, + height: height, + numPending: 0, + logger: log.NewNopLogger(), + } + return peer +} + +func (peer *bpPeer) setLogger(l log.Logger) { + peer.logger = l +} + +func (peer *bpPeer) resetMonitor() { + peer.recvMonitor = flow.New(time.Second, time.Second*40) + initialValue := float64(minRecvRate) * math.E + peer.recvMonitor.SetREMA(initialValue) +} + +func (peer *bpPeer) resetTimeout() { + if peer.timeout == nil { + peer.timeout = time.AfterFunc(peerTimeout, peer.onTimeout) + } else { + peer.timeout.Reset(peerTimeout) + } +} + +func (peer *bpPeer) incrPending() { + if peer.numPending == 0 { + peer.resetMonitor() + peer.resetTimeout() + } + peer.numPending++ +} + +func (peer *bpPeer) decrPending(recvSize int) { + peer.numPending-- + if peer.numPending == 0 { + peer.timeout.Stop() + } else { + peer.recvMonitor.Update(recvSize) + peer.resetTimeout() + } +} + +func (peer *bpPeer) onTimeout() { + peer.pool.mtx.Lock() + defer peer.pool.mtx.Unlock() + + err := errors.New("peer did not send us anything") + peer.pool.sendError(err, peer.id) + peer.logger.Error("SendTimeout", "reason", err, "timeout", peerTimeout) + peer.didTimeout = true +} + +//------------------------------------- + +type bpRequester struct { + cmn.BaseService + pool *BlockPool + height int64 + gotBlockCh chan struct{} + redoCh chan struct{} + + mtx sync.Mutex + peerID p2p.ID + block *types.Block +} + +func newBPRequester(pool *BlockPool, height int64) *bpRequester { + bpr := &bpRequester{ + pool: pool, + height: height, + gotBlockCh: make(chan 
struct{}, 1), + redoCh: make(chan struct{}, 1), + + peerID: "", + block: nil, + } + bpr.BaseService = *cmn.NewBaseService(nil, "bpRequester", bpr) + return bpr +} + +func (bpr *bpRequester) OnStart() error { + go bpr.requestRoutine() + return nil +} + +// Returns true if the peer matches and block doesn't already exist. +func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool { + bpr.mtx.Lock() + if bpr.block != nil || bpr.peerID != peerID { + bpr.mtx.Unlock() + return false + } + bpr.block = block + bpr.mtx.Unlock() + + select { + case bpr.gotBlockCh <- struct{}{}: + default: + } + return true +} + +func (bpr *bpRequester) getBlock() *types.Block { + bpr.mtx.Lock() + defer bpr.mtx.Unlock() + return bpr.block +} + +func (bpr *bpRequester) getPeerID() p2p.ID { + bpr.mtx.Lock() + defer bpr.mtx.Unlock() + return bpr.peerID +} + +// This is called from the requestRoutine, upon redo(). +func (bpr *bpRequester) reset() { + bpr.mtx.Lock() + defer bpr.mtx.Unlock() + + if bpr.block != nil { + atomic.AddInt32(&bpr.pool.numPending, 1) + } + + bpr.peerID = "" + bpr.block = nil +} + +// Tells bpRequester to pick another peer and try again. +// NOTE: Nonblocking, and does nothing if another redo +// was already requested. +func (bpr *bpRequester) redo() { + select { + case bpr.redoCh <- struct{}{}: + default: + } +} + +// Responsible for making more requests as necessary +// Returns only when a block is found (e.g. AddBlock() is called) +func (bpr *bpRequester) requestRoutine() { +OUTER_LOOP: + for { + // Pick a peer to send request to. + var peer *bpPeer + PICK_PEER_LOOP: + for { + if !bpr.IsRunning() || !bpr.pool.IsRunning() { + return + } + peer = bpr.pool.pickIncrAvailablePeer(bpr.height) + if peer == nil { + //log.Info("No peers available", "height", height) + time.Sleep(requestIntervalMS * time.Millisecond) + continue PICK_PEER_LOOP + } + break PICK_PEER_LOOP + } + bpr.mtx.Lock() + bpr.peerID = peer.id + bpr.mtx.Unlock() + + // Send request and wait. + bpr.pool.sendRequest(bpr.height, peer.id) + WAIT_LOOP: + for { + select { + case <-bpr.pool.Quit(): + bpr.Stop() + return + case <-bpr.Quit(): + return + case <-bpr.redoCh: + bpr.reset() + continue OUTER_LOOP + case <-bpr.gotBlockCh: + // We got a block! + // Continue the for-loop and wait til Quit. 
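+				// The requester holds on to the block until the pool
+				// pops this height (PopRequest) or triggers a redo.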
+ continue WAIT_LOOP + } + } + } +} + +//------------------------------------- + +type BlockRequest struct { + Height int64 + PeerID p2p.ID +} diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go new file mode 100644 index 000000000..c2f615f94 --- /dev/null +++ b/blockchain/pool_test.go @@ -0,0 +1,148 @@ +package blockchain + +import ( + "math/rand" + "testing" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +func init() { + peerTimeout = 2 * time.Second +} + +type testPeer struct { + id p2p.ID + height int64 +} + +func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer { + peers := make(map[p2p.ID]testPeer, numPeers) + for i := 0; i < numPeers; i++ { + peerID := p2p.ID(cmn.RandStr(12)) + height := minHeight + rand.Int63n(maxHeight-minHeight) + peers[peerID] = testPeer{peerID, height} + } + return peers +} + +func TestBasic(t *testing.T) { + start := int64(42) + peers := makePeers(10, start+1, 1000) + errorsCh := make(chan peerError, 1000) + requestsCh := make(chan BlockRequest, 1000) + pool := NewBlockPool(start, requestsCh, errorsCh) + pool.SetLogger(log.TestingLogger()) + + err := pool.Start() + if err != nil { + t.Error(err) + } + + defer pool.Stop() + + // Introduce each peer. + go func() { + for _, peer := range peers { + pool.SetPeerHeight(peer.id, peer.height) + } + }() + + // Start a goroutine to pull blocks + go func() { + for { + if !pool.IsRunning() { + return + } + first, second := pool.PeekTwoBlocks() + if first != nil && second != nil { + pool.PopRequest() + } else { + time.Sleep(1 * time.Second) + } + } + }() + + // Pull from channels + for { + select { + case err := <-errorsCh: + t.Error(err) + case request := <-requestsCh: + t.Logf("Pulled new BlockRequest %v", request) + if request.Height == 300 { + return // Done! + } + // Request desired, pretend like we got the block immediately. + go func() { + block := &types.Block{Header: &types.Header{Height: request.Height}} + pool.AddBlock(request.PeerID, block, 123) + t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height) + }() + } + } +} + +func TestTimeout(t *testing.T) { + start := int64(42) + peers := makePeers(10, start+1, 1000) + errorsCh := make(chan peerError, 1000) + requestsCh := make(chan BlockRequest, 1000) + pool := NewBlockPool(start, requestsCh, errorsCh) + pool.SetLogger(log.TestingLogger()) + err := pool.Start() + if err != nil { + t.Error(err) + } + defer pool.Stop() + + for _, peer := range peers { + t.Logf("Peer %v", peer.id) + } + + // Introduce each peer. + go func() { + for _, peer := range peers { + pool.SetPeerHeight(peer.id, peer.height) + } + }() + + // Start a goroutine to pull blocks + go func() { + for { + if !pool.IsRunning() { + return + } + first, second := pool.PeekTwoBlocks() + if first != nil && second != nil { + pool.PopRequest() + } else { + time.Sleep(1 * time.Second) + } + } + }() + + // Pull from channels + counter := 0 + timedOut := map[p2p.ID]struct{}{} + for { + select { + case err := <-errorsCh: + t.Log(err) + // consider error to be always timeout here + if _, ok := timedOut[err.peerID]; !ok { + counter++ + if counter == len(peers) { + return // Done! 
+ } + } + case request := <-requestsCh: + t.Logf("Pulled new BlockRequest %+v", request) + } + } +} diff --git a/blockchain/reactor.go b/blockchain/reactor.go new file mode 100644 index 000000000..449a42ff0 --- /dev/null +++ b/blockchain/reactor.go @@ -0,0 +1,400 @@ +package blockchain + +import ( + "fmt" + "reflect" + "time" + + amino "github.com/tendermint/go-amino" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +const ( + // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) + BlockchainChannel = byte(0x40) + + trySyncIntervalMS = 50 + // stop syncing when last block's time is + // within this much of the system time. + // stopSyncingDurationMinutes = 10 + + // ask for best height every 10s + statusUpdateIntervalSeconds = 10 + // check if we should switch to consensus reactor + switchToConsensusIntervalSeconds = 1 + + // NOTE: keep up to date with bcBlockResponseMessage + bcBlockResponseMessagePrefixSize = 4 + bcBlockResponseMessageFieldKeySize = 1 + maxMsgSize = types.MaxBlockSizeBytes + + bcBlockResponseMessagePrefixSize + + bcBlockResponseMessageFieldKeySize +) + +type consensusReactor interface { + // for when we switch from blockchain reactor and fast sync to + // the consensus machine + SwitchToConsensus(sm.State, int) +} + +type peerError struct { + err error + peerID p2p.ID +} + +func (e peerError) Error() string { + return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error()) +} + +// BlockchainReactor handles long-term catchup syncing. +type BlockchainReactor struct { + p2p.BaseReactor + + // immutable + initialState sm.State + + blockExec *sm.BlockExecutor + store *BlockStore + pool *BlockPool + fastSync bool + + requestsCh <-chan BlockRequest + errorsCh <-chan peerError +} + +// NewBlockchainReactor returns new reactor instance. +func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore, + fastSync bool) *BlockchainReactor { + + if state.LastBlockHeight != store.Height() { + panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, + store.Height())) + } + + const capacity = 1000 // must be bigger than peers count + requestsCh := make(chan BlockRequest, capacity) + errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock + + pool := NewBlockPool( + store.Height()+1, + requestsCh, + errorsCh, + ) + + bcR := &BlockchainReactor{ + initialState: state, + blockExec: blockExec, + store: store, + pool: pool, + fastSync: fastSync, + requestsCh: requestsCh, + errorsCh: errorsCh, + } + bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR) + return bcR +} + +// SetLogger implements cmn.Service by setting the logger on reactor and pool. +func (bcR *BlockchainReactor) SetLogger(l log.Logger) { + bcR.BaseService.Logger = l + bcR.pool.Logger = l +} + +// OnStart implements cmn.Service. +func (bcR *BlockchainReactor) OnStart() error { + if err := bcR.BaseReactor.OnStart(); err != nil { + return err + } + if bcR.fastSync { + err := bcR.pool.Start() + if err != nil { + return err + } + go bcR.poolRoutine() + } + return nil +} + +// OnStop implements cmn.Service. 
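+// It also stops the block pool.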
+func (bcR *BlockchainReactor) OnStop() {
+	bcR.BaseReactor.OnStop()
+	bcR.pool.Stop()
+}
+
+// GetChannels implements Reactor
+func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
+	return []*p2p.ChannelDescriptor{
+		{
+			ID:                  BlockchainChannel,
+			Priority:            10,
+			SendQueueCapacity:   1000,
+			RecvBufferCapacity:  50 * 4096,
+			RecvMessageCapacity: maxMsgSize,
+		},
+	}
+}
+
+// AddPeer implements Reactor by sending our state to the peer.
+func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
+	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
+	if !peer.Send(BlockchainChannel, msgBytes) {
+		// doing nothing, will try later in `poolRoutine`
+	}
+	// peer is added to the pool once we receive the first
+	// bcStatusResponseMessage from the peer and call pool.SetPeerHeight
+}
+
+// RemovePeer implements Reactor by removing the peer from the pool.
+func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+	bcR.pool.RemovePeer(peer.ID())
+}
+
+// respondToPeer loads a block and sends it to the requesting peer,
+// if we have it. Otherwise, we'll respond saying we don't have it.
+// According to the Tendermint spec, if all nodes are honest,
+// no node should request a block that doesn't exist.
+func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
+	src p2p.Peer) (queued bool) {
+
+	block := bcR.store.LoadBlock(msg.Height)
+	if block != nil {
+		msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
+		return src.TrySend(BlockchainChannel, msgBytes)
+	}
+
+	bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
+
+	msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
+	return src.TrySend(BlockchainChannel, msgBytes)
+}
+
+// Receive implements Reactor by handling 4 types of messages (see below).
+func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+	msg, err := decodeMsg(msgBytes)
+	if err != nil {
+		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		bcR.Switch.StopPeerForError(src, err)
+		return
+	}
+
+	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
+
+	switch msg := msg.(type) {
+	case *bcBlockRequestMessage:
+		if queued := bcR.respondToPeer(msg, src); !queued {
+			// Unfortunately not queued since the queue is full.
+		}
+	case *bcBlockResponseMessage:
+		// Got a block.
+		bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
+	case *bcStatusRequestMessage:
+		// Send peer our state.
+		msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
+		queued := src.TrySend(BlockchainChannel, msgBytes)
+		if !queued {
+			// sorry
+		}
+	case *bcStatusResponseMessage:
+		// Got a peer status. Unverified.
+		bcR.pool.SetPeerHeight(src.ID(), msg.Height)
+	default:
+		bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
+	}
+}
+
+// Handle messages from the poolReactor telling the reactor what to do.
+// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
+// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
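+//
+// The select below multiplexes: block requests coming out of the pool
+// (requestsCh), peer errors (errorsCh), a ticker broadcasting status
+// requests, a ticker checking whether to switch to the consensus reactor,
+// and a ticker driving the SYNC_LOOP.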
+func (bcR *BlockchainReactor) poolRoutine() {
+
+	trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
+	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
+	switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
+
+	blocksSynced := 0
+
+	chainID := bcR.initialState.ChainID
+	state := bcR.initialState
+
+	lastHundred := time.Now()
+	lastRate := 0.0
+
+FOR_LOOP:
+	for {
+		select {
+		case request := <-bcR.requestsCh:
+			peer := bcR.Switch.Peers().Get(request.PeerID)
+			if peer == nil {
+				continue FOR_LOOP // Peer has since been disconnected.
+			}
+			msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
+			queued := peer.TrySend(BlockchainChannel, msgBytes)
+			if !queued {
+				// We couldn't make the request, send-queue full.
+				// The pool handles timeouts, just let it go.
+				continue FOR_LOOP
+			}
+		case err := <-bcR.errorsCh:
+			peer := bcR.Switch.Peers().Get(err.peerID)
+			if peer != nil {
+				bcR.Switch.StopPeerForError(peer, err)
+			}
+		case <-statusUpdateTicker.C:
+			// ask for status updates
+			go bcR.BroadcastStatusRequest() // nolint: errcheck
+		case <-switchToConsensusTicker.C:
+			height, numPending, lenRequesters := bcR.pool.GetStatus()
+			outbound, inbound, _ := bcR.Switch.NumPeers()
+			bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
+				"outbound", outbound, "inbound", inbound)
+			if bcR.pool.IsCaughtUp() {
+				bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
+				bcR.pool.Stop()
+
+				conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
+				conR.SwitchToConsensus(state, blocksSynced)
+
+				break FOR_LOOP
+			}
+		case <-trySyncTicker.C: // chan time
+			// This loop can be slow as long as it's doing syncing work.
+		SYNC_LOOP:
+			for i := 0; i < 10; i++ {
+				// See if there are any blocks to sync.
+				first, second := bcR.pool.PeekTwoBlocks()
+				//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
+				if first == nil || second == nil {
+					// We need both to sync the first block.
+					break SYNC_LOOP
+				}
+				firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
+				firstPartsHeader := firstParts.Header()
+				firstID := types.BlockID{first.Hash(), firstPartsHeader}
+				// Finally, verify the first block using the second's commit
+				// NOTE: we can probably make this more efficient, but note that calling
+				// first.Hash() doesn't verify the tx contents, so MakePartSet() is
+				// currently necessary.
+				err := state.Validators.VerifyCommit(
+					chainID, firstID, first.Height, second.LastCommit)
+				if err != nil {
+					bcR.Logger.Error("Error in validation", "err", err)
+					peerID := bcR.pool.RedoRequest(first.Height)
+					peer := bcR.Switch.Peers().Get(peerID)
+					if peer != nil {
+						bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
+					}
+					break SYNC_LOOP
+				} else {
+					bcR.pool.PopRequest()
+
+					// TODO: batch saves so we don't persist to disk every block
+					bcR.store.SaveBlock(first, firstParts, second.LastCommit)
+
+					// TODO: same thing for app - but we would need a way to
+					// get the hash without persisting the state
+					var err error
+					state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
+					if err != nil {
+						// TODO: This is bad; are we a zombie?
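+						// The block was already saved to the store above, so a failed
+						// ApplyBlock leaves the store and state inconsistent; the only
+						// safe option left is to crash.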
+ cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", + first.Height, first.Hash(), err)) + } + blocksSynced++ + + if blocksSynced%100 == 0 { + lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) + bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height, + "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate) + lastHundred = time.Now() + } + } + } + continue FOR_LOOP + case <-bcR.Quit(): + break FOR_LOOP + } + } +} + +// BroadcastStatusRequest broadcasts `BlockStore` height. +func (bcR *BlockchainReactor) BroadcastStatusRequest() error { + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()}) + bcR.Switch.Broadcast(BlockchainChannel, msgBytes) + return nil +} + +//----------------------------------------------------------------------------- +// Messages + +// BlockchainMessage is a generic message for this reactor. +type BlockchainMessage interface{} + +func RegisterBlockchainMessages(cdc *amino.Codec) { + cdc.RegisterInterface((*BlockchainMessage)(nil), nil) + cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil) + cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil) + cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil) + cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil) + cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil) +} + +func decodeMsg(bz []byte) (msg BlockchainMessage, err error) { + if len(bz) > maxMsgSize { + return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) + } + err = cdc.UnmarshalBinaryBare(bz, &msg) + return +} + +//------------------------------------- + +type bcBlockRequestMessage struct { + Height int64 +} + +func (m *bcBlockRequestMessage) String() string { + return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height) +} + +type bcNoBlockResponseMessage struct { + Height int64 +} + +func (brm *bcNoBlockResponseMessage) String() string { + return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height) +} + +//------------------------------------- + +type bcBlockResponseMessage struct { + Block *types.Block +} + +func (m *bcBlockResponseMessage) String() string { + return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height) +} + +//------------------------------------- + +type bcStatusRequestMessage struct { + Height int64 +} + +func (m *bcStatusRequestMessage) String() string { + return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height) +} + +//------------------------------------- + +type bcStatusResponseMessage struct { + Height int64 +} + +func (m *bcStatusResponseMessage) String() string { + return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height) +} diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go new file mode 100644 index 000000000..2bb6e9762 --- /dev/null +++ b/blockchain/reactor_test.go @@ -0,0 +1,208 @@ +package blockchain + +import ( + "net" + "testing" + + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { + config := cfg.ResetTestRoot("blockchain_reactor_test") + // 
blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB()) + // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) + blockDB := dbm.NewMemDB() + stateDB := dbm.NewMemDB() + blockStore := NewBlockStore(blockDB) + state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + if err != nil { + panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) + } + return state, blockStore +} + +func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainReactor { + state, blockStore := makeStateAndBlockStore(logger) + + // Make the blockchainReactor itself + fastSync := true + var nilApp proxy.AppConnConsensus + blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nilApp, + sm.MockMempool{}, sm.MockEvidencePool{}) + + bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + bcReactor.SetLogger(logger.With("module", "blockchain")) + + // Next: we need to set a switch in order for peers to be added in + bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig()) + + // Lastly: let's add some blocks in + for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { + firstBlock := makeBlock(blockHeight, state) + secondBlock := makeBlock(blockHeight+1, state) + firstParts := firstBlock.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes) + blockStore.SaveBlock(firstBlock, firstParts, secondBlock.LastCommit) + } + + return bcReactor +} + +func TestNoBlockResponse(t *testing.T) { + maxBlockHeight := int64(20) + + bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight) + bcr.Start() + defer bcr.Stop() + + // Add some peers in + peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12))) + bcr.AddPeer(peer) + + chID := byte(0x01) + + tests := []struct { + height int64 + existent bool + }{ + {maxBlockHeight + 2, false}, + {10, true}, + {1, true}, + {100, false}, + } + + // receive a request message from peer, + // wait for our response to be received on the peer + for _, tt := range tests { + reqBlockMsg := &bcBlockRequestMessage{tt.height} + reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg) + bcr.Receive(chID, peer, reqBlockBytes) + msg := peer.lastBlockchainMessage() + + if tt.existent { + if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok { + t.Fatalf("Expected to receive a block response for height %d", tt.height) + } else if blockMsg.Block.Height != tt.height { + t.Fatalf("Expected response to be for height %d, got %d", tt.height, blockMsg.Block.Height) + } + } else { + if noBlockMsg, ok := msg.(*bcNoBlockResponseMessage); !ok { + t.Fatalf("Expected to receive a no block response for height %d", tt.height) + } else if noBlockMsg.Height != tt.height { + t.Fatalf("Expected response to be for height %d, got %d", tt.height, noBlockMsg.Height) + } + } + } +} + +/* +// NOTE: This is too hard to test without +// an easy way to add test peer to switch +// or without significant refactoring of the module. +// Alternatively we could actually dial a TCP conn but +// that seems extreme. 
+func TestBadBlockStopsPeer(t *testing.T) { + maxBlockHeight := int64(20) + + bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight) + bcr.Start() + defer bcr.Stop() + + // Add some peers in + peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12))) + + // XXX: This doesn't add the peer to anything, + // so it's hard to check that it's later removed + bcr.AddPeer(peer) + assert.True(t, bcr.Switch.Peers().Size() > 0) + + // send a bad block from the peer + // default blocks already dont have commits, so should fail + block := bcr.store.LoadBlock(3) + msg := &bcBlockResponseMessage{Block: block} + peer.Send(BlockchainChannel, struct{ BlockchainMessage }{msg}) + + ticker := time.NewTicker(time.Millisecond * 10) + timer := time.NewTimer(time.Second * 2) +LOOP: + for { + select { + case <-ticker.C: + if bcr.Switch.Peers().Size() == 0 { + break LOOP + } + case <-timer.C: + t.Fatal("Timed out waiting to disconnect peer") + } + } +} +*/ + +//---------------------------------------------- +// utility funcs + +func makeTxs(height int64) (txs []types.Tx) { + for i := 0; i < 10; i++ { + txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) + } + return txs +} + +func makeBlock(height int64, state sm.State) *types.Block { + block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit)) + return block +} + +// The Test peer +type bcrTestPeer struct { + cmn.BaseService + id p2p.ID + ch chan interface{} +} + +var _ p2p.Peer = (*bcrTestPeer)(nil) + +func newbcrTestPeer(id p2p.ID) *bcrTestPeer { + bcr := &bcrTestPeer{ + id: id, + ch: make(chan interface{}, 2), + } + bcr.BaseService = *cmn.NewBaseService(nil, "bcrTestPeer", bcr) + return bcr +} + +func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch } + +func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool { + var msg BlockchainMessage + err := cdc.UnmarshalBinaryBare(msgBytes, &msg) + if err != nil { + panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage")) + } + if _, ok := msg.(*bcStatusResponseMessage); ok { + // Discard status response messages since they skew our results + // We only want to deal with: + // + bcBlockResponseMessage + // + bcNoBlockResponseMessage + } else { + tp.ch <- msg + } + return true +} + +func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) } +func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} } +func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} } +func (tp *bcrTestPeer) ID() p2p.ID { return tp.id } +func (tp *bcrTestPeer) IsOutbound() bool { return false } +func (tp *bcrTestPeer) IsPersistent() bool { return true } +func (tp *bcrTestPeer) Get(s string) interface{} { return s } +func (tp *bcrTestPeer) Set(string, interface{}) {} +func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} } diff --git a/blockchain/store.go b/blockchain/store.go new file mode 100644 index 000000000..f02d4facb --- /dev/null +++ b/blockchain/store.go @@ -0,0 +1,247 @@ +package blockchain + +import ( + "fmt" + "sync" + + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + + "github.com/tendermint/tendermint/types" +) + +/* +BlockStore is a simple low level store for blocks. 
+ +There are three types of information stored: + - BlockMeta: Meta information about each block + - Block part: Parts of each block, aggregated w/ PartSet + - Commit: The commit part of each block, for gossiping precommit votes + +Currently the precommit signatures are duplicated in the Block parts as +well as the Commit. In the future this may change, perhaps by moving +the Commit data outside the Block. (TODO) + +// NOTE: BlockStore methods will panic if they encounter errors +// deserializing loaded data, indicating probable corruption on disk. +*/ +type BlockStore struct { + db dbm.DB + + mtx sync.RWMutex + height int64 +} + +// NewBlockStore returns a new BlockStore with the given DB, +// initialized to the last height that was committed to the DB. +func NewBlockStore(db dbm.DB) *BlockStore { + bsjson := LoadBlockStoreStateJSON(db) + return &BlockStore{ + height: bsjson.Height, + db: db, + } +} + +// Height returns the last known contiguous block height. +func (bs *BlockStore) Height() int64 { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + return bs.height +} + +// LoadBlock returns the block with the given height. +// If no block is found for that height, it returns nil. +func (bs *BlockStore) LoadBlock(height int64) *types.Block { + var blockMeta = bs.LoadBlockMeta(height) + if blockMeta == nil { + return nil + } + + var block = new(types.Block) + buf := []byte{} + for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ { + part := bs.LoadBlockPart(height, i) + buf = append(buf, part.Bytes...) + } + err := cdc.UnmarshalBinary(buf, block) + if err != nil { + // NOTE: The existence of meta should imply the existence of the + // block. So, make sure meta is only saved after blocks are saved. + panic(cmn.ErrorWrap(err, "Error reading block")) + } + return block +} + +// LoadBlockPart returns the Part at the given index +// from the block at the given height. +// If no part is found for the given height and index, it returns nil. +func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { + var part = new(types.Part) + bz := bs.db.Get(calcBlockPartKey(height, index)) + if len(bz) == 0 { + return nil + } + err := cdc.UnmarshalBinaryBare(bz, part) + if err != nil { + panic(cmn.ErrorWrap(err, "Error reading block part")) + } + return part +} + +// LoadBlockMeta returns the BlockMeta for the given height. +// If no block is found for the given height, it returns nil. +func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + var blockMeta = new(types.BlockMeta) + bz := bs.db.Get(calcBlockMetaKey(height)) + if len(bz) == 0 { + return nil + } + err := cdc.UnmarshalBinaryBare(bz, blockMeta) + if err != nil { + panic(cmn.ErrorWrap(err, "Error reading block meta")) + } + return blockMeta +} + +// LoadBlockCommit returns the Commit for the given height. +// This commit consists of the +2/3 and other Precommit-votes for block at `height`, +// and it comes from the block.LastCommit for `height+1`. +// If no commit is found for the given height, it returns nil. +func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { + var commit = new(types.Commit) + bz := bs.db.Get(calcBlockCommitKey(height)) + if len(bz) == 0 { + return nil + } + err := cdc.UnmarshalBinaryBare(bz, commit) + if err != nil { + panic(cmn.ErrorWrap(err, "Error reading block commit")) + } + return commit +} + +// LoadSeenCommit returns the locally seen Commit for the given height. 
+// This is useful when we've seen a commit, but there has not yet been +// a new block at `height + 1` that includes this commit in its block.LastCommit. +func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { + var commit = new(types.Commit) + bz := bs.db.Get(calcSeenCommitKey(height)) + if len(bz) == 0 { + return nil + } + err := cdc.UnmarshalBinaryBare(bz, commit) + if err != nil { + panic(cmn.ErrorWrap(err, "Error reading block seen commit")) + } + return commit +} + +// SaveBlock persists the given block, blockParts, and seenCommit to the underlying db. +// blockParts: Must be parts of the block +// seenCommit: The +2/3 precommits that were seen which committed at height. +// If all the nodes restart after committing a block, +// we need this to reload the precommits to catch-up nodes to the +// most recent height. Otherwise they'd stall at H-1. +func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + if block == nil { + cmn.PanicSanity("BlockStore can only save a non-nil block") + } + height := block.Height + if g, w := height, bs.Height()+1; g != w { + cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) + } + if !blockParts.IsComplete() { + cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets")) + } + + // Save block meta + blockMeta := types.NewBlockMeta(block, blockParts) + metaBytes := cdc.MustMarshalBinaryBare(blockMeta) + bs.db.Set(calcBlockMetaKey(height), metaBytes) + + // Save block parts + for i := 0; i < blockParts.Total(); i++ { + part := blockParts.GetPart(i) + bs.saveBlockPart(height, i, part) + } + + // Save block commit (duplicate and separate from the Block) + blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit) + bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes) + + // Save seen commit (seen +2/3 precommits for block) + // NOTE: we can delete this at a later height + seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit) + bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) + + // Save new BlockStoreStateJSON descriptor + BlockStoreStateJSON{Height: height}.Save(bs.db) + + // Done! + bs.mtx.Lock() + bs.height = height + bs.mtx.Unlock() + + // Flush + bs.db.SetSync(nil, nil) +} + +func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { + if height != bs.Height()+1 { + cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) + } + partBytes := cdc.MustMarshalBinaryBare(part) + bs.db.Set(calcBlockPartKey(height, index), partBytes) +} + +//----------------------------------------------------------------------------- + +func calcBlockMetaKey(height int64) []byte { + return []byte(fmt.Sprintf("H:%v", height)) +} + +func calcBlockPartKey(height int64, partIndex int) []byte { + return []byte(fmt.Sprintf("P:%v:%v", height, partIndex)) +} + +func calcBlockCommitKey(height int64) []byte { + return []byte(fmt.Sprintf("C:%v", height)) +} + +func calcSeenCommitKey(height int64) []byte { + return []byte(fmt.Sprintf("SC:%v", height)) +} + +//----------------------------------------------------------------------------- + +var blockStoreKey = []byte("blockStore") + +type BlockStoreStateJSON struct { + Height int64 `json:"height"` +} + +// Save persists the blockStore state to the database as JSON. 
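+// SaveBlock calls this after every block so that, on restart, NewBlockStore
+// can resume from the last committed height.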
+func (bsj BlockStoreStateJSON) Save(db dbm.DB) { + bytes, err := cdc.MarshalJSON(bsj) + if err != nil { + cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err)) + } + db.SetSync(blockStoreKey, bytes) +} + +// LoadBlockStoreStateJSON returns the BlockStoreStateJSON as loaded from disk. +// If no BlockStoreStateJSON was previously persisted, it returns the zero value. +func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { + bytes := db.Get(blockStoreKey) + if len(bytes) == 0 { + return BlockStoreStateJSON{ + Height: 0, + } + } + bsj := BlockStoreStateJSON{} + err := cdc.UnmarshalJSON(bytes, &bsj) + if err != nil { + panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes)) + } + return bsj +} diff --git a/blockchain/store_test.go b/blockchain/store_test.go new file mode 100644 index 000000000..888040bdf --- /dev/null +++ b/blockchain/store_test.go @@ -0,0 +1,383 @@ +package blockchain + +import ( + "bytes" + "fmt" + "runtime/debug" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/types" +) + +func TestLoadBlockStoreStateJSON(t *testing.T) { + db := db.NewMemDB() + + bsj := &BlockStoreStateJSON{Height: 1000} + bsj.Save(db) + + retrBSJ := LoadBlockStoreStateJSON(db) + + assert.Equal(t, *bsj, retrBSJ, "expected the retrieved DBs to match") +} + +func TestNewBlockStore(t *testing.T) { + db := db.NewMemDB() + db.Set(blockStoreKey, []byte(`{"height": "10000"}`)) + bs := NewBlockStore(db) + require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore") + + panicCausers := []struct { + data []byte + wantErr string + }{ + {[]byte("artful-doger"), "not unmarshal bytes"}, + {[]byte(" "), "unmarshal bytes"}, + } + + for i, tt := range panicCausers { + // Expecting a panic here on trying to parse an invalid blockStore + _, _, panicErr := doFn(func() (interface{}, error) { + db.Set(blockStoreKey, tt.data) + _ = NewBlockStore(db) + return nil, nil + }) + require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data) + assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data) + } + + db.Set(blockStoreKey, nil) + bs = NewBlockStore(db) + assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright") +} + +func freshBlockStore() (*BlockStore, db.DB) { + db := db.NewMemDB() + return NewBlockStore(db), db +} + +var ( + state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + + block = makeBlock(1, state) + partSet = block.MakePartSet(2) + part1 = partSet.GetPart(0) + part2 = partSet.GetPart(1) + seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10, + Timestamp: time.Now().UTC()}}} +) + +// TODO: This test should be simplified ... 
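+// It drives SaveBlock and the Load* accessors through a table of good, nil,
+// non-contiguous, incomplete, and corrupted fixtures, matching on the panic
+// substrings produced by the store's sanity checks.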
+ +func TestBlockStoreSaveLoadBlock(t *testing.T) { + state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") + + // check there are no blocks at various heights + noBlockHeights := []int64{0, -1, 100, 1000, 2} + for i, height := range noBlockHeights { + if g := bs.LoadBlock(height); g != nil { + t.Errorf("#%d: height(%d) got a block; want nil", i, height) + } + } + + // save a block + block := makeBlock(bs.Height()+1, state) + validPartSet := block.MakePartSet(2) + seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10, + Timestamp: time.Now().UTC()}}} + bs.SaveBlock(block, partSet, seenCommit) + require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") + + incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2}) + uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0}) + uncontiguousPartSet.AddPart(part2) + + header1 := types.Header{ + Height: 1, + NumTxs: 100, + ChainID: "block_test", + Time: time.Now(), + } + header2 := header1 + header2.Height = 4 + + // End of setup, test data + + commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10, + Timestamp: time.Now().UTC()}}} + tuples := []struct { + block *types.Block + parts *types.PartSet + seenCommit *types.Commit + wantErr bool + wantPanic string + + corruptBlockInDB bool + corruptCommitInDB bool + corruptSeenCommitInDB bool + eraseCommitInDB bool + eraseSeenCommitInDB bool + }{ + { + block: newBlock(&header1, commitAtH10), + parts: validPartSet, + seenCommit: seenCommit1, + }, + + { + block: nil, + wantPanic: "only save a non-nil block", + }, + + { + block: newBlock(&header2, commitAtH10), + parts: uncontiguousPartSet, + wantPanic: "only save contiguous blocks", // and incomplete and uncontiguous parts + }, + + { + block: newBlock(&header1, commitAtH10), + parts: incompletePartSet, + wantPanic: "only save complete block", // incomplete parts + }, + + { + block: newBlock(&header1, commitAtH10), + parts: validPartSet, + seenCommit: seenCommit1, + corruptCommitInDB: true, // Corrupt the DB's commit entry + wantPanic: "unmarshal to types.Commit failed", + }, + + { + block: newBlock(&header1, commitAtH10), + parts: validPartSet, + seenCommit: seenCommit1, + wantPanic: "unmarshal to types.BlockMeta failed", + corruptBlockInDB: true, // Corrupt the DB's block entry + }, + + { + block: newBlock(&header1, commitAtH10), + parts: validPartSet, + seenCommit: seenCommit1, + + // Expecting no error and we want a nil back + eraseSeenCommitInDB: true, + }, + + { + block: newBlock(&header1, commitAtH10), + parts: validPartSet, + seenCommit: seenCommit1, + + corruptSeenCommitInDB: true, + wantPanic: "unmarshal to types.Commit failed", + }, + + { + block: newBlock(&header1, commitAtH10), + parts: validPartSet, + seenCommit: seenCommit1, + + // Expecting no error and we want a nil back + eraseCommitInDB: true, + }, + } + + type quad struct { + block *types.Block + commit *types.Commit + meta *types.BlockMeta + + seenCommit *types.Commit + } + + for i, tuple := range tuples { + bs, db := freshBlockStore() + // SaveBlock + res, err, panicErr := doFn(func() (interface{}, error) { + bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit) + if tuple.block == nil { + return nil, nil + } + + if tuple.corruptBlockInDB { + db.Set(calcBlockMetaKey(tuple.block.Height), []byte("block-bogus")) + } + bBlock := bs.LoadBlock(tuple.block.Height) + bBlockMeta := 
bs.LoadBlockMeta(tuple.block.Height)
+
+			if tuple.eraseSeenCommitInDB {
+				db.Delete(calcSeenCommitKey(tuple.block.Height))
+			}
+			if tuple.corruptSeenCommitInDB {
+				db.Set(calcSeenCommitKey(tuple.block.Height), []byte("bogus-seen-commit"))
+			}
+			bSeenCommit := bs.LoadSeenCommit(tuple.block.Height)
+
+			commitHeight := tuple.block.Height - 1
+			if tuple.eraseCommitInDB {
+				db.Delete(calcBlockCommitKey(commitHeight))
+			}
+			if tuple.corruptCommitInDB {
+				db.Set(calcBlockCommitKey(commitHeight), []byte("foo-bogus"))
+			}
+			bCommit := bs.LoadBlockCommit(commitHeight)
+			return &quad{block: bBlock, seenCommit: bSeenCommit, commit: bCommit,
+				meta: bBlockMeta}, nil
+		})
+
+		if subStr := tuple.wantPanic; subStr != "" {
+			if panicErr == nil {
+				t.Errorf("#%d: want a non-nil panic", i)
+			} else if got := panicErr.Error(); !strings.Contains(got, subStr) {
+				t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr)
+			}
+			continue
+		}
+
+		if tuple.wantErr {
+			if err == nil {
+				t.Errorf("#%d: got nil error", i)
+			}
+			continue
+		}
+
+		assert.Nil(t, panicErr, "#%d: unexpected panic", i)
+		assert.Nil(t, err, "#%d: unexpected error", i)
+		qua, ok := res.(*quad)
+		if !ok || qua == nil {
+			t.Errorf("#%d: got nil quad back; gotType=%T", i, res)
+			continue
+		}
+		if tuple.eraseSeenCommitInDB {
+			assert.Nil(t, qua.seenCommit,
+				"erased the seenCommit in the DB hence we should get back a nil seenCommit")
+		}
+		if tuple.eraseCommitInDB {
+			assert.Nil(t, qua.commit,
+				"erased the commit in the DB hence we should get back a nil commit")
+		}
+	}
+}
+
+func TestLoadBlockPart(t *testing.T) {
+	bs, db := freshBlockStore()
+	height, index := int64(10), 1
+	loadPart := func() (interface{}, error) {
+		part := bs.LoadBlockPart(height, index)
+		return part, nil
+	}
+
+	// Initially no contents.
+	// 1. Requesting a non-existent block part shouldn't fail
+	res, _, panicErr := doFn(loadPart)
+	require.Nil(t, panicErr, "a non-existent block part shouldn't cause a panic")
+	require.Nil(t, res, "a non-existent block part should return nil")
+
+	// 2. Next save a corrupted block then try to load it
+	db.Set(calcBlockPartKey(height, index), []byte("Tendermint"))
+	res, _, panicErr = doFn(loadPart)
+	require.NotNil(t, panicErr, "expecting a non-nil panic")
+	require.Contains(t, panicErr.Error(), "unmarshal to types.Part failed")
+
+	// 3. A good block serialized and saved to the DB should be retrievable
+	db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1))
+	gotPart, _, panicErr := doFn(loadPart)
+	require.Nil(t, panicErr, "an existent and proper block part should not panic")
+	require.Nil(t, res, "res from the corrupted-load step should still be nil")
+	require.Equal(t, gotPart.(*types.Part).Hash(), part1.Hash(),
+		"expecting successful retrieval of previously saved block")
+}
+
+func TestLoadBlockMeta(t *testing.T) {
+	bs, db := freshBlockStore()
+	height := int64(10)
+	loadMeta := func() (interface{}, error) {
+		meta := bs.LoadBlockMeta(height)
+		return meta, nil
+	}
+
+	// Initially no contents.
+	// 1. Requesting a non-existent blockMeta shouldn't fail
+	res, _, panicErr := doFn(loadMeta)
+	require.Nil(t, panicErr, "a non-existent blockMeta shouldn't cause a panic")
+	require.Nil(t, res, "a non-existent blockMeta should return nil")
+
+	// 2. Next save a corrupted blockMeta then try to load it
+	db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta"))
+	res, _, panicErr = doFn(loadMeta)
+	require.NotNil(t, panicErr, "expecting a non-nil panic")
+	require.Contains(t, panicErr.Error(), "unmarshal to types.BlockMeta")
+
+	// 3. A good blockMeta serialized and saved to the DB should be retrievable
+	meta := &types.BlockMeta{}
+	db.Set(calcBlockMetaKey(height), cdc.MustMarshalBinaryBare(meta))
+	gotMeta, _, panicErr := doFn(loadMeta)
+	require.Nil(t, panicErr, "an existent and proper blockMeta should not panic")
+	require.Nil(t, res, "res from the corrupted-load step should still be nil")
+	require.Equal(t, cdc.MustMarshalBinaryBare(meta), cdc.MustMarshalBinaryBare(gotMeta),
+		"expecting successful retrieval of previously saved blockMeta")
+}
+
+func TestBlockFetchAtHeight(t *testing.T) {
+	state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
+	block := makeBlock(bs.Height()+1, state)
+
+	partSet := block.MakePartSet(2)
+	seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
+		Timestamp: time.Now().UTC()}}}
+
+	bs.SaveBlock(block, partSet, seenCommit)
+	require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
+
+	blockAtHeight := bs.LoadBlock(bs.Height())
+	bz1 := cdc.MustMarshalBinaryBare(block)
+	bz2 := cdc.MustMarshalBinaryBare(blockAtHeight)
+	require.Equal(t, bz1, bz2)
+	require.Equal(t, block.Hash(), blockAtHeight.Hash(),
+		"expecting a successful load of the last saved block")
+
+	blockAtHeightPlus1 := bs.LoadBlock(bs.Height() + 1)
+	require.Nil(t, blockAtHeightPlus1, "expecting an unsuccessful load of Height()+1")
+	blockAtHeightPlus2 := bs.LoadBlock(bs.Height() + 2)
+	require.Nil(t, blockAtHeightPlus2, "expecting an unsuccessful load of Height()+2")
+}
+
+func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			switch e := r.(type) {
+			case error:
+				panicErr = e
+			case string:
+				panicErr = fmt.Errorf("%s", e)
+			default:
+				if st, ok := r.(fmt.Stringer); ok {
+					panicErr = fmt.Errorf("%s", st)
+				} else {
+					panicErr = fmt.Errorf("%s", debug.Stack())
+				}
+			}
+		}
+	}()
+
+	res, err = fn()
+	return res, err, panicErr
+}
+
+func newBlock(hdr *types.Header, lastCommit *types.Commit) *types.Block {
+	return &types.Block{
+		Header:     hdr,
+		LastCommit: lastCommit,
+	}
+}
diff --git a/blockchain/wire.go b/blockchain/wire.go
new file mode 100644
index 000000000..70b50565d
--- /dev/null
+++ b/blockchain/wire.go
@@ -0,0 +1,13 @@
+package blockchain
+
+import (
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	RegisterBlockchainMessages(cdc)
+	crypto.RegisterAmino(cdc)
+}
diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go
new file mode 100644
index 000000000..20c23f4c4
--- /dev/null
+++ b/cmd/priv_val_server/main.go
@@ -0,0 +1,53 @@
+package main
+
+import (
+	"flag"
+	"os"
+
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+
+	"github.com/tendermint/tendermint/privval"
+)
+
+func main() {
+	var (
+		addr        = flag.String("addr", ":26659", "Address of client to connect to")
+		chainID     = flag.String("chain-id", "mychain", "chain id")
+		privValPath = flag.String("priv", "", "priv val file path")
+
+		logger = 
log.NewTMLogger( + log.NewSyncWriter(os.Stdout), + ).With("module", "priv_val") + ) + flag.Parse() + + logger.Info( + "Starting private validator", + "addr", *addr, + "chainID", *chainID, + "privPath", *privValPath, + ) + + pv := privval.LoadFilePV(*privValPath) + + rs := privval.NewRemoteSigner( + logger, + *chainID, + *addr, + pv, + crypto.GenPrivKeyEd25519(), + ) + err := rs.Start() + if err != nil { + panic(err) + } + + cmn.TrapSignal(func() { + err := rs.Stop() + if err != nil { + panic(err) + } + }) +} diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go new file mode 100644 index 000000000..7aedcd0dc --- /dev/null +++ b/cmd/tendermint/commands/gen_node_key.go @@ -0,0 +1,32 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/p2p" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to +// the standard output. +var GenNodeKeyCmd = &cobra.Command{ + Use: "gen_node_key", + Short: "Generate a node key for this node and print its ID", + RunE: genNodeKey, +} + +func genNodeKey(cmd *cobra.Command, args []string) error { + nodeKeyFile := config.NodeKeyFile() + if cmn.FileExists(nodeKeyFile) { + return fmt.Errorf("node key at %s already exists", nodeKeyFile) + } + + nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile) + if err != nil { + return err + } + fmt.Println(nodeKey.ID()) + return nil +} diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go new file mode 100644 index 000000000..20d43d4dd --- /dev/null +++ b/cmd/tendermint/commands/gen_validator.go @@ -0,0 +1,27 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/privval" +) + +// GenValidatorCmd allows the generation of a keypair for a +// validator. +var GenValidatorCmd = &cobra.Command{ + Use: "gen_validator", + Short: "Generate new validator keypair", + Run: genValidator, +} + +func genValidator(cmd *cobra.Command, args []string) { + pv := privval.GenFilePV("") + jsbz, err := cdc.MarshalJSON(pv) + if err != nil { + panic(err) + } + fmt.Printf(`%v +`, string(jsbz)) +} diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go new file mode 100644 index 000000000..a44c73ebf --- /dev/null +++ b/cmd/tendermint/commands/init.go @@ -0,0 +1,71 @@ +package commands + +import ( + "time" + + "github.com/spf13/cobra" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// InitFilesCmd initialises a fresh Tendermint Core instance. 
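+// Existing private validator, node key, and genesis files are left in
+// place; only missing ones are generated.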
+var InitFilesCmd = &cobra.Command{ + Use: "init", + Short: "Initialize Tendermint", + RunE: initFiles, +} + +func initFiles(cmd *cobra.Command, args []string) error { + return initFilesWithConfig(config) +} + +func initFilesWithConfig(config *cfg.Config) error { + // private validator + privValFile := config.PrivValidatorFile() + var pv *privval.FilePV + if cmn.FileExists(privValFile) { + pv = privval.LoadFilePV(privValFile) + logger.Info("Found private validator", "path", privValFile) + } else { + pv = privval.GenFilePV(privValFile) + pv.Save() + logger.Info("Generated private validator", "path", privValFile) + } + + nodeKeyFile := config.NodeKeyFile() + if cmn.FileExists(nodeKeyFile) { + logger.Info("Found node key", "path", nodeKeyFile) + } else { + if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil { + return err + } + logger.Info("Generated node key", "path", nodeKeyFile) + } + + // genesis file + genFile := config.GenesisFile() + if cmn.FileExists(genFile) { + logger.Info("Found genesis file", "path", genFile) + } else { + genDoc := types.GenesisDoc{ + ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), + GenesisTime: time.Now(), + ConsensusParams: types.DefaultConsensusParams(), + } + genDoc.Validators = []types.GenesisValidator{{ + PubKey: pv.GetPubKey(), + Power: 10, + }} + + if err := genDoc.SaveAs(genFile); err != nil { + return err + } + logger.Info("Generated genesis file", "path", genFile) + } + + return nil +} diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go new file mode 100644 index 000000000..d57598816 --- /dev/null +++ b/cmd/tendermint/commands/lite.go @@ -0,0 +1,87 @@ +package commands + +import ( + "fmt" + "net/url" + + "github.com/spf13/cobra" + + cmn "github.com/tendermint/tendermint/libs/common" + + "github.com/tendermint/tendermint/lite/proxy" + rpcclient "github.com/tendermint/tendermint/rpc/client" +) + +// LiteCmd represents the base command when called without any subcommands +var LiteCmd = &cobra.Command{ + Use: "lite", + Short: "Run lite-client proxy server, verifying tendermint rpc", + Long: `This node will run a secure proxy to a tendermint rpc server. + +All calls that can be tracked back to a block header by a proof +will be verified before passing them back to the caller. 
Other than
+that, it will present the same interface as a full tendermint node,
+just with added trust and running locally.`,
+	RunE:         runProxy,
+	SilenceUsage: true,
+}
+
+var (
+	listenAddr string
+	nodeAddr   string
+	chainID    string
+	home       string
+)
+
+func init() {
+	LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "Serve the proxy on the given address")
+	LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address")
+	LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
+	LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
+}
+
+func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
+	u, err := url.Parse(addr)
+	if err != nil {
+		return "", err
+	}
+	switch u.Scheme {
+	case "tcp", "unix":
+	case "":
+		u.Scheme = "tcp"
+	default:
+		return "", fmt.Errorf("unknown scheme %q, use either tcp or unix", u.Scheme)
+	}
+	return u.String(), nil
+}
+
+func runProxy(cmd *cobra.Command, args []string) error {
+	nodeAddr, err := ensureAddrHasSchemeOrDefaultToTCP(nodeAddr)
+	if err != nil {
+		return err
+	}
+	listenAddr, err := ensureAddrHasSchemeOrDefaultToTCP(listenAddr)
+	if err != nil {
+		return err
+	}
+
+	// First, connect a client
+	node := rpcclient.NewHTTP(nodeAddr, "/websocket")
+
+	cert, err := proxy.GetCertifier(chainID, home, nodeAddr)
+	if err != nil {
+		return err
+	}
+	sc := proxy.SecureClient(node, cert)
+
+	err = proxy.StartProxy(sc, listenAddr, logger)
+	if err != nil {
+		return err
+	}
+
+	cmn.TrapSignal(func() {
+		// TODO: close up shop
+	})
+
+	return nil
+}
diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go
new file mode 100644
index 000000000..35c3c354d
--- /dev/null
+++ b/cmd/tendermint/commands/probe_upnp.go
@@ -0,0 +1,31 @@
+package commands
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+
+	"github.com/tendermint/tendermint/p2p/upnp"
+)
+
+// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
+var ProbeUpnpCmd = &cobra.Command{
+	Use:   "probe_upnp",
+	Short: "Test UPnP functionality",
+	RunE:  probeUpnp,
+}
+
+func probeUpnp(cmd *cobra.Command, args []string) error {
+	capabilities, err := upnp.Probe(logger)
+	if err != nil {
+		fmt.Println("Probe failed: ", err)
+	} else {
+		fmt.Println("Probe success!")
+		jsonBytes, err := cdc.MarshalJSON(capabilities)
+		if err != nil {
+			return err
+		}
+		fmt.Println(string(jsonBytes))
+	}
+	return nil
+}
diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go
new file mode 100644
index 000000000..303ccba6b
--- /dev/null
+++ b/cmd/tendermint/commands/replay.go
@@ -0,0 +1,26 @@
+package commands
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/tendermint/tendermint/consensus"
+)
+
+// ReplayCmd allows replaying of messages from the WAL.
+var ReplayCmd = &cobra.Command{
+	Use:   "replay",
+	Short: "Replay messages from WAL",
+	Run: func(cmd *cobra.Command, args []string) {
+		consensus.RunReplayFile(config.BaseConfig, config.Consensus, false)
+	},
+}
+
+// ReplayConsoleCmd allows replaying of messages from the WAL in a
+// console.
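+// It differs from ReplayCmd only in passing true for the final (console)
+// argument of RunReplayFile.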
+var ReplayConsoleCmd = &cobra.Command{ + Use: "replay_console", + Short: "Replay messages from WAL in a console", + Run: func(cmd *cobra.Command, args []string) { + consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) + }, +} diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go new file mode 100644 index 000000000..ef0ba3019 --- /dev/null +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -0,0 +1,69 @@ +package commands + +import ( + "os" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/libs/log" +) + +// ResetAllCmd removes the database of this Tendermint core +// instance. +var ResetAllCmd = &cobra.Command{ + Use: "unsafe_reset_all", + Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", + Run: resetAll, +} + +// ResetPrivValidatorCmd resets the private validator files. +var ResetPrivValidatorCmd = &cobra.Command{ + Use: "unsafe_reset_priv_validator", + Short: "(unsafe) Reset this node's validator to genesis state", + Run: resetPrivValidator, +} + +// XXX: this is totally unsafe. +// it's only suitable for testnets. +func resetAll(cmd *cobra.Command, args []string) { + ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorFile(), logger) +} + +// XXX: this is totally unsafe. +// it's only suitable for testnets. +func resetPrivValidator(cmd *cobra.Command, args []string) { + resetFilePV(config.PrivValidatorFile(), logger) +} + +// ResetAll removes the privValidator and address book files plus all data. +// Exported so other CLI tools can use it. +func ResetAll(dbDir, addrBookFile, privValFile string, logger log.Logger) { + resetFilePV(privValFile, logger) + removeAddrBook(addrBookFile, logger) + if err := os.RemoveAll(dbDir); err == nil { + logger.Info("Removed all blockchain history", "dir", dbDir) + } else { + logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err) + } +} + +func resetFilePV(privValFile string, logger log.Logger) { + if _, err := os.Stat(privValFile); err == nil { + pv := privval.LoadFilePV(privValFile) + pv.Reset() + logger.Info("Reset private validator file to genesis state", "file", privValFile) + } else { + pv := privval.GenFilePV(privValFile) + pv.Save() + logger.Info("Generated private validator file", "file", privValFile) + } +} + +func removeAddrBook(addrBookFile string, logger log.Logger) { + if err := os.Remove(addrBookFile); err == nil { + logger.Info("Removed existing address book", "file", addrBookFile) + } else if !os.IsNotExist(err) { + logger.Info("Error removing address book", "file", addrBookFile, "err", err) + } +} diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go new file mode 100644 index 000000000..3c67ddc14 --- /dev/null +++ b/cmd/tendermint/commands/root.go @@ -0,0 +1,63 @@ +package commands + +import ( + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/cli" + tmflags "github.com/tendermint/tendermint/libs/cli/flags" + "github.com/tendermint/tendermint/libs/log" +) + +var ( + config = cfg.DefaultConfig() + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +) + +func init() { + registerFlagsRootCmd(RootCmd) +} + +func registerFlagsRootCmd(cmd *cobra.Command) { + cmd.PersistentFlags().String("log_level", config.LogLevel, "Log level") +} + +// ParseConfig retrieves the default environment 
configuration, +// sets up the Tendermint root and ensures that the root exists +func ParseConfig() (*cfg.Config, error) { + conf := cfg.DefaultConfig() + err := viper.Unmarshal(conf) + if err != nil { + return nil, err + } + conf.SetRoot(conf.RootDir) + cfg.EnsureRoot(conf.RootDir) + return conf, err +} + +// RootCmd is the root command for Tendermint core. +var RootCmd = &cobra.Command{ + Use: "tendermint", + Short: "Tendermint Core (BFT Consensus) in Go", + PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { + if cmd.Name() == VersionCmd.Name() { + return nil + } + config, err = ParseConfig() + if err != nil { + return err + } + logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel()) + if err != nil { + return err + } + if viper.GetBool(cli.TraceFlag) { + logger = log.NewTracingLogger(logger) + } + logger = logger.With("module", "main") + return nil + }, +} diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go new file mode 100644 index 000000000..e8095b387 --- /dev/null +++ b/cmd/tendermint/commands/root_test.go @@ -0,0 +1,176 @@ +package commands + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "testing" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/cli" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var ( + defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") +) + +const ( + rootName = "root" +) + +// clearConfig clears env vars, the given root dir, and resets viper. +func clearConfig(dir string) { + if err := os.Unsetenv("TMHOME"); err != nil { + panic(err) + } + if err := os.Unsetenv("TM_HOME"); err != nil { + panic(err) + } + + if err := os.RemoveAll(dir); err != nil { + panic(err) + } + viper.Reset() + config = cfg.DefaultConfig() +} + +// prepare new rootCmd +func testRootCmd() *cobra.Command { + rootCmd := &cobra.Command{ + Use: RootCmd.Use, + PersistentPreRunE: RootCmd.PersistentPreRunE, + Run: func(cmd *cobra.Command, args []string) {}, + } + registerFlagsRootCmd(rootCmd) + var l string + rootCmd.PersistentFlags().String("log", l, "Log") + return rootCmd +} + +func testSetup(rootDir string, args []string, env map[string]string) error { + clearConfig(defaultRoot) + + rootCmd := testRootCmd() + cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot) + + // run with the args and env + args = append([]string{rootCmd.Use}, args...) 
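+	// cli.RunWithArgs treats the slice like os.Args, so the command name is
+	// prepended above.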
+ return cli.RunWithArgs(cmd, args, env) +} + +func TestRootHome(t *testing.T) { + newRoot := filepath.Join(defaultRoot, "something-else") + cases := []struct { + args []string + env map[string]string + root string + }{ + {nil, nil, defaultRoot}, + {[]string{"--home", newRoot}, nil, newRoot}, + {nil, map[string]string{"TMHOME": newRoot}, newRoot}, + } + + for i, tc := range cases { + idxString := strconv.Itoa(i) + + err := testSetup(defaultRoot, tc.args, tc.env) + require.Nil(t, err, idxString) + + assert.Equal(t, tc.root, config.RootDir, idxString) + assert.Equal(t, tc.root, config.P2P.RootDir, idxString) + assert.Equal(t, tc.root, config.Consensus.RootDir, idxString) + assert.Equal(t, tc.root, config.Mempool.RootDir, idxString) + } +} + +func TestRootFlagsEnv(t *testing.T) { + + // defaults + defaults := cfg.DefaultConfig() + defaultLogLvl := defaults.LogLevel + + cases := []struct { + args []string + env map[string]string + logLevel string + }{ + {[]string{"--log", "debug"}, nil, defaultLogLvl}, // wrong flag + {[]string{"--log_level", "debug"}, nil, "debug"}, // right flag + {nil, map[string]string{"TM_LOW": "debug"}, defaultLogLvl}, // wrong env flag + {nil, map[string]string{"MT_LOG_LEVEL": "debug"}, defaultLogLvl}, // wrong env prefix + {nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env + } + + for i, tc := range cases { + idxString := strconv.Itoa(i) + + err := testSetup(defaultRoot, tc.args, tc.env) + require.Nil(t, err, idxString) + + assert.Equal(t, tc.logLevel, config.LogLevel, idxString) + } +} + +func TestRootConfig(t *testing.T) { + + // write non-default config + nonDefaultLogLvl := "abc:debug" + cvals := map[string]string{ + "log_level": nonDefaultLogLvl, + } + + cases := []struct { + args []string + env map[string]string + + logLvl string + }{ + {nil, nil, nonDefaultLogLvl}, // should load config + {[]string{"--log_level=abc:info"}, nil, "abc:info"}, // flag over rides + {nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides + } + + for i, tc := range cases { + idxString := strconv.Itoa(i) + clearConfig(defaultRoot) + + // XXX: path must match cfg.defaultConfigPath + configFilePath := filepath.Join(defaultRoot, "config") + err := cmn.EnsureDir(configFilePath, 0700) + require.Nil(t, err) + + // write the non-defaults to a different path + // TODO: support writing sub configs so we can test that too + err = WriteConfigVals(configFilePath, cvals) + require.Nil(t, err) + + rootCmd := testRootCmd() + cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot) + + // run with the args and env + tc.args = append([]string{rootCmd.Use}, tc.args...) + err = cli.RunWithArgs(cmd, tc.args, tc.env) + require.Nil(t, err, idxString) + + assert.Equal(t, tc.logLvl, config.LogLevel, idxString) + } +} + +// WriteConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. 
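+// Each entry is emitted as a flat `key = "value"` line, so only top-level
+// string options (e.g. log_level) can be written this way.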
+func WriteConfigVals(dir string, vals map[string]string) error { + data := "" + for k, v := range vals { + data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) + } + cfile := filepath.Join(dir, "config.toml") + return ioutil.WriteFile(cfile, []byte(data), 0666) +} diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go new file mode 100644 index 000000000..542e5c991 --- /dev/null +++ b/cmd/tendermint/commands/run_node.go @@ -0,0 +1,72 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + nm "github.com/tendermint/tendermint/node" +) + +// AddNodeFlags exposes some common configuration options on the command-line +// These are exposed for convenience of commands embedding a tendermint node +func AddNodeFlags(cmd *cobra.Command) { + // bind flags + cmd.Flags().String("moniker", config.Moniker, "Node Name") + + // priv val flags + cmd.Flags().String("priv_validator_laddr", config.PrivValidatorListenAddr, "Socket address to listen on for connections from external priv_validator process") + + // node flags + cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing") + + // abci flags + cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'kvstore' for local testing.") + cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)") + + // rpc flags + cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") + cmd.Flags().String("rpc.grpc_laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). Port required") + cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "Enabled unsafe rpc methods") + + // p2p flags + cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)") + cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes") + cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers") + cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "Enable/disable UPNP port forwarding") + cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange") + cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode") + cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs") + + // consensus flags + cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes") +} + +// NewRunNodeCmd returns the command that allows the CLI to start a node. +// It can be used with a custom PrivValidator and in-process ABCI application. +func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command { + cmd := &cobra.Command{ + Use: "node", + Short: "Run the tendermint node", + RunE: func(cmd *cobra.Command, args []string) error { + // Create & start node + n, err := nodeProvider(config, logger) + if err != nil { + return fmt.Errorf("Failed to create node: %v", err) + } + + if err := n.Start(); err != nil { + return fmt.Errorf("Failed to start node: %v", err) + } + logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo()) + + // Trap signal, run forever. 
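+			// RunForever blocks until an interrupt signal arrives, then stops
+			// the node.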
+ n.RunForever() + + return nil + }, + } + + AddNodeFlags(cmd) + return cmd +} diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go new file mode 100644 index 000000000..02ab1a9bb --- /dev/null +++ b/cmd/tendermint/commands/show_node_id.go @@ -0,0 +1,27 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/p2p" +) + +// ShowNodeIDCmd dumps node's ID to the standard output. +var ShowNodeIDCmd = &cobra.Command{ + Use: "show_node_id", + Short: "Show this node's ID", + RunE: showNodeID, +} + +func showNodeID(cmd *cobra.Command, args []string) error { + + nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) + if err != nil { + return err + } + fmt.Println(nodeKey.ID()) + + return nil +} diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go new file mode 100644 index 000000000..54765164b --- /dev/null +++ b/cmd/tendermint/commands/show_validator.go @@ -0,0 +1,22 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/privval" +) + +// ShowValidatorCmd adds capabilities for showing the validator info. +var ShowValidatorCmd = &cobra.Command{ + Use: "show_validator", + Short: "Show this node's validator info", + Run: showValidator, +} + +func showValidator(cmd *cobra.Command, args []string) { + privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile()) + pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey()) + fmt.Println(string(pubKeyJSONBytes)) +} diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go new file mode 100644 index 000000000..f7639fb27 --- /dev/null +++ b/cmd/tendermint/commands/testnet.go @@ -0,0 +1,183 @@ +package commands + +import ( + "fmt" + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cobra" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var ( + nValidators int + nNonValidators int + outputDir string + nodeDirPrefix string + + populatePersistentPeers bool + hostnamePrefix string + startingIPAddress string + p2pPort int +) + +const ( + nodeDirPerm = 0755 +) + +func init() { + TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4, + "Number of validators to initialize the testnet with") + TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0, + "Number of non-validators to initialize the testnet with") + TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet", + "Directory to store initialization data for the testnet") + TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node", + "Prefix the directory name for each node with (node results in node0, node1, ...)") + + TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, + "Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address") + TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", + "Hostname prefix (node results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") + TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", + "Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)") + TestnetFilesCmd.Flags().IntVar(&p2pPort, 
"p2p-port", 26656, + "P2P Port") +} + +// TestnetFilesCmd allows initialisation of files for a Tendermint testnet. +var TestnetFilesCmd = &cobra.Command{ + Use: "testnet", + Short: "Initialize files for a Tendermint testnet", + Long: `testnet will create "v" + "n" number of directories and populate each with +necessary files (private validator, genesis, config, etc.). + +Note, strict routability for addresses is turned off in the config file. + +Optionally, it will fill in persistent_peers list in config file using either hostnames or IPs. + +Example: + + tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2 + `, + RunE: testnetFiles, +} + +func testnetFiles(cmd *cobra.Command, args []string) error { + config := cfg.DefaultConfig() + genVals := make([]types.GenesisValidator, nValidators) + + for i := 0; i < nValidators; i++ { + nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i) + nodeDir := filepath.Join(outputDir, nodeDirName) + config.SetRoot(nodeDir) + + err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } + + initFilesWithConfig(config) + + pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator) + pv := privval.LoadFilePV(pvFile) + genVals[i] = types.GenesisValidator{ + PubKey: pv.GetPubKey(), + Power: 1, + Name: nodeDirName, + } + } + + for i := 0; i < nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators)) + config.SetRoot(nodeDir) + + err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } + + initFilesWithConfig(config) + } + + // Generate genesis doc from generated validators + genDoc := &types.GenesisDoc{ + GenesisTime: time.Now(), + ChainID: "chain-" + cmn.RandStr(6), + Validators: genVals, + } + + // Write genesis file. 
+ for i := 0; i < nValidators+nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) + if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil { + _ = os.RemoveAll(outputDir) + return err + } + } + + if populatePersistentPeers { + err := populatePersistentPeersInConfigAndWriteIt(config) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } + } + + fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators) + return nil +} + +func hostnameOrIP(i int) string { + if startingIPAddress != "" { + ip := net.ParseIP(startingIPAddress) + ip = ip.To4() + if ip == nil { + fmt.Printf("%v: non ipv4 address\n", startingIPAddress) + os.Exit(1) + } + + for j := 0; j < i; j++ { + ip[3]++ + } + return ip.String() + } + + return fmt.Sprintf("%s%d", hostnamePrefix, i) +} + +func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error { + persistentPeers := make([]string, nValidators+nNonValidators) + for i := 0; i < nValidators+nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) + config.SetRoot(nodeDir) + nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) + if err != nil { + return err + } + persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) + } + persistentPeersList := strings.Join(persistentPeers, ",") + + for i := 0; i < nValidators+nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) + config.SetRoot(nodeDir) + config.P2P.PersistentPeers = persistentPeersList + config.P2P.AddrBookStrict = false + + // overwrite default config + cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config) + } + + return nil +} diff --git a/cmd/tendermint/commands/version.go b/cmd/tendermint/commands/version.go new file mode 100644 index 000000000..f9f545e59 --- /dev/null +++ b/cmd/tendermint/commands/version.go @@ -0,0 +1,18 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/version" +) + +// VersionCmd ... 
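+// It prints version.Version to standard output.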
+var VersionCmd = &cobra.Command{ + Use: "version", + Short: "Show version info", + Run: func(cmd *cobra.Command, args []string) { + fmt.Println(version.Version) + }, +} diff --git a/cmd/tendermint/commands/wire.go b/cmd/tendermint/commands/wire.go new file mode 100644 index 000000000..a09019133 --- /dev/null +++ b/cmd/tendermint/commands/wire.go @@ -0,0 +1,12 @@ +package commands + +import ( + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" +) + +var cdc = amino.NewCodec() + +func init() { + crypto.RegisterAmino(cdc) +} diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go new file mode 100644 index 000000000..a5a8d2d80 --- /dev/null +++ b/cmd/tendermint/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/tendermint/tendermint/libs/cli" + + cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" + cfg "github.com/tendermint/tendermint/config" + nm "github.com/tendermint/tendermint/node" +) + +func main() { + rootCmd := cmd.RootCmd + rootCmd.AddCommand( + cmd.GenValidatorCmd, + cmd.InitFilesCmd, + cmd.ProbeUpnpCmd, + cmd.LiteCmd, + cmd.ReplayCmd, + cmd.ReplayConsoleCmd, + cmd.ResetAllCmd, + cmd.ResetPrivValidatorCmd, + cmd.ShowValidatorCmd, + cmd.TestnetFilesCmd, + cmd.ShowNodeIDCmd, + cmd.GenNodeKeyCmd, + cmd.VersionCmd) + + // NOTE: + // Users wishing to: + // * Use an external signer for their validators + // * Supply an in-proc abci app + // * Supply a genesis doc file from another source + // * Provide their own DB implementation + // can copy this file and use something other than the + // DefaultNewNode function + nodeFunc := nm.DefaultNewNode + + // Create & start node + rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) + + cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir))) + if err := cmd.Execute(); err != nil { + panic(err) + } +} diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 000000000..b190853de --- /dev/null +++ b/codecov.yml @@ -0,0 +1,23 @@ +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: + default: + threshold: 1% + patch: on + changes: off + +comment: + layout: "diff, files" + behavior: default + require_changes: no + require_base: no + require_head: yes + +ignore: + - "docs" + - "DOCKER" + - "scripts" diff --git a/config/config.go b/config/config.go new file mode 100644 index 000000000..2df8eb8e8 --- /dev/null +++ b/config/config.go @@ -0,0 +1,657 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + "time" +) + +const ( + // FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep + FuzzModeDrop = iota + // FuzzModeDelay is a mode in which we randomly sleep + FuzzModeDelay +) + +// NOTE: Most of the structs & relevant comments + the +// default configuration options were used to manually +// generate the config.toml. Please reflect any changes +// made here in the defaultConfigTemplate constant in +// config/toml.go +// NOTE: tmlibs/cli must know to look in the config dir! 
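+// NOTE: The default file names below are resolved against the node's root
+// directory (see rootify and SetRoot); e.g. with root ~/.tendermint the
+// config file lives at ~/.tendermint/config/config.toml.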
+var ( + DefaultTendermintDir = ".tendermint" + defaultConfigDir = "config" + defaultDataDir = "data" + + defaultConfigFileName = "config.toml" + defaultGenesisJSONName = "genesis.json" + + defaultPrivValName = "priv_validator.json" + defaultNodeKeyName = "node_key.json" + defaultAddrBookName = "addrbook.json" + + defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName) + defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName) + defaultPrivValPath = filepath.Join(defaultConfigDir, defaultPrivValName) + defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) + defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName) +) + +// Config defines the top level configuration for a Tendermint node +type Config struct { + // Top level options use an anonymous struct + BaseConfig `mapstructure:",squash"` + + // Options for services + RPC *RPCConfig `mapstructure:"rpc"` + P2P *P2PConfig `mapstructure:"p2p"` + Mempool *MempoolConfig `mapstructure:"mempool"` + Consensus *ConsensusConfig `mapstructure:"consensus"` + TxIndex *TxIndexConfig `mapstructure:"tx_index"` + Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` +} + +// DefaultConfig returns a default configuration for a Tendermint node +func DefaultConfig() *Config { + return &Config{ + BaseConfig: DefaultBaseConfig(), + RPC: DefaultRPCConfig(), + P2P: DefaultP2PConfig(), + Mempool: DefaultMempoolConfig(), + Consensus: DefaultConsensusConfig(), + TxIndex: DefaultTxIndexConfig(), + Instrumentation: DefaultInstrumentationConfig(), + } +} + +// TestConfig returns a configuration that can be used for testing +func TestConfig() *Config { + return &Config{ + BaseConfig: TestBaseConfig(), + RPC: TestRPCConfig(), + P2P: TestP2PConfig(), + Mempool: TestMempoolConfig(), + Consensus: TestConsensusConfig(), + TxIndex: TestTxIndexConfig(), + Instrumentation: TestInstrumentationConfig(), + } +} + +// SetRoot sets the RootDir for all Config structs +func (cfg *Config) SetRoot(root string) *Config { + cfg.BaseConfig.RootDir = root + cfg.RPC.RootDir = root + cfg.P2P.RootDir = root + cfg.Mempool.RootDir = root + cfg.Consensus.RootDir = root + return cfg +} + +//----------------------------------------------------------------------------- +// BaseConfig + +// BaseConfig defines the base configuration for a Tendermint node +type BaseConfig struct { + + // chainID is unexposed and immutable but here for convenience + chainID string + + // The root directory for all data. 
+ // This should be set in viper so it can unmarshal into this struct + RootDir string `mapstructure:"home"` + + // Path to the JSON file containing the initial validator set and other meta data + Genesis string `mapstructure:"genesis_file"` + + // Path to the JSON file containing the private key to use as a validator in the consensus protocol + PrivValidator string `mapstructure:"priv_validator_file"` + + // A JSON file containing the private key to use for p2p authenticated encryption + NodeKey string `mapstructure:"node_key_file"` + + // A custom human readable name for this node + Moniker string `mapstructure:"moniker"` + + // TCP or UNIX socket address for Tendermint to listen on for + // connections from an external PrivValidator process + PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"` + + // TCP or UNIX socket address of the ABCI application, + // or the name of an ABCI application compiled in with the Tendermint binary + ProxyApp string `mapstructure:"proxy_app"` + + // Mechanism to connect to the ABCI application: socket | grpc + ABCI string `mapstructure:"abci"` + + // Output level for logging + LogLevel string `mapstructure:"log_level"` + + // TCP or UNIX socket address for the profiling server to listen on + ProfListenAddress string `mapstructure:"prof_laddr"` + + // If this node is many blocks behind the tip of the chain, FastSync + // allows them to catchup quickly by downloading blocks in parallel + // and verifying their commits + FastSync bool `mapstructure:"fast_sync"` + + // If true, query the ABCI app on connecting to a new peer + // so the app can decide if we should keep the connection or not + FilterPeers bool `mapstructure:"filter_peers"` // false + + // Database backend: leveldb | memdb + DBBackend string `mapstructure:"db_backend"` + + // Database directory + DBPath string `mapstructure:"db_dir"` +} + +// DefaultBaseConfig returns a default base configuration for a Tendermint node +func DefaultBaseConfig() BaseConfig { + return BaseConfig{ + Genesis: defaultGenesisJSONPath, + PrivValidator: defaultPrivValPath, + NodeKey: defaultNodeKeyPath, + Moniker: defaultMoniker, + ProxyApp: "tcp://127.0.0.1:26658", + ABCI: "socket", + LogLevel: DefaultPackageLogLevels(), + ProfListenAddress: "", + FastSync: true, + FilterPeers: false, + DBBackend: "leveldb", + DBPath: "data", + } +} + +// TestBaseConfig returns a base configuration for testing a Tendermint node +func TestBaseConfig() BaseConfig { + cfg := DefaultBaseConfig() + cfg.chainID = "tendermint_test" + cfg.ProxyApp = "kvstore" + cfg.FastSync = false + cfg.DBBackend = "memdb" + return cfg +} + +func (cfg BaseConfig) ChainID() string { + return cfg.chainID +} + +// GenesisFile returns the full path to the genesis.json file +func (cfg BaseConfig) GenesisFile() string { + return rootify(cfg.Genesis, cfg.RootDir) +} + +// PrivValidatorFile returns the full path to the priv_validator.json file +func (cfg BaseConfig) PrivValidatorFile() string { + return rootify(cfg.PrivValidator, cfg.RootDir) +} + +// NodeKeyFile returns the full path to the node_key.json file +func (cfg BaseConfig) NodeKeyFile() string { + return rootify(cfg.NodeKey, cfg.RootDir) +} + +// DBDir returns the full path to the database directory +func (cfg BaseConfig) DBDir() string { + return rootify(cfg.DBPath, cfg.RootDir) +} + +// DefaultLogLevel returns a default log level of "error" +func DefaultLogLevel() string { + return "error" +} + +// DefaultPackageLogLevels returns a default log level setting so all packages +// log at 
"error", while the `state` and `main` packages log at "info" +func DefaultPackageLogLevels() string { + return fmt.Sprintf("main:info,state:info,*:%s", DefaultLogLevel()) +} + +//----------------------------------------------------------------------------- +// RPCConfig + +// RPCConfig defines the configuration options for the Tendermint RPC server +type RPCConfig struct { + RootDir string `mapstructure:"home"` + + // TCP or UNIX socket address for the RPC server to listen on + ListenAddress string `mapstructure:"laddr"` + + // TCP or UNIX socket address for the gRPC server to listen on + // NOTE: This server only supports /broadcast_tx_commit + GRPCListenAddress string `mapstructure:"grpc_laddr"` + + // Maximum number of simultaneous connections. + // Does not include RPC (HTTP&WebSocket) connections. See max_open_connections + // If you want to accept more significant number than the default, make sure + // you increase your OS limits. + // 0 - unlimited. + GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"` + + // Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool + Unsafe bool `mapstructure:"unsafe"` + + // Maximum number of simultaneous connections (including WebSocket). + // Does not include gRPC connections. See grpc_max_open_connections + // If you want to accept more significant number than the default, make sure + // you increase your OS limits. + // 0 - unlimited. + MaxOpenConnections int `mapstructure:"max_open_connections"` +} + +// DefaultRPCConfig returns a default configuration for the RPC server +func DefaultRPCConfig() *RPCConfig { + return &RPCConfig{ + ListenAddress: "tcp://0.0.0.0:26657", + + GRPCListenAddress: "", + GRPCMaxOpenConnections: 900, // no ipv4 + + Unsafe: false, + // should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files} + // 1024 - 50 - 50 = 924 = ~900 + MaxOpenConnections: 900, + } +} + +// TestRPCConfig returns a configuration for testing the RPC server +func TestRPCConfig() *RPCConfig { + cfg := DefaultRPCConfig() + cfg.ListenAddress = "tcp://0.0.0.0:36657" + cfg.GRPCListenAddress = "tcp://0.0.0.0:36658" + cfg.Unsafe = true + return cfg +} + +//----------------------------------------------------------------------------- +// P2PConfig + +// P2PConfig defines the configuration options for the Tendermint peer-to-peer networking layer +type P2PConfig struct { + RootDir string `mapstructure:"home"` + + // Address to listen for incoming connections + ListenAddress string `mapstructure:"laddr"` + + // Address to advertise to peers for them to dial + ExternalAddress string `mapstructure:"external_address"` + + // Comma separated list of seed nodes to connect to + // We only use these if we can’t connect to peers in the addrbook + Seeds string `mapstructure:"seeds"` + + // Comma separated list of nodes to keep persistent connections to + // Do not add private peers to this list if you don't want them advertised + PersistentPeers string `mapstructure:"persistent_peers"` + + // UPNP port forwarding + UPNP bool `mapstructure:"upnp"` + + // Path to address book + AddrBook string `mapstructure:"addr_book_file"` + + // Set true for strict address routability rules + AddrBookStrict bool `mapstructure:"addr_book_strict"` + + // Maximum number of peers to connect to + MaxNumPeers int `mapstructure:"max_num_peers"` + + // Time to wait before flushing messages out on the connection, in ms + FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` + + // Maximum size of a message packet 
payload, in bytes
+	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
+
+	// Rate at which packets can be sent, in bytes/second
+	SendRate int64 `mapstructure:"send_rate"`
+
+	// Rate at which packets can be received, in bytes/second
+	RecvRate int64 `mapstructure:"recv_rate"`
+
+	// Set true to enable the peer-exchange reactor
+	PexReactor bool `mapstructure:"pex"`
+
+	// Seed mode, in which the node constantly crawls the network and looks for
+	// peers. If another node asks it for addresses, it responds and disconnects.
+	//
+	// Does not work if the peer-exchange reactor is disabled.
+	SeedMode bool `mapstructure:"seed_mode"`
+
+	// Comma separated list of peer IDs to keep private (will not be gossiped to
+	// other peers)
+	PrivatePeerIDs string `mapstructure:"private_peer_ids"`
+
+	// Toggle to disable guard against peers connecting from the same IP.
+	AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"`
+
+	// Peer connection configuration.
+	HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
+	DialTimeout      time.Duration `mapstructure:"dial_timeout"`
+
+	// Testing params.
+	// Force dial to fail
+	TestDialFail bool `mapstructure:"test_dial_fail"`
+	// Fuzz connection
+	TestFuzz       bool            `mapstructure:"test_fuzz"`
+	TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"`
+}
+
+// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
+func DefaultP2PConfig() *P2PConfig {
+	return &P2PConfig{
+		ListenAddress:           "tcp://0.0.0.0:26656",
+		ExternalAddress:         "",
+		UPNP:                    false,
+		AddrBook:                defaultAddrBookPath,
+		AddrBookStrict:          true,
+		MaxNumPeers:             50,
+		FlushThrottleTimeout:    100,
+		MaxPacketMsgPayloadSize: 1024,   // 1 kB
+		SendRate:                512000, // 500 kB/s
+		RecvRate:                512000, // 500 kB/s
+		PexReactor:              true,
+		SeedMode:                false,
+		AllowDuplicateIP:        true, // kept true for now so the change is non-breaking
+		HandshakeTimeout:        20 * time.Second,
+		DialTimeout:             3 * time.Second,
+		TestDialFail:            false,
+		TestFuzz:                false,
+		TestFuzzConfig:          DefaultFuzzConnConfig(),
+	}
+}
+
+// TestP2PConfig returns a configuration for testing the peer-to-peer layer
+func TestP2PConfig() *P2PConfig {
+	cfg := DefaultP2PConfig()
+	cfg.ListenAddress = "tcp://0.0.0.0:36656"
+	cfg.FlushThrottleTimeout = 10
+	cfg.AllowDuplicateIP = true
+	return cfg
+}
+
+// AddrBookFile returns the full path to the address book
+func (cfg *P2PConfig) AddrBookFile() string {
+	return rootify(cfg.AddrBook, cfg.RootDir)
+}
+
+// FuzzConnConfig is a FuzzedConnection configuration.
+type FuzzConnConfig struct {
+	Mode         int
+	MaxDelay     time.Duration
+	ProbDropRW   float64
+	ProbDropConn float64
+	ProbSleep    float64
+}
+
+// DefaultFuzzConnConfig returns the default config.
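+// (FuzzModeDrop with a 20% chance of dropping each read/write, no
+// connection drops or sleeps, and a 3s max delay).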
+func DefaultFuzzConnConfig() *FuzzConnConfig { + return &FuzzConnConfig{ + Mode: FuzzModeDrop, + MaxDelay: 3 * time.Second, + ProbDropRW: 0.2, + ProbDropConn: 0.00, + ProbSleep: 0.00, + } +} + +//----------------------------------------------------------------------------- +// MempoolConfig + +// MempoolConfig defines the configuration options for the Tendermint mempool +type MempoolConfig struct { + RootDir string `mapstructure:"home"` + Recheck bool `mapstructure:"recheck"` + RecheckEmpty bool `mapstructure:"recheck_empty"` + Broadcast bool `mapstructure:"broadcast"` + WalPath string `mapstructure:"wal_dir"` + Size int `mapstructure:"size"` + CacheSize int `mapstructure:"cache_size"` +} + +// DefaultMempoolConfig returns a default configuration for the Tendermint mempool +func DefaultMempoolConfig() *MempoolConfig { + return &MempoolConfig{ + Recheck: true, + RecheckEmpty: true, + Broadcast: true, + WalPath: filepath.Join(defaultDataDir, "mempool.wal"), + Size: 100000, + CacheSize: 100000, + } +} + +// TestMempoolConfig returns a configuration for testing the Tendermint mempool +func TestMempoolConfig() *MempoolConfig { + cfg := DefaultMempoolConfig() + cfg.CacheSize = 1000 + return cfg +} + +// WalDir returns the full path to the mempool's write-ahead log +func (cfg *MempoolConfig) WalDir() string { + return rootify(cfg.WalPath, cfg.RootDir) +} + +//----------------------------------------------------------------------------- +// ConsensusConfig + +// ConsensusConfig defines the configuration for the Tendermint consensus service, +// including timeouts and details about the WAL and the block structure. +type ConsensusConfig struct { + RootDir string `mapstructure:"home"` + WalPath string `mapstructure:"wal_file"` + walFile string // overrides WalPath if set + + // All timeouts are in milliseconds + TimeoutPropose int `mapstructure:"timeout_propose"` + TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"` + TimeoutPrevote int `mapstructure:"timeout_prevote"` + TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"` + TimeoutPrecommit int `mapstructure:"timeout_precommit"` + TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"` + TimeoutCommit int `mapstructure:"timeout_commit"` + + // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) + SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` + + // EmptyBlocks mode and possible interval between empty blocks in seconds + CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` + CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"` + + // Reactor sleep duration parameters are in milliseconds + PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"` + PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"` +} + +// DefaultConsensusConfig returns a default configuration for the consensus service +func DefaultConsensusConfig() *ConsensusConfig { + return &ConsensusConfig{ + WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), + TimeoutPropose: 3000, + TimeoutProposeDelta: 500, + TimeoutPrevote: 1000, + TimeoutPrevoteDelta: 500, + TimeoutPrecommit: 1000, + TimeoutPrecommitDelta: 500, + TimeoutCommit: 1000, + SkipTimeoutCommit: false, + CreateEmptyBlocks: true, + CreateEmptyBlocksInterval: 0, + PeerGossipSleepDuration: 100, + PeerQueryMaj23SleepDuration: 2000, + } +} + +// TestConsensusConfig returns a configuration for testing the consensus service +func TestConsensusConfig() *ConsensusConfig { + cfg := 
DefaultConsensusConfig()
+	cfg.TimeoutPropose = 100
+	cfg.TimeoutProposeDelta = 1
+	cfg.TimeoutPrevote = 10
+	cfg.TimeoutPrevoteDelta = 1
+	cfg.TimeoutPrecommit = 10
+	cfg.TimeoutPrecommitDelta = 1
+	cfg.TimeoutCommit = 10
+	cfg.SkipTimeoutCommit = true
+	cfg.PeerGossipSleepDuration = 5
+	cfg.PeerQueryMaj23SleepDuration = 250
+	return cfg
+}
+
+// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
+func (cfg *ConsensusConfig) WaitForTxs() bool {
+	return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
+}
+
+// EmptyBlocksInterval returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available
+func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration {
+	return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second
+}
+
+// Propose returns the amount of time to wait for a proposal.
+// The wait grows linearly with the round, e.g. with the defaults above,
+// round 2 waits 3000 + 500*2 = 4000 ms.
+func (cfg *ConsensusConfig) Propose(round int) time.Duration {
+	return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond
+}
+
+// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
+func (cfg *ConsensusConfig) Prevote(round int) time.Duration {
+	return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond
+}
+
+// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
+func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
+	return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
+}
+
+// Commit returns the time at which the node is done waiting for straggler votes after receiving +2/3 precommits for a single block (i.e. a commit).
+func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
+	return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
+}
+
+// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor
+func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration {
+	return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond
+}
+
+// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor
+func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
+	return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
+}
+
+// WalFile returns the full path to the write-ahead log file
+func (cfg *ConsensusConfig) WalFile() string {
+	if cfg.walFile != "" {
+		return cfg.walFile
+	}
+	return rootify(cfg.WalPath, cfg.RootDir)
+}
+
+// SetWalFile sets the path to the write-ahead log file
+func (cfg *ConsensusConfig) SetWalFile(walFile string) {
+	cfg.walFile = walFile
+}
+
+//-----------------------------------------------------------------------------
+// TxIndexConfig
+
+// TxIndexConfig defines the configuration for the transaction
+// indexer, including tags to index.
+type TxIndexConfig struct {
+	// What indexer to use for transactions
+	//
+	// Options:
+	//   1) "null"
+	//   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+	Indexer string `mapstructure:"indexer"`
+
+	// Comma-separated list of tags to index (by default the only tag is tx hash)
+	//
+	// It's recommended to index only a subset of tags due to possible memory
+	// bloat. This, of course, depends on the indexer's DB and the volume of
+	// transactions.
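+	// Example (hypothetical app-defined tags): "account.name,account.owner"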
+	IndexTags string `mapstructure:"index_tags"`
+
+	// When set to true, tells indexer to index all tags. Note this may not be
+	// desirable (see the comment above). IndexTags takes precedence over
+	// IndexAllTags (i.e. when both are given, only IndexTags will be indexed).
+	IndexAllTags bool `mapstructure:"index_all_tags"`
+}
+
+// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
+func DefaultTxIndexConfig() *TxIndexConfig {
+	return &TxIndexConfig{
+		Indexer:      "kv",
+		IndexTags:    "",
+		IndexAllTags: false,
+	}
+}
+
+// TestTxIndexConfig returns a configuration for testing the transaction indexer.
+func TestTxIndexConfig() *TxIndexConfig {
+	return DefaultTxIndexConfig()
+}
+
+//-----------------------------------------------------------------------------
+// InstrumentationConfig
+
+// InstrumentationConfig defines the configuration for metrics reporting.
+type InstrumentationConfig struct {
+	// When true, Prometheus metrics are served under /metrics on
+	// PrometheusListenAddr.
+	// Check out the documentation for the list of available metrics.
+	Prometheus bool `mapstructure:"prometheus"`
+
+	// Address to listen for Prometheus collector(s) connections.
+	PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`
+
+	// Maximum number of simultaneous connections.
+	// If you want to accept a larger number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	MaxOpenConnections int `mapstructure:"max_open_connections"`
+}
+
+// DefaultInstrumentationConfig returns a default configuration for metrics
+// reporting.
+func DefaultInstrumentationConfig() *InstrumentationConfig {
+	return &InstrumentationConfig{
+		Prometheus:           false,
+		PrometheusListenAddr: ":26660",
+		MaxOpenConnections:   3,
+	}
+}
+
+// TestInstrumentationConfig returns a configuration for testing metrics
+// reporting.
+func TestInstrumentationConfig() *InstrumentationConfig {
+	return DefaultInstrumentationConfig()
+}
+
+//-----------------------------------------------------------------------------
+// Utils
+
+// helper function to make config creation independent of root dir
+func rootify(path, root string) string {
+	if filepath.IsAbs(path) {
+		return path
+	}
+	return filepath.Join(root, path)
+}
+
+//-----------------------------------------------------------------------------
+// Moniker
+
+var defaultMoniker = getDefaultMoniker()
+
+// getDefaultMoniker returns a default moniker, which is the host name. If the
+// runtime fails to get the host name, "anonymous" will be returned.
+func getDefaultMoniker() string {
+	moniker, err := os.Hostname()
+	if err != nil {
+		moniker = "anonymous"
+	}
+	return moniker
+}
diff --git a/config/config_test.go b/config/config_test.go
new file mode 100644
index 000000000..6379960fa
--- /dev/null
+++ b/config/config_test.go
@@ -0,0 +1,28 @@
+package config
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDefaultConfig(t *testing.T) {
+	assert := assert.New(t)
+
+	// set up some defaults
+	cfg := DefaultConfig()
+	assert.NotNil(cfg.P2P)
+	assert.NotNil(cfg.Mempool)
+	assert.NotNil(cfg.Consensus)
+
+	// check the root dir stuff...
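+	// (rootify joins relative paths to the root dir, but leaves absolute
+	// paths such as DBPath below untouched)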
+ cfg.SetRoot("/foo") + cfg.Genesis = "bar" + cfg.DBPath = "/opt/data" + cfg.Mempool.WalPath = "wal/mem/" + + assert.Equal("/foo/bar", cfg.GenesisFile()) + assert.Equal("/opt/data", cfg.DBDir()) + assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir()) + +} diff --git a/config/toml.go b/config/toml.go new file mode 100644 index 000000000..858d9b31d --- /dev/null +++ b/config/toml.go @@ -0,0 +1,349 @@ +package config + +import ( + "bytes" + "os" + "path/filepath" + "text/template" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +var configTemplate *template.Template + +func init() { + var err error + if configTemplate, err = template.New("configFileTemplate").Parse(defaultConfigTemplate); err != nil { + panic(err) + } +} + +/****** these are for production settings ***********/ + +// EnsureRoot creates the root, config, and data directories if they don't exist, +// and panics if it fails. +func EnsureRoot(rootDir string) { + if err := cmn.EnsureDir(rootDir, 0700); err != nil { + cmn.PanicSanity(err.Error()) + } + if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil { + cmn.PanicSanity(err.Error()) + } + if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil { + cmn.PanicSanity(err.Error()) + } + + configFilePath := filepath.Join(rootDir, defaultConfigFilePath) + + // Write default config file if missing. + if !cmn.FileExists(configFilePath) { + writeDefaultConfigFile(configFilePath) + } +} + +// XXX: this func should probably be called by cmd/tendermint/commands/init.go +// alongside the writing of the genesis.json and priv_validator.json +func writeDefaultConfigFile(configFilePath string) { + WriteConfigFile(configFilePath, DefaultConfig()) +} + +// WriteConfigFile renders config using the template and writes it to configFilePath. +func WriteConfigFile(configFilePath string, config *Config) { + var buffer bytes.Buffer + + if err := configTemplate.Execute(&buffer, config); err != nil { + panic(err) + } + + cmn.MustWriteFile(configFilePath, buffer.Bytes(), 0644) +} + +// Note: any changes to the comments/variables/mapstructure +// must be reflected in the appropriate struct in config/config.go +const defaultConfigTemplate = `# This is a TOML config file. 
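+# It is rendered from the Config struct by WriteConfigFile via text/template.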
+# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "{{ .BaseConfig.ProxyApp }}" + +# A custom human readable name for this node +moniker = "{{ .BaseConfig.Moniker }}" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = {{ .BaseConfig.FastSync }} + +# Database backend: leveldb | memdb +db_backend = "{{ .BaseConfig.DBBackend }}" + +# Database directory +db_path = "{{ js .BaseConfig.DBPath }}" + +# Output level for logging, including package level options +log_level = "{{ .BaseConfig.LogLevel }}" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "{{ js .BaseConfig.Genesis }}" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "{{ js .BaseConfig.PrivValidator }}" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "{{ js .BaseConfig.NodeKey}}" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "{{ .BaseConfig.ABCI }}" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "{{ .BaseConfig.ProfListenAddress }}" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = {{ .BaseConfig.FilterPeers }} + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "{{ .RPC.ListenAddress }}" + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "{{ .RPC.GRPCListenAddress }}" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }} + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = {{ .RPC.Unsafe }} + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = {{ .RPC.MaxOpenConnections }} + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "{{ .P2P.ListenAddress }}" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. 
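+# e.g. set this to your public address if this node is behind NAT.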
+external_address = "{{ .P2P.ExternalAddress }}"
+
+# Comma separated list of seed nodes to connect to
+seeds = "{{ .P2P.Seeds }}"
+
+# Comma separated list of nodes to keep persistent connections to
+# Do not add private peers to this list if you don't want them advertised
+persistent_peers = "{{ .P2P.PersistentPeers }}"
+
+# UPNP port forwarding
+upnp = {{ .P2P.UPNP }}
+
+# Path to address book
+addr_book_file = "{{ js .P2P.AddrBook }}"
+
+# Set true for strict address routability rules
+addr_book_strict = {{ .P2P.AddrBookStrict }}
+
+# Time to wait before flushing messages out on the connection, in ms
+flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
+
+# Maximum number of peers to connect to
+max_num_peers = {{ .P2P.MaxNumPeers }}
+
+# Maximum size of a message packet payload, in bytes
+max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
+
+# Rate at which packets can be sent, in bytes/second
+send_rate = {{ .P2P.SendRate }}
+
+# Rate at which packets can be received, in bytes/second
+recv_rate = {{ .P2P.RecvRate }}
+
+# Set true to enable the peer-exchange reactor
+pex = {{ .P2P.PexReactor }}
+
+# Seed mode, in which the node constantly crawls the network and looks for
+# peers. If another node asks it for addresses, it responds and disconnects.
+#
+# Does not work if the peer-exchange reactor is disabled.
+seed_mode = {{ .P2P.SeedMode }}
+
+# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
+
+##### mempool configuration options #####
+[mempool]
+
+recheck = {{ .Mempool.Recheck }}
+recheck_empty = {{ .Mempool.RecheckEmpty }}
+broadcast = {{ .Mempool.Broadcast }}
+wal_dir = "{{ js .Mempool.WalPath }}"
+
+# size of the mempool
+size = {{ .Mempool.Size }}
+
+# size of the cache (used to filter transactions we saw earlier)
+cache_size = {{ .Mempool.CacheSize }}
+
+##### consensus configuration options #####
+[consensus]
+
+wal_file = "{{ js .Consensus.WalPath }}"
+
+# All timeouts are in milliseconds
+timeout_propose = {{ .Consensus.TimeoutPropose }}
+timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }}
+timeout_prevote = {{ .Consensus.TimeoutPrevote }}
+timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }}
+timeout_precommit = {{ .Consensus.TimeoutPrecommit }}
+timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }}
+timeout_commit = {{ .Consensus.TimeoutCommit }}
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
+
+# EmptyBlocks mode and possible interval between empty blocks in seconds
+create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
+create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
+
+# Reactor sleep duration parameters are in milliseconds
+peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }}
+peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
+
+##### transactions indexer configuration options #####
+[tx_index]
+
+# What indexer to use for transactions
+#
+# Options:
+#   1) "null"
+#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+indexer = "{{ .TxIndex.Indexer }}"
+
+# Comma-separated list of tags to index (by default the only tag is tx hash)
+#
+# It's recommended to index only a subset of tags due to possible memory
+# bloat. This, of course, depends on the indexer's DB and the volume of
+# transactions.
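+# Example (hypothetical app-defined tags): index_tags = "account.name"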
+index_tags = "{{ .TxIndex.IndexTags }}"
+
+# When set to true, tells indexer to index all tags. Note this may not be
+# desirable (see the comment above). IndexTags takes precedence over
+# IndexAllTags (i.e. when both are given, only IndexTags will be indexed).
+index_all_tags = {{ .TxIndex.IndexAllTags }}
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = {{ .Instrumentation.Prometheus }}
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
+`
+
+/****** these are for test settings ***********/
+
+func ResetTestRoot(testName string) *Config {
+	rootDir := os.ExpandEnv("$HOME/.tendermint_test")
+	rootDir = filepath.Join(rootDir, testName)
+	// Remove ~/.tendermint_test_bak
+	if cmn.FileExists(rootDir + "_bak") {
+		if err := os.RemoveAll(rootDir + "_bak"); err != nil {
+			cmn.PanicSanity(err.Error())
+		}
+	}
+	// Move ~/.tendermint_test to ~/.tendermint_test_bak
+	if cmn.FileExists(rootDir) {
+		if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
+			cmn.PanicSanity(err.Error())
+		}
+	}
+	// Create new dir
+	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
+
+	baseConfig := DefaultBaseConfig()
+	configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
+	genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
+	privFilePath := filepath.Join(rootDir, baseConfig.PrivValidator)
+
+	// Write default config file if missing.
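+	// (a fixed test genesis doc and priv validator are written below as
+	// well, so tests run against a deterministic validator set)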
+ if !cmn.FileExists(configFilePath) { + writeDefaultConfigFile(configFilePath) + } + if !cmn.FileExists(genesisFilePath) { + cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644) + } + // we always overwrite the priv val + cmn.MustWriteFile(privFilePath, []byte(testPrivValidator), 0644) + + config := TestConfig().SetRoot(rootDir) + return config +} + +var testGenesis = `{ + "genesis_time": "0001-01-01T00:00:00.000Z", + "chain_id": "tendermint_test", + "validators": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE=" + }, + "power": "10", + "name": "" + } + ], + "app_hash": "" +}` + +var testPrivValidator = `{ + "address": "A3258DCBF45DCA0DF052981870F2D1441A36D145", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE=" + }, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ==" + }, + "last_height": "0", + "last_round": "0", + "last_step": 0 +}` diff --git a/config/toml_test.go b/config/toml_test.go new file mode 100644 index 000000000..a1637f671 --- /dev/null +++ b/config/toml_test.go @@ -0,0 +1,94 @@ +package config + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func ensureFiles(t *testing.T, rootDir string, files ...string) { + for _, f := range files { + p := rootify(rootDir, f) + _, err := os.Stat(p) + assert.Nil(t, err, p) + } +} + +func TestEnsureRoot(t *testing.T) { + require := require.New(t) + + // setup temp dir for test + tmpDir, err := ioutil.TempDir("", "config-test") + require.Nil(err) + defer os.RemoveAll(tmpDir) // nolint: errcheck + + // create root dir + EnsureRoot(tmpDir) + + // make sure config is set properly + data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) + require.Nil(err) + + if !checkConfig(string(data)) { + t.Fatalf("config file missing some information") + } + + ensureFiles(t, tmpDir, "data") +} + +func TestEnsureTestRoot(t *testing.T) { + require := require.New(t) + + testName := "ensureTestRoot" + + // create root dir + cfg := ResetTestRoot(testName) + rootDir := cfg.RootDir + + // make sure config is set properly + data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) + require.Nil(err) + + if !checkConfig(string(data)) { + t.Fatalf("config file missing some information") + } + + // TODO: make sure the cfg returned and testconfig are the same! 
+	baseConfig := DefaultBaseConfig()
+	ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidator)
+}
+
+func checkConfig(configFile string) bool {
+	// list of words we expect in the config
+	var elems = []string{
+		"moniker",
+		"seeds",
+		"proxy_app",
+		"fast_sync",
+		"create_empty_blocks",
+		"peer",
+		"timeout",
+		"broadcast",
+		"send",
+		"addr",
+		"wal",
+		"propose",
+		"max",
+		"genesis",
+	}
+	// the config is valid only if every expected word is present
+	for _, e := range elems {
+		if !strings.Contains(configFile, e) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/consensus/README.md b/consensus/README.md
new file mode 100644
index 000000000..1111317d5
--- /dev/null
+++ b/consensus/README.md
@@ -0,0 +1 @@
+See the [consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus) for more information.
diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go
new file mode 100644
index 000000000..5360a92c9
--- /dev/null
+++ b/consensus/byzantine_test.go
@@ -0,0 +1,267 @@
+package consensus
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+func init() {
+	config = ResetConfig("consensus_byzantine_test")
+}
+
+//----------------------------------------------
+// byzantine failures
+
+// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
+// The byzantine validator sends conflicting proposals into A and B,
+// and prevotes/precommits on both of them.
+// B sees a commit, A doesn't.
+// The byzantine validator refuses to prevote through the normal code path.
+// Heal the partition and ensure A sees the commit.
+func TestByzantine(t *testing.T) {
+	N := 4
+	logger := consensusLogger().With("test", "byzantine")
+	css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
+
+	// give the byzantine validator a normal ticker
+	ticker := NewTimeoutTicker()
+	ticker.SetLogger(css[0].Logger)
+	css[0].SetTimeoutTicker(ticker)
+
+	switches := make([]*p2p.Switch, N)
+	p2pLogger := logger.With("module", "p2p")
+	for i := 0; i < N; i++ {
+		switches[i] = p2p.NewSwitch(config.P2P)
+		switches[i].SetLogger(p2pLogger.With("validator", i))
+	}
+
+	eventChans := make([]chan interface{}, N)
+	reactors := make([]p2p.Reactor, N)
+	for i := 0; i < N; i++ {
+		// make first val byzantine
+		if i == 0 {
+			// NOTE: Now, test validators are MockPV, which by default doesn't
+			// do any safety checks.
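+			// The byzantine behaviour below relies on being able to sign
+			// conflicting proposals and votes for the same height/round.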
+ css[i].privValidator.(*types.MockPV).DisableChecks() + css[i].decideProposal = func(j int) func(int64, int) { + return func(height int64, round int) { + byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) + } + }(i) + css[i].doPrevote = func(height int64, round int) {} + } + + eventBus := css[i].eventBus + eventBus.SetLogger(logger.With("module", "events", "validator", i)) + + eventChans[i] = make(chan interface{}, 1) + err := eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i]) + require.NoError(t, err) + + conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states + conR.SetLogger(logger.With("validator", i)) + conR.SetEventBus(eventBus) + + var conRI p2p.Reactor // nolint: gotype, gosimple + conRI = conR + + // make first val byzantine + if i == 0 { + conRI = NewByzantineReactor(conR) + } + + reactors[i] = conRI + } + + defer func() { + for _, r := range reactors { + if rr, ok := r.(*ByzantineReactor); ok { + rr.reactor.Switch.Stop() + } else { + r.(*ConsensusReactor).Switch.Stop() + } + } + }() + + p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { + // ignore new switch s, we already made ours + switches[i].AddReactor("CONSENSUS", reactors[i]) + return switches[i] + }, func(sws []*p2p.Switch, i, j int) { + // the network starts partitioned with globally active adversary + if i != 0 { + return + } + p2p.Connect2Switches(sws, i, j) + }) + + // start the non-byz state machines. + // note these must be started before the byz + for i := 1; i < N; i++ { + cr := reactors[i].(*ConsensusReactor) + cr.SwitchToConsensus(cr.conS.GetState(), 0) + } + + // start the byzantine state machine + byzR := reactors[0].(*ByzantineReactor) + s := byzR.reactor.conS.GetState() + byzR.reactor.SwitchToConsensus(s, 0) + + // byz proposer sends one block to peers[0] + // and the other block to peers[1] and peers[2]. + // note peers and switches order don't match. + peers := switches[0].Peers().List() + + // partition A + ind0 := getSwitchIndex(switches, peers[0]) + + // partition B + ind1 := getSwitchIndex(switches, peers[1]) + ind2 := getSwitchIndex(switches, peers[2]) + p2p.Connect2Switches(switches, ind1, ind2) + + // wait for someone in the big partition (B) to make a block + <-eventChans[ind2] + + t.Log("A block has been committed. Healing partition") + p2p.Connect2Switches(switches, ind0, ind1) + p2p.Connect2Switches(switches, ind0, ind2) + + // wait till everyone makes the first new block + // (one of them already has) + wg := new(sync.WaitGroup) + wg.Add(2) + for i := 1; i < N-1; i++ { + go func(j int) { + <-eventChans[j] + wg.Done() + }(i) + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + tick := time.NewTicker(time.Second * 10) + select { + case <-done: + case <-tick.C: + for i, reactor := range reactors { + t.Log(cmn.Fmt("Consensus Reactor %v", i)) + t.Log(cmn.Fmt("%v", reactor)) + } + t.Fatalf("Timed out waiting for all validators to commit first block") + } +} + +//------------------------------- +// byzantine consensus functions + +func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *ConsensusState, sw *p2p.Switch) { + // byzantine user should create two proposals and try to split the vote. + // Avoid sending on internalMsgQueue and running consensus state. + + // Create a new proposal block from state/txs from the mempool. 
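+	// Two proposals are built for the same height and round below; half the
+	// peers receive each one, so the network's prevotes split.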
+ block1, blockParts1 := cs.createProposalBlock() + polRound, polBlockID := cs.Votes.POLInfo() + proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID) + if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil { + t.Error(err) + } + + // Create a new proposal block from state/txs from the mempool. + block2, blockParts2 := cs.createProposalBlock() + polRound, polBlockID = cs.Votes.POLInfo() + proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID) + if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil { + t.Error(err) + } + + block1Hash := block1.Hash() + block2Hash := block2.Hash() + + // broadcast conflicting proposals/block parts to peers + peers := sw.Peers().List() + t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers)) + for i, peer := range peers { + if i < len(peers)/2 { + go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1) + } else { + go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2) + } + } +} + +func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) { + // proposal + msg := &ProposalMessage{Proposal: proposal} + peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) + + // parts + for i := 0; i < parts.Total(); i++ { + part := parts.GetPart(i) + msg := &BlockPartMessage{ + Height: height, // This tells peer that this part applies to us. + Round: round, // This tells peer that this part applies to us. + Part: part, + } + peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) + } + + // votes + cs.mtx.Lock() + prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header()) + precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header()) + cs.mtx.Unlock() + + peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote})) + peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{precommit})) +} + +//---------------------------------------- +// byzantine consensus reactor + +type ByzantineReactor struct { + cmn.Service + reactor *ConsensusReactor +} + +func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor { + return &ByzantineReactor{ + Service: conR, + reactor: conR, + } +} + +func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } +func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() } +func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { + if !br.reactor.IsRunning() { + return + } + + // Create peerState for peer + peerState := NewPeerState(peer).SetLogger(br.reactor.Logger) + peer.Set(types.PeerStateKey, peerState) + + // Send our state to peer. + // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). 
+ if !br.reactor.fastSync { + br.reactor.sendNewRoundStepMessages(peer) + } +} +func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { + br.reactor.RemovePeer(peer, reason) +} +func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + br.reactor.Receive(chID, peer, msgBytes) +} diff --git a/consensus/common_test.go b/consensus/common_test.go new file mode 100644 index 000000000..2df226ba1 --- /dev/null +++ b/consensus/common_test.go @@ -0,0 +1,495 @@ +package consensus + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path" + "sort" + "sync" + "testing" + "time" + + abcicli "github.com/tendermint/tendermint/abci/client" + abci "github.com/tendermint/tendermint/abci/types" + bc "github.com/tendermint/tendermint/blockchain" + cfg "github.com/tendermint/tendermint/config" + cstypes "github.com/tendermint/tendermint/consensus/types" + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/privval" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/abci/example/counter" + "github.com/tendermint/tendermint/abci/example/kvstore" + + "github.com/go-kit/kit/log/term" +) + +const ( + testSubscriber = "test-client" +) + +// genesis, chain_id, priv_val +var config *cfg.Config // NOTE: must be reset for each _test.go file +var ensureTimeout = time.Second * 1 // must be in seconds because CreateEmptyBlocksInterval is + +func ensureDir(dir string, mode os.FileMode) { + if err := cmn.EnsureDir(dir, mode); err != nil { + panic(err) + } +} + +func ResetConfig(name string) *cfg.Config { + return cfg.ResetTestRoot(name) +} + +//------------------------------------------------------------------------------- +// validator stub (a kvstore consensus peer we control) + +type validatorStub struct { + Index int // Validator index. NOTE: we don't assume validator set changes. 
+ Height int64 + Round int + types.PrivValidator +} + +var testMinPower int64 = 10 + +func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub { + return &validatorStub{ + Index: valIndex, + PrivValidator: privValidator, + } +} + +func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { + vote := &types.Vote{ + ValidatorIndex: vs.Index, + ValidatorAddress: vs.PrivValidator.GetAddress(), + Height: vs.Height, + Round: vs.Round, + Timestamp: time.Now().UTC(), + Type: voteType, + BlockID: types.BlockID{hash, header}, + } + err := vs.PrivValidator.SignVote(config.ChainID(), vote) + return vote, err +} + +// Sign vote for type/hash/header +func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote { + v, err := vs.signVote(voteType, hash, header) + if err != nil { + panic(fmt.Errorf("failed to sign vote: %v", err)) + } + return v +} + +func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { + votes := make([]*types.Vote, len(vss)) + for i, vs := range vss { + votes[i] = signVote(vs, voteType, hash, header) + } + return votes +} + +func incrementHeight(vss ...*validatorStub) { + for _, vs := range vss { + vs.Height++ + } +} + +func incrementRound(vss ...*validatorStub) { + for _, vs := range vss { + vs.Round++ + } +} + +//------------------------------------------------------------------------------- +// Functions for transitioning the consensus state + +func startTestRound(cs *ConsensusState, height int64, round int) { + cs.enterNewRound(height, round) + cs.startRoutines(0) +} + +// Create proposal block from cs1 but sign it with vs +func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) { + block, blockParts := cs1.createProposalBlock() + if block == nil { // on error + panic("error creating proposal block") + } + + // Make proposal + polRound, polBlockID := cs1.Votes.POLInfo() + proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) + if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil { + panic(err) + } + return +} + +func addVotes(to *ConsensusState, votes ...*types.Vote) { + for _, vote := range votes { + to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}} + } +} + +func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) { + votes := signVotes(voteType, hash, header, vss...) + addVotes(to, votes...) 
+} + +func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) { + prevotes := cs.Votes.Prevotes(round) + var vote *types.Vote + if vote = prevotes.GetByAddress(privVal.GetAddress()); vote == nil { + panic("Failed to find prevote from validator") + } + if blockHash == nil { + if vote.BlockID.Hash != nil { + panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash)) + } + } else { + if !bytes.Equal(vote.BlockID.Hash, blockHash) { + panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash)) + } + } +} + +func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) { + votes := cs.LastCommit + var vote *types.Vote + if vote = votes.GetByAddress(privVal.GetAddress()); vote == nil { + panic("Failed to find precommit from validator") + } + if !bytes.Equal(vote.BlockID.Hash, blockHash) { + panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash)) + } +} + +func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) { + precommits := cs.Votes.Precommits(thisRound) + var vote *types.Vote + if vote = precommits.GetByAddress(privVal.GetAddress()); vote == nil { + panic("Failed to find precommit from validator") + } + + if votedBlockHash == nil { + if vote.BlockID.Hash != nil { + panic("Expected precommit to be for nil") + } + } else { + if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) { + panic("Expected precommit to be for proposal block") + } + } + + if lockedBlockHash == nil { + if cs.LockedRound != lockRound || cs.LockedBlock != nil { + panic(fmt.Sprintf("Expected to be locked on nil at round %d. Got locked at round %d with block %v", lockRound, cs.LockedRound, cs.LockedBlock)) + } + } else { + if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) { + panic(fmt.Sprintf("Expected block to be locked on round %d, got %d. 
Got locked block %X, expected %X", lockRound, cs.LockedRound, cs.LockedBlock.Hash(), lockedBlockHash)) + } + } + +} + +func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) { + // verify the prevote + validatePrevote(t, cs, thisRound, privVal, votedBlockHash) + // verify precommit + cs.mtx.Lock() + validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) + cs.mtx.Unlock() +} + +// genesis +func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} { + voteCh0 := make(chan interface{}) + err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh0) + if err != nil { + panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) + } + voteCh := make(chan interface{}) + go func() { + for v := range voteCh0 { + vote := v.(types.EventDataVote) + // we only fire for our own votes + if bytes.Equal(addr, vote.Vote.ValidatorAddress) { + voteCh <- v + } + } + }() + return voteCh +} + +//------------------------------------------------------------------------------- +// consensus states + +func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { + return newConsensusStateWithConfig(config, state, pv, app) +} + +func newConsensusStateWithConfig(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { + blockDB := dbm.NewMemDB() + return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB) +} + +func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState { + // Get BlockStore + blockStore := bc.NewBlockStore(blockDB) + + // one for mempool, one for consensus + mtx := new(sync.Mutex) + proxyAppConnMem := abcicli.NewLocalClient(mtx, app) + proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + + // Make Mempool + mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + if thisConfig.Consensus.WaitForTxs() { + mempool.EnableTxsAvailable() + } + + // mock the evidence pool + evpool := sm.MockEvidencePool{} + + // Make ConsensusState + stateDB := dbm.NewMemDB() + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) + cs.SetLogger(log.TestingLogger().With("module", "consensus")) + cs.SetPrivValidator(pv) + + eventBus := types.NewEventBus() + eventBus.SetLogger(log.TestingLogger().With("module", "events")) + eventBus.Start() + cs.SetEventBus(eventBus) + return cs +} + +func loadPrivValidator(config *cfg.Config) *privval.FilePV { + privValidatorFile := config.PrivValidatorFile() + ensureDir(path.Dir(privValidatorFile), 0700) + privValidator := privval.LoadOrGenFilePV(privValidatorFile) + privValidator.Reset() + return privValidator +} + +func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) { + // Get State + state, privVals := randGenesisState(nValidators, false, 10) + + vss := make([]*validatorStub, nValidators) + + cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true)) + + for i := 0; i < nValidators; i++ { + vss[i] = NewValidatorStub(privVals[i], i) + } + // since cs1 starts at 1 + incrementHeight(vss[1:]...) 
+
+	return cs, vss
+}
+
+//-------------------------------------------------------------------------------
+
+func ensureNoNewStep(stepCh <-chan interface{}) {
+	timer := time.NewTimer(ensureTimeout)
+	select {
+	case <-timer.C:
+		break
+	case <-stepCh:
+		panic("We should be stuck waiting, not moving to the next step")
+	}
+}
+
+func ensureNewStep(stepCh <-chan interface{}) {
+	timer := time.NewTimer(ensureTimeout)
+	select {
+	case <-timer.C:
+		panic("We shouldn't be stuck waiting")
+	case <-stepCh:
+		break
+	}
+}
+
+//-------------------------------------------------------------------------------
+// consensus nets
+
+// consensusLogger is a TestingLogger which uses a different
+// color for each validator ("validator" key must exist).
+func consensusLogger() log.Logger {
+	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
+		for i := 0; i < len(keyvals)-1; i += 2 {
+			if keyvals[i] == "validator" {
+				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
+			}
+		}
+		return term.FgBgColor{}
+	}).With("module", "consensus")
+}
+
+func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
+	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
+	css := make([]*ConsensusState, nValidators)
+	logger := consensusLogger()
+	for i := 0; i < nValidators; i++ {
+		stateDB := dbm.NewMemDB() // each state needs its own db
+		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
+		thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
+		for _, opt := range configOpts {
+			opt(thisConfig)
+		}
+		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
+		app := appFunc()
+		vals := types.TM2PB.Validators(state.Validators)
+		app.InitChain(abci.RequestInitChain{Validators: vals})
+
+		css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
+		css[i].SetTimeoutTicker(tickerFunc())
+		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
+	}
+	return css
+}
+
+// nPeers = nValidators + nNotValidator
+func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
+	genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
+	css := make([]*ConsensusState, nPeers)
+	logger := consensusLogger()
+	for i := 0; i < nPeers; i++ {
+		stateDB := dbm.NewMemDB() // each state needs its own db
+		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
+		thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
+		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
+		var privVal types.PrivValidator
+		if i < nValidators {
+			privVal = privVals[i]
+		} else {
+			_, tempFilePath := cmn.Tempfile("priv_validator_")
+			privVal = privval.GenFilePV(tempFilePath)
+		}
+
+		app := appFunc()
+		vals := types.TM2PB.Validators(state.Validators)
+		app.InitChain(abci.RequestInitChain{Validators: vals})
+
+		css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
+		css[i].SetTimeoutTicker(tickerFunc())
+		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
+	}
+	return css
+}
+
+func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
+	for i, s := range switches {
+		if peer.NodeInfo().ID == s.NodeInfo().ID {
+			return i
+		}
+	}
+	panic("didn't find peer in switches")
+}
+
+//-------------------------------------------------------------------------------
+// 
genesis + +func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) { + validators := make([]types.GenesisValidator, numValidators) + privValidators := make([]types.PrivValidator, numValidators) + for i := 0; i < numValidators; i++ { + val, privVal := types.RandValidator(randPower, minPower) + validators[i] = types.GenesisValidator{ + PubKey: val.PubKey, + Power: val.VotingPower, + } + privValidators[i] = privVal + } + sort.Sort(types.PrivValidatorsByAddress(privValidators)) + + return &types.GenesisDoc{ + GenesisTime: time.Now(), + ChainID: config.ChainID(), + Validators: validators, + }, privValidators +} + +func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) { + genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) + s0, _ := sm.MakeGenesisState(genDoc) + db := dbm.NewMemDB() + sm.SaveState(db, s0) + return s0, privValidators +} + +//------------------------------------ +// mock ticker + +func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker { + return func() TimeoutTicker { + return &mockTicker{ + c: make(chan timeoutInfo, 10), + onlyOnce: onlyOnce, + } + } +} + +// mock ticker only fires on RoundStepNewHeight +// and only once if onlyOnce=true +type mockTicker struct { + c chan timeoutInfo + + mtx sync.Mutex + onlyOnce bool + fired bool +} + +func (m *mockTicker) Start() error { + return nil +} + +func (m *mockTicker) Stop() error { + return nil +} + +func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) { + m.mtx.Lock() + defer m.mtx.Unlock() + if m.onlyOnce && m.fired { + return + } + if ti.Step == cstypes.RoundStepNewHeight { + m.c <- ti + m.fired = true + } +} + +func (m *mockTicker) Chan() <-chan timeoutInfo { + return m.c +} + +func (mockTicker) SetLogger(log.Logger) { +} + +//------------------------------------ + +func newCounter() abci.Application { + return counter.NewCounterApplication(true) +} + +func newPersistentKVStore() abci.Application { + dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore") + return kvstore.NewPersistentKVStoreApplication(dir) +} diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go new file mode 100644 index 000000000..a811de731 --- /dev/null +++ b/consensus/mempool_test.go @@ -0,0 +1,232 @@ +package consensus + +import ( + "encoding/binary" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/abci/example/code" + abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + + "github.com/tendermint/tendermint/types" +) + +func init() { + config = ResetConfig("consensus_mempool_test") +} + +func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { + config := ResetConfig("consensus_mempool_txs_available_test") + config.Consensus.CreateEmptyBlocks = false + state, privVals := randGenesisState(1, false, 10) + cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs.mempool.EnableTxsAvailable() + height, round := cs.Height, cs.Round + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + startTestRound(cs, height, round) + + ensureNewStep(newBlockCh) // first block gets committed + ensureNoNewStep(newBlockCh) + deliverTxsRange(cs, 0, 1) + ensureNewStep(newBlockCh) // commit txs + ensureNewStep(newBlockCh) // commit updated app hash + ensureNoNewStep(newBlockCh) +} + +func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { + config := 
ResetConfig("consensus_mempool_txs_available_test") + config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds()) + state, privVals := randGenesisState(1, false, 10) + cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs.mempool.EnableTxsAvailable() + height, round := cs.Height, cs.Round + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + startTestRound(cs, height, round) + + ensureNewStep(newBlockCh) // first block gets committed + ensureNoNewStep(newBlockCh) // then we dont make a block ... + ensureNewStep(newBlockCh) // until the CreateEmptyBlocksInterval has passed +} + +func TestMempoolProgressInHigherRound(t *testing.T) { + config := ResetConfig("consensus_mempool_txs_available_test") + config.Consensus.CreateEmptyBlocks = false + state, privVals := randGenesisState(1, false, 10) + cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs.mempool.EnableTxsAvailable() + height, round := cs.Height, cs.Round + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + cs.setProposal = func(proposal *types.Proposal) error { + if cs.Height == 2 && cs.Round == 0 { + // dont set the proposal in round 0 so we timeout and + // go to next round + cs.Logger.Info("Ignoring set proposal at height 2, round 0") + return nil + } + return cs.defaultSetProposal(proposal) + } + startTestRound(cs, height, round) + + ensureNewStep(newRoundCh) // first round at first height + ensureNewStep(newBlockCh) // first block gets committed + ensureNewStep(newRoundCh) // first round at next height + deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round + <-timeoutCh + ensureNewStep(newRoundCh) // wait for the next round + ensureNewStep(newBlockCh) // now we can commit the block +} + +func deliverTxsRange(cs *ConsensusState, start, end int) { + // Deliver some txs. + for i := start; i < end; i++ { + txBytes := make([]byte, 8) + binary.BigEndian.PutUint64(txBytes, uint64(i)) + err := cs.mempool.CheckTx(txBytes, nil) + if err != nil { + panic(cmn.Fmt("Error after CheckTx: %v", err)) + } + } +} + +func TestMempoolTxConcurrentWithCommit(t *testing.T) { + state, privVals := randGenesisState(1, false, 10) + cs := newConsensusState(state, privVals[0], NewCounterApplication()) + height, round := cs.Height, cs.Round + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + + NTxs := 10000 + go deliverTxsRange(cs, 0, NTxs) + + startTestRound(cs, height, round) + for nTxs := 0; nTxs < NTxs; { + ticker := time.NewTicker(time.Second * 30) + select { + case b := <-newBlockCh: + evt := b.(types.EventDataNewBlock) + nTxs += int(evt.Block.Header.NumTxs) + case <-ticker.C: + panic("Timed out waiting to commit blocks with transactions") + } + } +} + +func TestMempoolRmBadTx(t *testing.T) { + state, privVals := randGenesisState(1, false, 10) + app := NewCounterApplication() + cs := newConsensusState(state, privVals[0], app) + + // increment the counter by 1 + txBytes := make([]byte, 8) + binary.BigEndian.PutUint64(txBytes, uint64(0)) + + resDeliver := app.DeliverTx(txBytes) + assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. 
got %v", resDeliver)) + + resCommit := app.Commit() + assert.True(t, len(resCommit.Data) > 0) + + emptyMempoolCh := make(chan struct{}) + checkTxRespCh := make(chan struct{}) + go func() { + // Try to send the tx through the mempool. + // CheckTx should not err, but the app should return a bad abci code + // and the tx should get removed from the pool + err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) { + if r.GetCheckTx().Code != code.CodeTypeBadNonce { + t.Fatalf("expected checktx to return bad nonce, got %v", r) + } + checkTxRespCh <- struct{}{} + }) + if err != nil { + t.Fatalf("Error after CheckTx: %v", err) + } + + // check for the tx + for { + txs := cs.mempool.Reap(1) + if len(txs) == 0 { + emptyMempoolCh <- struct{}{} + return + } + time.Sleep(10 * time.Millisecond) + } + }() + + // Wait until the tx returns + ticker := time.After(time.Second * 5) + select { + case <-checkTxRespCh: + // success + case <-ticker: + t.Fatalf("Timed out waiting for tx to return") + } + + // Wait until the tx is removed + ticker = time.After(time.Second * 5) + select { + case <-emptyMempoolCh: + // success + case <-ticker: + t.Fatalf("Timed out waiting for tx to be removed") + } +} + +// CounterApplication that maintains a mempool state and resets it upon commit +type CounterApplication struct { + abci.BaseApplication + + txCount int + mempoolTxCount int +} + +func NewCounterApplication() *CounterApplication { + return &CounterApplication{} +} + +func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { + return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)} +} + +func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx { + txValue := txAsUint64(tx) + if txValue != uint64(app.txCount) { + return abci.ResponseDeliverTx{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)} + } + app.txCount++ + return abci.ResponseDeliverTx{Code: code.CodeTypeOK} +} + +func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx { + txValue := txAsUint64(tx) + if txValue != uint64(app.mempoolTxCount) { + return abci.ResponseCheckTx{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)} + } + app.mempoolTxCount++ + return abci.ResponseCheckTx{Code: code.CodeTypeOK} +} + +func txAsUint64(tx []byte) uint64 { + tx8 := make([]byte, 8) + copy(tx8[len(tx8)-len(tx):], tx) + return binary.BigEndian.Uint64(tx8) +} + +func (app *CounterApplication) Commit() abci.ResponseCommit { + app.mempoolTxCount = app.txCount + if app.txCount == 0 { + return abci.ResponseCommit{} + } + hash := make([]byte, 8) + binary.BigEndian.PutUint64(hash, uint64(app.txCount)) + return abci.ResponseCommit{Data: hash} +} diff --git a/consensus/metrics.go b/consensus/metrics.go new file mode 100644 index 000000000..253880e84 --- /dev/null +++ b/consensus/metrics.go @@ -0,0 +1,133 @@ +package consensus + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +// Metrics contains metrics exposed by this package. +type Metrics struct { + // Height of the chain. + Height metrics.Gauge + + // Number of rounds. + Rounds metrics.Gauge + + // Number of validators. + Validators metrics.Gauge + // Total power of all validators. + ValidatorsPower metrics.Gauge + // Number of validators who did not sign. 
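+	// ("Did not sign" means the validator's precommit was absent from the
+	// block's LastCommit.)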
+ MissingValidators metrics.Gauge + // Total power of the missing validators. + MissingValidatorsPower metrics.Gauge + // Number of validators who tried to double sign. + ByzantineValidators metrics.Gauge + // Total power of the byzantine validators. + ByzantineValidatorsPower metrics.Gauge + + // Time between this and the last block. + BlockIntervalSeconds metrics.Histogram + + // Number of transactions. + NumTxs metrics.Gauge + // Size of the block. + BlockSizeBytes metrics.Gauge + // Total number of transactions. + TotalTxs metrics.Gauge +} + +// PrometheusMetrics returns Metrics build using Prometheus client library. +func PrometheusMetrics() *Metrics { + return &Metrics{ + Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "height", + Help: "Height of the chain.", + }, []string{}), + Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "rounds", + Help: "Number of rounds.", + }, []string{}), + + Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "validators", + Help: "Number of validators.", + }, []string{}), + ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "validators_power", + Help: "Total power of all validators.", + }, []string{}), + MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "missing_validators", + Help: "Number of validators who did not sign.", + }, []string{}), + MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "missing_validators_power", + Help: "Total power of the missing validators.", + }, []string{}), + ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "byzantine_validators", + Help: "Number of validators who tried to double sign.", + }, []string{}), + ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "byzantine_validators_power", + Help: "Total power of the byzantine validators.", + }, []string{}), + + BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Subsystem: "consensus", + Name: "block_interval_seconds", + Help: "Time between this and the last block.", + Buckets: []float64{1, 2.5, 5, 10, 60}, + }, []string{}), + + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "num_txs", + Help: "Number of transactions.", + }, []string{}), + BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "block_size_bytes", + Help: "Size of the block.", + }, []string{}), + TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "consensus", + Name: "total_txs", + Help: "Total number of transactions.", + }, []string{}), + } +} + +// NopMetrics returns no-op Metrics. 
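+// A minimal sketch of how a caller might choose between the two constructors
+// (the prometheusEnabled flag stands in for whatever instrumentation switch
+// the node config exposes; it is an assumption, not part of this package):
+//
+//	csMetrics := NopMetrics()
+//	if prometheusEnabled {
+//		csMetrics = PrometheusMetrics()
+//	}
+//	csMetrics.Height.Set(float64(lastBlockHeight))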
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Height: discard.NewGauge(),
+
+		Rounds: discard.NewGauge(),
+
+		Validators:               discard.NewGauge(),
+		ValidatorsPower:          discard.NewGauge(),
+		MissingValidators:        discard.NewGauge(),
+		MissingValidatorsPower:   discard.NewGauge(),
+		ByzantineValidators:      discard.NewGauge(),
+		ByzantineValidatorsPower: discard.NewGauge(),
+
+		BlockIntervalSeconds: discard.NewHistogram(),
+
+		NumTxs:         discard.NewGauge(),
+		BlockSizeBytes: discard.NewGauge(),
+		TotalTxs:       discard.NewGauge(),
+	}
+}
diff --git a/consensus/reactor.go b/consensus/reactor.go
new file mode 100644
index 000000000..48ebcad23
--- /dev/null
+++ b/consensus/reactor.go
@@ -0,0 +1,1457 @@
+package consensus
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/pkg/errors"
+
+	amino "github.com/tendermint/go-amino"
+
+	cstypes "github.com/tendermint/tendermint/consensus/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	tmevents "github.com/tendermint/tendermint/libs/events"
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/p2p"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/types"
+)
+
+const (
+	StateChannel       = byte(0x20)
+	DataChannel        = byte(0x21)
+	VoteChannel        = byte(0x22)
+	VoteSetBitsChannel = byte(0x23)
+
+	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
+
+	blocksToContributeToBecomeGoodPeer = 10000
+)
+
+//-----------------------------------------------------------------------------
+
+// ConsensusReactor defines a reactor for the consensus service.
+type ConsensusReactor struct {
+	p2p.BaseReactor // BaseService + p2p.Switch
+
+	conS *ConsensusState
+
+	mtx      sync.RWMutex
+	fastSync bool
+	eventBus *types.EventBus
+}
+
+// NewConsensusReactor returns a new ConsensusReactor with the given
+// consensusState.
+func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
+	conR := &ConsensusReactor{
+		conS:     consensusState,
+		fastSync: fastSync,
+	}
+	conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR)
+	return conR
+}
+
+// OnStart implements BaseService by subscribing to events (which will later
+// be broadcast to other peers) and by starting the consensus state if we're
+// not in fast-sync mode.
+func (conR *ConsensusReactor) OnStart() error {
+	conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
+	if err := conR.BaseReactor.OnStart(); err != nil {
+		return err
+	}
+
+	conR.subscribeToBroadcastEvents()
+
+	if !conR.FastSync() {
+		err := conR.conS.Start()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// OnStop implements BaseService by unsubscribing from events and stopping
+// state.
+func (conR *ConsensusReactor) OnStop() {
+	conR.BaseReactor.OnStop()
+	conR.unsubscribeFromBroadcastEvents()
+	conR.conS.Stop()
+}
+
+// SwitchToConsensus switches from fast_sync mode to consensus mode.
+// It resets the state, turns off fast_sync, and starts the consensus state-machine.
+func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int) {
+	conR.Logger.Info("SwitchToConsensus")
+	conR.conS.reconstructLastCommit(state)
+	// NOTE: The line below causes broadcastNewRoundStepRoutine() to
+	// broadcast a NewRoundStepMessage.
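+	// (In this version the broadcast happens via the EventNewRoundStep
+	// listener registered in subscribeToBroadcastEvents, which calls
+	// broadcastNewRoundStepMessages.)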
+ conR.conS.updateToState(state) + + conR.mtx.Lock() + conR.fastSync = false + conR.mtx.Unlock() + + if blocksSynced > 0 { + // dont bother with the WAL if we fast synced + conR.conS.doWALCatchup = false + } + err := conR.conS.Start() + if err != nil { + conR.Logger.Error("Error starting conS", "err", err) + return + } +} + +// GetChannels implements Reactor +func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor { + // TODO optimize + return []*p2p.ChannelDescriptor{ + { + ID: StateChannel, + Priority: 5, + SendQueueCapacity: 100, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: DataChannel, // maybe split between gossiping current block and catchup stuff + Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round + SendQueueCapacity: 100, + RecvBufferCapacity: 50 * 4096, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: VoteChannel, + Priority: 5, + SendQueueCapacity: 100, + RecvBufferCapacity: 100 * 100, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: VoteSetBitsChannel, + Priority: 1, + SendQueueCapacity: 2, + RecvBufferCapacity: 1024, + RecvMessageCapacity: maxMsgSize, + }, + } +} + +// AddPeer implements Reactor +func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) { + if !conR.IsRunning() { + return + } + + // Create peerState for peer + peerState := NewPeerState(peer).SetLogger(conR.Logger) + peer.Set(types.PeerStateKey, peerState) + + // Begin routines for this peer. + go conR.gossipDataRoutine(peer, peerState) + go conR.gossipVotesRoutine(peer, peerState) + go conR.queryMaj23Routine(peer, peerState) + + // Send our state to peer. + // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). + if !conR.FastSync() { + conR.sendNewRoundStepMessages(peer) + } +} + +// RemovePeer implements Reactor +func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) { + if !conR.IsRunning() { + return + } + // TODO + //peer.Get(PeerStateKey).(*PeerState).Disconnect() +} + +// Receive implements Reactor +// NOTE: We process these messages even when we're fast_syncing. +// Messages affect either a peer state or the consensus state. 
+// Peer state updates can happen in parallel, but processing of +// proposals, block parts, and votes are ordered by the receiveRoutine +// NOTE: blocks on consensus state for proposals, block parts, and votes +func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { + if !conR.IsRunning() { + conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) + return + } + + msg, err := decodeMsg(msgBytes) + if err != nil { + conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + conR.Switch.StopPeerForError(src, err) + return + } + conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) + + // Get peer states + ps := src.Get(types.PeerStateKey).(*PeerState) + + switch chID { + case StateChannel: + switch msg := msg.(type) { + case *NewRoundStepMessage: + ps.ApplyNewRoundStepMessage(msg) + case *CommitStepMessage: + ps.ApplyCommitStepMessage(msg) + case *HasVoteMessage: + ps.ApplyHasVoteMessage(msg) + case *VoteSetMaj23Message: + cs := conR.conS + cs.mtx.Lock() + height, votes := cs.Height, cs.Votes + cs.mtx.Unlock() + if height != msg.Height { + return + } + // Peer claims to have a maj23 for some BlockID at H,R,S, + err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID) + if err != nil { + conR.Switch.StopPeerForError(src, err) + return + } + // Respond with a VoteSetBitsMessage showing which votes we have. + // (and consequently shows which we don't have) + var ourVotes *cmn.BitArray + switch msg.Type { + case types.VoteTypePrevote: + ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) + case types.VoteTypePrecommit: + ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) + default: + conR.Logger.Error("Bad VoteSetBitsMessage field Type") + return + } + src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: msg.BlockID, + Votes: ourVotes, + })) + case *ProposalHeartbeatMessage: + hb := msg.Heartbeat + conR.Logger.Debug("Received proposal heartbeat message", + "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence, + "valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress) + default: + conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + } + + case DataChannel: + if conR.FastSync() { + conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + return + } + switch msg := msg.(type) { + case *ProposalMessage: + ps.SetHasProposal(msg.Proposal) + conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + case *ProposalPOLMessage: + ps.ApplyProposalPOLMessage(msg) + case *BlockPartMessage: + ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index) + if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 { + conR.Switch.MarkPeerAsGood(src) + } + conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + default: + conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + } + + case VoteChannel: + if conR.FastSync() { + conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + return + } + switch msg := msg.(type) { + case *VoteMessage: + cs := conR.conS + cs.mtx.Lock() + height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() + cs.mtx.Unlock() + ps.EnsureVoteBitArrays(height, valSize) + ps.EnsureVoteBitArrays(height-1, lastCommitSize) + ps.SetHasVote(msg.Vote) + if blocks := 
ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 { + conR.Switch.MarkPeerAsGood(src) + } + + cs.peerMsgQueue <- msgInfo{msg, src.ID()} + + default: + // don't punish (leave room for soft upgrades) + conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + } + + case VoteSetBitsChannel: + if conR.FastSync() { + conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + return + } + switch msg := msg.(type) { + case *VoteSetBitsMessage: + cs := conR.conS + cs.mtx.Lock() + height, votes := cs.Height, cs.Votes + cs.mtx.Unlock() + + if height == msg.Height { + var ourVotes *cmn.BitArray + switch msg.Type { + case types.VoteTypePrevote: + ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) + case types.VoteTypePrecommit: + ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) + default: + conR.Logger.Error("Bad VoteSetBitsMessage field Type") + return + } + ps.ApplyVoteSetBitsMessage(msg, ourVotes) + } else { + ps.ApplyVoteSetBitsMessage(msg, nil) + } + default: + // don't punish (leave room for soft upgrades) + conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + } + + default: + conR.Logger.Error(cmn.Fmt("Unknown chId %X", chID)) + } + + if err != nil { + conR.Logger.Error("Error in Receive()", "err", err) + } +} + +// SetEventBus sets event bus. +func (conR *ConsensusReactor) SetEventBus(b *types.EventBus) { + conR.eventBus = b + conR.conS.SetEventBus(b) +} + +// FastSync returns whether the consensus reactor is in fast-sync mode. +func (conR *ConsensusReactor) FastSync() bool { + conR.mtx.RLock() + defer conR.mtx.RUnlock() + return conR.fastSync +} + +//-------------------------------------- + +// subscribeToBroadcastEvents subscribes for new round steps, votes and +// proposal heartbeats using internal pubsub defined on state to broadcast +// them to peers upon receiving. +func (conR *ConsensusReactor) subscribeToBroadcastEvents() { + const subscriber = "consensus-reactor" + conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, + func(data tmevents.EventData) { + conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState)) + }) + + conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, + func(data tmevents.EventData) { + conR.broadcastHasVoteMessage(data.(*types.Vote)) + }) + + conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalHeartbeat, + func(data tmevents.EventData) { + conR.broadcastProposalHeartbeatMessage(data.(*types.Heartbeat)) + }) +} + +func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() { + const subscriber = "consensus-reactor" + conR.conS.evsw.RemoveListener(subscriber) +} + +func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartbeat) { + conR.Logger.Debug("Broadcasting proposal heartbeat message", + "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence) + msg := &ProposalHeartbeatMessage{hb} + conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg)) +} + +func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) { + nrsMsg, csMsg := makeRoundStepMessages(rs) + if nrsMsg != nil { + conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) + } + if csMsg != nil { + conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg)) + } +} + +// Broadcasts HasVoteMessage to peers that care. 
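+// A HasVoteMessage is an announcement rather than the vote itself: it carries
+// only (Height, Round, Type, ValidatorIndex), enough for peers to flip the
+// corresponding bit in the vote bit-arrays they track for us and avoid
+// sending that vote back.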
+func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
+	msg := &HasVoteMessage{
+		Height: vote.Height,
+		Round:  vote.Round,
+		Type:   vote.Type,
+		Index:  vote.ValidatorIndex,
+	}
+	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
+	/*
+		// TODO: Make this broadcast more selective.
+		for _, peer := range conR.Switch.Peers().List() {
+			ps := peer.Get(PeerStateKey).(*PeerState)
+			prs := ps.GetRoundState()
+			if prs.Height == vote.Height {
+				// TODO: Also filter on round?
+				peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
+			} else {
+				// Height doesn't match
+				// TODO: check a field, maybe CatchupCommitRound?
+				// TODO: But that requires changing the struct field comment.
+			}
+		}
+	*/
+}
+
+func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) {
+	nrsMsg = &NewRoundStepMessage{
+		Height:                rs.Height,
+		Round:                 rs.Round,
+		Step:                  rs.Step,
+		SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
+		LastCommitRound:       rs.LastCommit.Round(),
+	}
+	if rs.Step == cstypes.RoundStepCommit {
+		csMsg = &CommitStepMessage{
+			Height:           rs.Height,
+			BlockPartsHeader: rs.ProposalBlockParts.Header(),
+			BlockParts:       rs.ProposalBlockParts.BitArray(),
+		}
+	}
+	return
+}
+
+func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
+	rs := conR.conS.GetRoundState()
+	nrsMsg, csMsg := makeRoundStepMessages(rs)
+	if nrsMsg != nil {
+		peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
+	}
+	if csMsg != nil {
+		peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
+	}
+}
+
+func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
+	logger := conR.Logger.With("peer", peer)
+
+OUTER_LOOP:
+	for {
+		// Manage disconnects from self or peer.
+		if !peer.IsRunning() || !conR.IsRunning() {
+			logger.Info("Stopping gossipDataRoutine for peer")
+			return
+		}
+		rs := conR.conS.GetRoundState()
+		prs := ps.GetRoundState()
+
+		// Send proposal Block parts?
+		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartsHeader) {
+			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
+				part := rs.ProposalBlockParts.GetPart(index)
+				msg := &BlockPartMessage{
+					Height: rs.Height, // This tells peer that this part applies to us.
+					Round:  rs.Round,  // This tells peer that this part applies to us.
+					Part:   part,
+				}
+				logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
+				if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
+					ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
+				}
+				continue OUTER_LOOP
+			}
+		}
+
+		// If the peer is on a previous height, help catch up.
+		if (0 < prs.Height) && (prs.Height < rs.Height) {
+			heightLogger := logger.With("height", prs.Height)
+
+			// If we never received the commit message from the peer, the block parts won't be initialized.
+			if prs.ProposalBlockParts == nil {
+				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
+				if blockMeta == nil {
+					cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d",
+						prs.Height, conR.conS.blockStore.Height()))
+				}
+				ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
+				// Continue the loop since prs is a copy and is not affected by this initialization.
+				continue OUTER_LOOP
+			}
+			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
+			continue OUTER_LOOP
+		}
+
+		// If height and round don't match, sleep.
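+		// (Block parts and catchup were handled above; once heights and
+		// rounds agree, the only remaining data to gossip is the Proposal
+		// and its POL, handled below.)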
+ if (rs.Height != prs.Height) || (rs.Round != prs.Round) { + //logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer) + time.Sleep(conR.conS.config.PeerGossipSleep()) + continue OUTER_LOOP + } + + // By here, height and round match. + // Proposal block parts were already matched and sent if any were wanted. + // (These can match on hash so the round doesn't matter) + // Now consider sending other things, like the Proposal itself. + + // Send Proposal && ProposalPOL BitArray? + if rs.Proposal != nil && !prs.Proposal { + // Proposal: share the proposal metadata with peer. + { + msg := &ProposalMessage{Proposal: rs.Proposal} + logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) + if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { + ps.SetHasProposal(rs.Proposal) + } + } + // ProposalPOL: lets peer know which POL votes we have so far. + // Peer must receive ProposalMessage first. + // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, + // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). + if 0 <= rs.Proposal.POLRound { + msg := &ProposalPOLMessage{ + Height: rs.Height, + ProposalPOLRound: rs.Proposal.POLRound, + ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), + } + logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) + peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) + } + continue OUTER_LOOP + } + + // Nothing to do. Sleep. + time.Sleep(conR.conS.config.PeerGossipSleep()) + continue OUTER_LOOP + } +} + +func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) { + + if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { + // Ensure that the peer's PartSetHeader is correct + blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) + if blockMeta == nil { + logger.Error("Failed to load block meta", + "ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height()) + time.Sleep(conR.conS.config.PeerGossipSleep()) + return + } else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) { + logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping", + "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) + time.Sleep(conR.conS.config.PeerGossipSleep()) + return + } + // Load the part + part := conR.conS.blockStore.LoadBlockPart(prs.Height, index) + if part == nil { + logger.Error("Could not load part", "index", index, + "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) + time.Sleep(conR.conS.config.PeerGossipSleep()) + return + } + // Send the part + msg := &BlockPartMessage{ + Height: prs.Height, // Not our height, so it doesn't matter. + Round: prs.Round, // Not our height, so it doesn't matter. + Part: part, + } + logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) + if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { + ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + } else { + logger.Debug("Sending block part for catchup failed") + } + return + } + //logger.Info("No parts to send in catch-up, sleeping") + time.Sleep(conR.conS.config.PeerGossipSleep()) +} + +func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { + logger := conR.Logger.With("peer", peer) + + // Simple hack to throttle logs upon sleep. 
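+	// sleeping is a small 3-state machine used only to throttle the
+	// "No votes to send" log line: 0 = recently sent something,
+	// 1 = just went to sleep (logged once), 2 = still sleeping (quiet).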
+ var sleeping = 0 + +OUTER_LOOP: + for { + // Manage disconnects from self or peer. + if !peer.IsRunning() || !conR.IsRunning() { + logger.Info("Stopping gossipVotesRoutine for peer") + return + } + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + + switch sleeping { + case 1: // First sleep + sleeping = 2 + case 2: // No more sleep + sleeping = 0 + } + + //logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round, + // "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step) + + // If height matches, then send LastCommit, Prevotes, Precommits. + if rs.Height == prs.Height { + heightLogger := logger.With("height", prs.Height) + if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) { + continue OUTER_LOOP + } + } + + // Special catchup logic. + // If peer is lagging by height 1, send LastCommit. + if prs.Height != 0 && rs.Height == prs.Height+1 { + if ps.PickSendVote(rs.LastCommit) { + logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) + continue OUTER_LOOP + } + } + + // Catchup logic + // If peer is lagging by more than 1, send Commit. + if prs.Height != 0 && rs.Height >= prs.Height+2 { + // Load the block commit for prs.Height, + // which contains precommit signatures for prs.Height. + commit := conR.conS.blockStore.LoadBlockCommit(prs.Height) + if ps.PickSendVote(commit) { + logger.Debug("Picked Catchup commit to send", "height", prs.Height) + continue OUTER_LOOP + } + } + + if sleeping == 0 { + // We sent nothing. Sleep... + sleeping = 1 + logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height, + "localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes, + "localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits) + } else if sleeping == 2 { + // Continued sleep... + sleeping = 1 + } + + time.Sleep(conR.conS.config.PeerGossipSleep()) + continue OUTER_LOOP + } +} + +func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) bool { + + // If there are lastCommits to send... + if prs.Step == cstypes.RoundStepNewHeight { + if ps.PickSendVote(rs.LastCommit) { + logger.Debug("Picked rs.LastCommit to send") + return true + } + } + // If there are POL prevotes to send... + if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { + if ps.PickSendVote(polPrevotes) { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return true + } + } + } + // If there are prevotes to send... + if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { + if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are precommits to send... 
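+	// (As with prevotes, we can only serve precommits for rounds we have
+	// actually reached, hence the prs.Round <= rs.Round guard below.)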
+	if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
+		if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
+			logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
+			return true
+		}
+	}
+	// If there are prevotes to send... (needed because of the validBlock mechanism)
+	if prs.Round != -1 && prs.Round <= rs.Round {
+		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
+			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
+			return true
+		}
+	}
+	// If there are POLPrevotes to send...
+	if prs.ProposalPOLRound != -1 {
+		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
+			if ps.PickSendVote(polPrevotes) {
+				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
+					"round", prs.ProposalPOLRound)
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
+// into play for liveness when there's a signature DDoS attack happening.
+func (conR *ConsensusReactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
+	logger := conR.Logger.With("peer", peer)
+
+OUTER_LOOP:
+	for {
+		// Manage disconnects from self or peer.
+		if !peer.IsRunning() || !conR.IsRunning() {
+			logger.Info("Stopping queryMaj23Routine for peer")
+			return
+		}
+
+		// Maybe send Height/Round/Prevotes
+		{
+			rs := conR.conS.GetRoundState()
+			prs := ps.GetRoundState()
+			if rs.Height == prs.Height {
+				if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
+					peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
+						Height:  prs.Height,
+						Round:   prs.Round,
+						Type:    types.VoteTypePrevote,
+						BlockID: maj23,
+					}))
+					time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				}
+			}
+		}
+
+		// Maybe send Height/Round/Precommits
+		{
+			rs := conR.conS.GetRoundState()
+			prs := ps.GetRoundState()
+			if rs.Height == prs.Height {
+				if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
+					peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
+						Height:  prs.Height,
+						Round:   prs.Round,
+						Type:    types.VoteTypePrecommit,
+						BlockID: maj23,
+					}))
+					time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				}
+			}
+		}
+
+		// Maybe send Height/Round/ProposalPOL
+		{
+			rs := conR.conS.GetRoundState()
+			prs := ps.GetRoundState()
+			if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
+				if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
+					peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
+						Height:  prs.Height,
+						Round:   prs.ProposalPOLRound,
+						Type:    types.VoteTypePrevote,
+						BlockID: maj23,
+					}))
+					time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				}
+			}
+		}
+
+		// There is little point in sending LastCommitRound/LastCommit:
+		// these are fleeting and non-blocking.
+
+		// Maybe send Height/CatchupCommitRound/CatchupCommit.
+		{
+			prs := ps.GetRoundState()
+			if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
+				commit := conR.conS.LoadCommit(prs.Height)
+				peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
+					Height:  prs.Height,
+					Round:   commit.Round(),
+					Type:    types.VoteTypePrecommit,
+					BlockID: commit.BlockID,
+				}))
+				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+			}
+		}
+
+		time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+
+		continue OUTER_LOOP
+	}
+}
+
+// String returns a string representation of the ConsensusReactor.
+// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
+// TODO: improve!
+func (conR *ConsensusReactor) String() string {
+	// better not to access shared variables
+	return "ConsensusReactor" // conR.StringIndented("")
+}
+
+// StringIndented returns an indented string representation of the ConsensusReactor.
+func (conR *ConsensusReactor) StringIndented(indent string) string {
+	s := "ConsensusReactor{\n"
+	s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
+	for _, peer := range conR.Switch.Peers().List() {
+		ps := peer.Get(types.PeerStateKey).(*PeerState)
+		s += indent + " " + ps.StringIndented(indent+" ") + "\n"
+	}
+	s += indent + "}"
+	return s
+}
+
+//-----------------------------------------------------------------------------
+
+var (
+	ErrPeerStateHeightRegression = errors.New("Error peer state height regression")
+	ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime")
+)
+
+// PeerState contains the known state of a peer, including its connection and
+// threadsafe access to its PeerRoundState.
+// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
+// Be mindful of what you Expose.
+type PeerState struct {
+	peer   p2p.Peer
+	logger log.Logger
+
+	mtx   sync.Mutex             `json:"-"`           // NOTE: Modify below using setters, never directly.
+	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
+	Stats *peerStateStats        `json:"stats"`       // Exposed.
+}
+
+// peerStateStats holds internal statistics for a peer.
+type peerStateStats struct {
+	LastVoteHeight      int64 `json:"last_vote_height"`
+	Votes               int   `json:"votes"`
+	LastBlockPartHeight int64 `json:"last_block_part_height"`
+	BlockParts          int   `json:"block_parts"`
+}
+
+func (pss peerStateStats) String() string {
+	return fmt.Sprintf("peerStateStats{lvh: %d, votes: %d, lbph: %d, blockParts: %d}",
+		pss.LastVoteHeight, pss.Votes, pss.LastBlockPartHeight, pss.BlockParts)
+}
+
+// NewPeerState returns a new PeerState for the given Peer.
+func NewPeerState(peer p2p.Peer) *PeerState {
+	return &PeerState{
+		peer:   peer,
+		logger: log.NewNopLogger(),
+		PRS: cstypes.PeerRoundState{
+			Round:              -1,
+			ProposalPOLRound:   -1,
+			LastCommitRound:    -1,
+			CatchupCommitRound: -1,
+		},
+		Stats: &peerStateStats{},
+	}
+}
+
+// SetLogger sets the logger on the peer state and returns the peer state
+// itself.
+func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
+	ps.logger = logger
+	return ps
+}
+
+// GetRoundState returns a shallow copy of the PeerRoundState.
+// There's no point in mutating it since it won't change PeerState.
+func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	prs := ps.PRS // copy
+	return &prs
+}
+
+// ToJSON returns the JSON representation of the PeerState, marshalled using go-amino.
+func (ps *PeerState) ToJSON() ([]byte, error) {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	return cdc.MarshalJSON(ps)
+}
+
+// GetHeight returns an atomic snapshot of the PeerRoundState's height.
+// Used by the mempool to ensure peers are caught up before broadcasting new txs.
+func (ps *PeerState) GetHeight() int64 {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+	return ps.PRS.Height
+}
+
+// SetHasProposal sets the given proposal as known for the peer.
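+// It is a no-op unless the proposal matches the peer's current height and
+// round, and it is idempotent: once PRS.Proposal is set, later calls return
+// early.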
+func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round { + return + } + if ps.PRS.Proposal { + return + } + + ps.PRS.Proposal = true + ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader + ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total) + ps.PRS.ProposalPOLRound = proposal.POLRound + ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received. +} + +// InitProposalBlockParts initializes the peer's proposal block parts header and bit array. +func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.ProposalBlockParts != nil { + return + } + + ps.PRS.ProposalBlockPartsHeader = partsHeader + ps.PRS.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total) +} + +// SetHasProposalBlockPart sets the given block part index as known for the peer. +func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != height || ps.PRS.Round != round { + return + } + + ps.PRS.ProposalBlockParts.SetIndex(index, true) +} + +// PickSendVote picks a vote and sends it to the peer. +// Returns true if vote was sent. +func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { + if vote, ok := ps.PickVoteToSend(votes); ok { + msg := &VoteMessage{vote} + ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) + return ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg)) + } + return false +} + +// PickVoteToSend picks a vote to send to the peer. +// Returns true if a vote was picked. +// NOTE: `votes` must be the correct Size() for the Height(). +func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if votes.Size() == 0 { + return nil, false + } + + height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size() + + // Lazily set data using 'votes'. + if votes.IsCommit() { + ps.ensureCatchupCommitRound(height, round, size) + } + ps.ensureVoteBitArrays(height, size) + + psVotes := ps.getVoteBitArray(height, round, type_) + if psVotes == nil { + return nil, false // Not something worth sending + } + if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { + ps.setHasVote(height, round, type_, index) + return votes.GetByIndex(index), true + } + return nil, false +} + +func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.BitArray { + if !types.IsVoteTypeValid(type_) { + return nil + } + + if ps.PRS.Height == height { + if ps.PRS.Round == round { + switch type_ { + case types.VoteTypePrevote: + return ps.PRS.Prevotes + case types.VoteTypePrecommit: + return ps.PRS.Precommits + } + } + if ps.PRS.CatchupCommitRound == round { + switch type_ { + case types.VoteTypePrevote: + return nil + case types.VoteTypePrecommit: + return ps.PRS.CatchupCommit + } + } + if ps.PRS.ProposalPOLRound == round { + switch type_ { + case types.VoteTypePrevote: + return ps.PRS.ProposalPOL + case types.VoteTypePrecommit: + return nil + } + } + return nil + } + if ps.PRS.Height == height+1 { + if ps.PRS.LastCommitRound == round { + switch type_ { + case types.VoteTypePrevote: + return nil + case types.VoteTypePrecommit: + return ps.PRS.LastCommit + } + } + return nil + } + return nil +} + +// 'round': A round for which we have a +2/3 commit. 
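+// The catchup commit is a commit for an older height that we replay to a
+// lagging peer; its round may differ from the round the peer is currently
+// tracking, so it gets its own bit-array below unless it happens to match
+// the peer's round (in which case Precommits is reused).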
+func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) { + if ps.PRS.Height != height { + return + } + /* + NOTE: This is wrong, 'round' could change. + e.g. if orig round is not the same as block LastCommit round. + if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round { + cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round)) + } + */ + if ps.PRS.CatchupCommitRound == round { + return // Nothing to do! + } + ps.PRS.CatchupCommitRound = round + if round == ps.PRS.Round { + ps.PRS.CatchupCommit = ps.PRS.Precommits + } else { + ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators) + } +} + +// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking +// what votes this peer has received. +// NOTE: It's important to make sure that numValidators actually matches +// what the node sees as the number of validators for height. +func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + ps.ensureVoteBitArrays(height, numValidators) +} + +func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { + if ps.PRS.Height == height { + if ps.PRS.Prevotes == nil { + ps.PRS.Prevotes = cmn.NewBitArray(numValidators) + } + if ps.PRS.Precommits == nil { + ps.PRS.Precommits = cmn.NewBitArray(numValidators) + } + if ps.PRS.CatchupCommit == nil { + ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators) + } + if ps.PRS.ProposalPOL == nil { + ps.PRS.ProposalPOL = cmn.NewBitArray(numValidators) + } + } else if ps.PRS.Height == height+1 { + if ps.PRS.LastCommit == nil { + ps.PRS.LastCommit = cmn.NewBitArray(numValidators) + } + } +} + +// RecordVote updates internal statistics for this peer by recording the vote. +// It returns the total number of votes (1 per block). This essentially means +// the number of blocks for which peer has been sending us votes. +func (ps *PeerState) RecordVote(vote *types.Vote) int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.Stats.LastVoteHeight >= vote.Height { + return ps.Stats.Votes + } + ps.Stats.LastVoteHeight = vote.Height + ps.Stats.Votes++ + return ps.Stats.Votes +} + +// VotesSent returns the number of blocks for which peer has been sending us +// votes. +func (ps *PeerState) VotesSent() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return ps.Stats.Votes +} + +// RecordBlockPart updates internal statistics for this peer by recording the +// block part. It returns the total number of block parts (1 per block). This +// essentially means the number of blocks for which peer has been sending us +// block parts. +func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.Stats.LastBlockPartHeight >= bp.Height { + return ps.Stats.BlockParts + } + + ps.Stats.LastBlockPartHeight = bp.Height + ps.Stats.BlockParts++ + return ps.Stats.BlockParts +} + +// BlockPartsSent returns the number of blocks for which peer has been sending +// us block parts. 
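+// Together with VotesSent, this counter feeds the reactor's good-peer
+// heuristic: every blocksToContributeToBecomeGoodPeer recorded blocks,
+// Receive calls conR.Switch.MarkPeerAsGood(src).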
+func (ps *PeerState) BlockPartsSent() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return ps.Stats.BlockParts +} + +// SetHasVote sets the given vote as known by the peer +func (ps *PeerState) SetHasVote(vote *types.Vote) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) +} + +func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) { + logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round)) + logger.Debug("setHasVote", "type", type_, "index", index) + + // NOTE: some may be nil BitArrays -> no side effects. + psVotes := ps.getVoteBitArray(height, round, type_) + if psVotes != nil { + psVotes.SetIndex(index, true) + } +} + +// ApplyNewRoundStepMessage updates the peer state for the new round. +func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + // Ignore duplicates or decreases + if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 { + return + } + + // Just remember these values. + psHeight := ps.PRS.Height + psRound := ps.PRS.Round + //psStep := ps.PRS.Step + psCatchupCommitRound := ps.PRS.CatchupCommitRound + psCatchupCommit := ps.PRS.CatchupCommit + + startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second) + ps.PRS.Height = msg.Height + ps.PRS.Round = msg.Round + ps.PRS.Step = msg.Step + ps.PRS.StartTime = startTime + if psHeight != msg.Height || psRound != msg.Round { + ps.PRS.Proposal = false + ps.PRS.ProposalBlockPartsHeader = types.PartSetHeader{} + ps.PRS.ProposalBlockParts = nil + ps.PRS.ProposalPOLRound = -1 + ps.PRS.ProposalPOL = nil + // We'll update the BitArray capacity later. + ps.PRS.Prevotes = nil + ps.PRS.Precommits = nil + } + if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound { + // Peer caught up to CatchupCommitRound. + // Preserve psCatchupCommit! + // NOTE: We prefer to use prs.Precommits if + // pr.Round matches pr.CatchupCommitRound. + ps.PRS.Precommits = psCatchupCommit + } + if psHeight != msg.Height { + // Shift Precommits to LastCommit. + if psHeight+1 == msg.Height && psRound == msg.LastCommitRound { + ps.PRS.LastCommitRound = msg.LastCommitRound + ps.PRS.LastCommit = ps.PRS.Precommits + } else { + ps.PRS.LastCommitRound = msg.LastCommitRound + ps.PRS.LastCommit = nil + } + // We'll update the BitArray capacity later. + ps.PRS.CatchupCommitRound = -1 + ps.PRS.CatchupCommit = nil + } +} + +// ApplyCommitStepMessage updates the peer state for the new commit. +func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + + ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader + ps.PRS.ProposalBlockParts = msg.BlockParts +} + +// ApplyProposalPOLMessage updates the peer state for the new proposal POL. +func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound { + return + } + + // TODO: Merge onto existing ps.PRS.ProposalPOL? + // We might have sent some prevotes in the meantime. + ps.PRS.ProposalPOL = msg.ProposalPOL +} + +// ApplyHasVoteMessage updates the peer state for the new vote. 
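+// It is invoked when a peer announces, via a HasVoteMessage, that it has seen
+// a particular vote, without sending the vote itself.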
+func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + + ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) +} + +// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes +// it claims to have for the corresponding BlockID. +// `ourVotes` is a BitArray of votes we have for msg.BlockID +// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height), +// we conservatively overwrite ps's votes w/ msg.Votes. +func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *cmn.BitArray) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type) + if votes != nil { + if ourVotes == nil { + votes.Update(msg.Votes) + } else { + otherVotes := votes.Sub(ourVotes) + hasVotes := otherVotes.Or(msg.Votes) + votes.Update(hasVotes) + } + } +} + +// String returns a string representation of the PeerState +func (ps *PeerState) String() string { + return ps.StringIndented("") +} + +// StringIndented returns a string representation of the PeerState +func (ps *PeerState) StringIndented(indent string) string { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return fmt.Sprintf(`PeerState{ +%s Key %v +%s RoundState %v +%s Stats %v +%s}`, + indent, ps.peer.ID(), + indent, ps.PRS.StringIndented(indent+" "), + indent, ps.Stats, + indent) +} + +//----------------------------------------------------------------------------- +// Messages + +// ConsensusMessage is a message that can be sent and received on the ConsensusReactor +type ConsensusMessage interface{} + +func RegisterConsensusMessages(cdc *amino.Codec) { + cdc.RegisterInterface((*ConsensusMessage)(nil), nil) + cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil) + cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil) + cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil) + cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil) + cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil) + cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil) + cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil) + cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil) + cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil) + cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil) +} + +func decodeMsg(bz []byte) (msg ConsensusMessage, err error) { + if len(bz) > maxMsgSize { + return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) + } + err = cdc.UnmarshalBinaryBare(bz, &msg) + return +} + +//------------------------------------- + +// NewRoundStepMessage is sent for every step taken in the ConsensusState. +// For every height/round/step transition +type NewRoundStepMessage struct { + Height int64 + Round int + Step cstypes.RoundStepType + SecondsSinceStartTime int + LastCommitRound int +} + +// String returns a string representation. +func (m *NewRoundStepMessage) String() string { + return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]", + m.Height, m.Round, m.Step, m.LastCommitRound) +} + +//------------------------------------- + +// CommitStepMessage is sent when a block is committed. +type CommitStepMessage struct { + Height int64 + BlockPartsHeader types.PartSetHeader + BlockParts *cmn.BitArray +} + +// String returns a string representation. 
+func (m *CommitStepMessage) String() string { + return fmt.Sprintf("[CommitStep H:%v BP:%v BA:%v]", m.Height, m.BlockPartsHeader, m.BlockParts) +} + +//------------------------------------- + +// ProposalMessage is sent when a new block is proposed. +type ProposalMessage struct { + Proposal *types.Proposal +} + +// String returns a string representation. +func (m *ProposalMessage) String() string { + return fmt.Sprintf("[Proposal %v]", m.Proposal) +} + +//------------------------------------- + +// ProposalPOLMessage is sent when a previous proposal is re-proposed. +type ProposalPOLMessage struct { + Height int64 + ProposalPOLRound int + ProposalPOL *cmn.BitArray +} + +// String returns a string representation. +func (m *ProposalPOLMessage) String() string { + return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL) +} + +//------------------------------------- + +// BlockPartMessage is sent when gossipping a piece of the proposed block. +type BlockPartMessage struct { + Height int64 + Round int + Part *types.Part +} + +// String returns a string representation. +func (m *BlockPartMessage) String() string { + return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part) +} + +//------------------------------------- + +// VoteMessage is sent when voting for a proposal (or lack thereof). +type VoteMessage struct { + Vote *types.Vote +} + +// String returns a string representation. +func (m *VoteMessage) String() string { + return fmt.Sprintf("[Vote %v]", m.Vote) +} + +//------------------------------------- + +// HasVoteMessage is sent to indicate that a particular vote has been received. +type HasVoteMessage struct { + Height int64 + Round int + Type byte + Index int +} + +// String returns a string representation. +func (m *HasVoteMessage) String() string { + return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type) +} + +//------------------------------------- + +// VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. +type VoteSetMaj23Message struct { + Height int64 + Round int + Type byte + BlockID types.BlockID +} + +// String returns a string representation. +func (m *VoteSetMaj23Message) String() string { + return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID) +} + +//------------------------------------- + +// VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID. +type VoteSetBitsMessage struct { + Height int64 + Round int + Type byte + BlockID types.BlockID + Votes *cmn.BitArray +} + +// String returns a string representation. +func (m *VoteSetBitsMessage) String() string { + return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes) +} + +//------------------------------------- + +// ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions for a proposal. +type ProposalHeartbeatMessage struct { + Heartbeat *types.Heartbeat +} + +// String returns a string representation. 
+func (m *ProposalHeartbeatMessage) String() string { + return fmt.Sprintf("[HEARTBEAT %v]", m.Heartbeat) +} diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go new file mode 100644 index 000000000..9e2aa0a0b --- /dev/null +++ b/consensus/reactor_test.go @@ -0,0 +1,538 @@ +package consensus + +import ( + "context" + "fmt" + "os" + "runtime" + "runtime/pprof" + "sync" + "testing" + "time" + + "github.com/tendermint/tendermint/abci/example/kvstore" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p" + p2pdummy "github.com/tendermint/tendermint/p2p/dummy" + "github.com/tendermint/tendermint/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + config = ResetConfig("consensus_reactor_test") +} + +//---------------------------------------------- +// in-process testnets + +func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*ConsensusReactor, []chan interface{}, []*types.EventBus) { + reactors := make([]*ConsensusReactor, N) + eventChans := make([]chan interface{}, N) + eventBuses := make([]*types.EventBus, N) + for i := 0; i < N; i++ { + /*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info") + if err != nil { t.Fatal(err)}*/ + reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states + reactors[i].SetLogger(css[i].Logger) + + // eventBus is already started with the cs + eventBuses[i] = css[i].eventBus + reactors[i].SetEventBus(eventBuses[i]) + + eventChans[i] = make(chan interface{}, 1) + err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i]) + require.NoError(t, err) + } + // make connected switches and start all reactors + p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("CONSENSUS", reactors[i]) + s.SetLogger(reactors[i].conS.Logger.With("module", "p2p")) + return s + }, p2p.Connect2Switches) + + // now that everyone is connected, start the state machines + // If we started the state machines before everyone was connected, + // we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors + // TODO: is this still true with new pubsub? 
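+	// SwitchToConsensus moves a reactor out of fast-sync mode; the second
+	// argument (presumably the count of fast-synced blocks) is zero here.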
+	for i := 0; i < N; i++ {
+		s := reactors[i].conS.GetState()
+		reactors[i].SwitchToConsensus(s, 0)
+	}
+	return reactors, eventChans, eventBuses
+}
+
+func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuses []*types.EventBus) {
+	logger.Info("stopConsensusNet", "n", len(reactors))
+	for i, r := range reactors {
+		logger.Info("stopConsensusNet: Stopping ConsensusReactor", "i", i)
+		r.Switch.Stop()
+	}
+	for i, b := range eventBuses {
+		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
+		b.Stop()
+	}
+	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
+}
+
+// Ensure a testnet makes blocks
+func TestReactorBasic(t *testing.T) {
+	N := 4
+	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
+	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
+	// wait till everyone makes the first new block
+	timeoutWaitGroup(t, N, func(j int) {
+		<-eventChans[j]
+	}, css)
+}
+
+// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs
+func TestReactorProposalHeartbeats(t *testing.T) {
+	N := 4
+	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
+		func(c *cfg.Config) {
+			c.Consensus.CreateEmptyBlocks = false
+		})
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
+	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
+	heartbeatChans := make([]chan interface{}, N)
+	var err error
+	for i := 0; i < N; i++ {
+		heartbeatChans[i] = make(chan interface{}, 1)
+		err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryProposalHeartbeat, heartbeatChans[i])
+		require.NoError(t, err)
+	}
+	// wait till everyone sends a proposal heartbeat
+	timeoutWaitGroup(t, N, func(j int) {
+		<-heartbeatChans[j]
+	}, css)
+
+	// send a tx; since CreateEmptyBlocks is false, a block is only made once
+	// there is something to put in it
+	if err := css[3].mempool.CheckTx([]byte{1, 2, 3}, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// wait till everyone makes the first new block
+	timeoutWaitGroup(t, N, func(j int) {
+		<-eventChans[j]
+	}, css)
+}
+
+// Test we record block parts from other peers
+func TestReactorRecordsBlockParts(t *testing.T) {
+	// create dummy peer
+	peer := p2pdummy.NewPeer()
+	ps := NewPeerState(peer).SetLogger(log.TestingLogger())
+	peer.Set(types.PeerStateKey, ps)
+
+	// create reactor
+	css := randConsensusNet(1, "consensus_reactor_records_block_parts_test", newMockTickerFunc(true), newPersistentKVStore)
+	reactor := NewConsensusReactor(css[0], false) // so we don't start the consensus states
+	reactor.SetEventBus(css[0].eventBus)
+	reactor.SetLogger(log.TestingLogger())
+	sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
+	reactor.SetSwitch(sw)
+	err := reactor.Start()
+	require.NoError(t, err)
+	defer reactor.Stop()
+
+	// 1) new block part
+	parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
+	msg := &BlockPartMessage{
+		Height: 2,
+		Round:  0,
+		Part:   parts.GetPart(0),
+	}
+	bz, err := cdc.MarshalBinaryBare(msg)
+	require.NoError(t, err)
+
+	reactor.Receive(DataChannel, peer, bz)
+	require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should 
stay the same") + + // 3) block part from earlier height + msg.Height = 1 + msg.Round = 0 + + bz, err = cdc.MarshalBinaryBare(msg) + require.NoError(t, err) + + reactor.Receive(DataChannel, peer, bz) + require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") +} + +// Test we record votes from other peers +func TestReactorRecordsVotes(t *testing.T) { + // create dummy peer + peer := p2pdummy.NewPeer() + ps := NewPeerState(peer).SetLogger(log.TestingLogger()) + peer.Set(types.PeerStateKey, ps) + + // create reactor + css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore) + reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states + reactor.SetEventBus(css[0].eventBus) + reactor.SetLogger(log.TestingLogger()) + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) + reactor.SetSwitch(sw) + err := reactor.Start() + require.NoError(t, err) + defer reactor.Stop() + _, val := css[0].state.Validators.GetByIndex(0) + + // 1) new vote + vote := &types.Vote{ + ValidatorIndex: 0, + ValidatorAddress: val.Address, + Height: 2, + Round: 0, + Timestamp: time.Now().UTC(), + Type: types.VoteTypePrevote, + BlockID: types.BlockID{}, + } + bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote}) + require.NoError(t, err) + + reactor.Receive(VoteChannel, peer, bz) + assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should have increased by 1") + + // 2) vote with the same height, but different round + vote.Round = 1 + + bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote}) + require.NoError(t, err) + + reactor.Receive(VoteChannel, peer, bz) + assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same") + + // 3) vote from earlier height + vote.Height = 1 + vote.Round = 0 + + bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote}) + require.NoError(t, err) + + reactor.Receive(VoteChannel, peer, bz) + assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same") +} + +//------------------------------------------------------------- +// ensure we can make blocks despite cycling a validator set + +func TestReactorVotingPowerChange(t *testing.T) { + nVals := 4 + logger := log.TestingLogger() + css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore) + reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals) + defer stopConsensusNet(logger, reactors, eventBuses) + + // map of active validators + activeVals := make(map[string]struct{}) + for i := 0; i < nVals; i++ { + activeVals[string(css[i].privValidator.GetAddress())] = struct{}{} + } + + // wait till everyone makes block 1 + timeoutWaitGroup(t, nVals, func(j int) { + <-eventChans[j] + }, css) + + //--------------------------------------------------------------------------- + logger.Debug("---------------------------- Testing changing the voting power of one validator a few times") + + val1PubKey := css[0].privValidator.GetPubKey() + val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey) + updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) + previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() + + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) + waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx) + waitForAndValidateBlock(t, nVals, activeVals, eventChans, 
css) + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) + + if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) + } + + updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) + previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() + + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) + waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx) + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) + + if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) + } + + updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) + previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() + + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) + waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx) + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) + waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) + + if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) + } +} + +func TestReactorValidatorSetChanges(t *testing.T) { + nPeers := 7 + nVals := 4 + css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore) + + logger := log.TestingLogger() + + reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers) + defer stopConsensusNet(logger, reactors, eventBuses) + + // map of active validators + activeVals := make(map[string]struct{}) + for i := 0; i < nVals; i++ { + activeVals[string(css[i].privValidator.GetAddress())] = struct{}{} + } + + // wait till everyone makes block 1 + timeoutWaitGroup(t, nPeers, func(j int) { + <-eventChans[j] + }, css) + + //--------------------------------------------------------------------------- + logger.Info("---------------------------- Testing adding one validator") + + newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) + newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) + + // wait till everyone makes block 2 + // ensure the commit includes all validators + // send newValTx to change vals in block 3 + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1) + + // wait till everyone makes block 3. + // it includes the commit for block 2, which is by the original validator set + waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx1) + + // wait till everyone makes block 4. 
+ // it includes the commit for block 3, which is by the original validator set + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + + // the commits for block 4 should be with the updated validator set + activeVals[string(newValidatorPubKey1.Address())] = struct{}{} + + // wait till everyone makes block 5 + // it includes the commit for block 4, which should have the updated validator set + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) + + //--------------------------------------------------------------------------- + logger.Info("---------------------------- Testing changing the voting power of one validator") + + updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) + updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) + previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() + + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1) + waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, updateValidatorTx1) + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) + + if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower()) + } + + //--------------------------------------------------------------------------- + logger.Info("---------------------------- Testing adding two validators at once") + + newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() + newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) + newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) + + newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() + newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) + newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) + + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + activeVals[string(newValidatorPubKey2.Address())] = struct{}{} + activeVals[string(newValidatorPubKey3.Address())] = struct{}{} + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) + + //--------------------------------------------------------------------------- + logger.Info("---------------------------- Testing removing two validators at once") + + removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) + removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) + + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + delete(activeVals, string(newValidatorPubKey2.Address())) + delete(activeVals, string(newValidatorPubKey3.Address())) + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) +} + +// Check we can make blocks with skip_timeout_commit=false +func TestReactorWithTimeoutCommit(t *testing.T) { + N := 4 + css := randConsensusNet(N, 
"consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter) + // override default SkipTimeoutCommit == true for tests + for i := 0; i < N; i++ { + css[i].config.SkipTimeoutCommit = false + } + + reactors, eventChans, eventBuses := startConsensusNet(t, css, N-1) + defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) + + // wait till everyone makes the first new block + timeoutWaitGroup(t, N-1, func(j int) { + <-eventChans[j] + }, css) +} + +func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) { + timeoutWaitGroup(t, n, func(j int) { + css[j].Logger.Debug("waitForAndValidateBlock") + newBlockI, ok := <-eventChans[j] + if !ok { + return + } + newBlock := newBlockI.(types.EventDataNewBlock).Block + css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height) + err := validateBlock(newBlock, activeVals) + assert.Nil(t, err) + for _, tx := range txs { + css[j].mempool.CheckTx(tx, nil) + assert.Nil(t, err) + } + }, css) +} + +func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) { + timeoutWaitGroup(t, n, func(j int) { + ntxs := 0 + BLOCK_TX_LOOP: + for { + css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs) + newBlockI, ok := <-eventChans[j] + if !ok { + return + } + newBlock := newBlockI.(types.EventDataNewBlock).Block + css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height) + err := validateBlock(newBlock, activeVals) + assert.Nil(t, err) + + // check that txs match the txs we're waiting for. + // note they could be spread over multiple blocks, + // but they should be in order. + for _, tx := range newBlock.Data.Txs { + assert.EqualValues(t, txs[ntxs], tx) + ntxs++ + } + + if ntxs == len(txs) { + break BLOCK_TX_LOOP + } + } + + }, css) +} + +func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) { + timeoutWaitGroup(t, n, func(j int) { + + var newBlock *types.Block + LOOP: + for { + css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt") + newBlockI, ok := <-eventChans[j] + if !ok { + return + } + newBlock = newBlockI.(types.EventDataNewBlock).Block + if newBlock.LastCommit.Size() == len(updatedVals) { + css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height) + break LOOP + } else { + css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", "height", newBlock.Height) + } + } + + err := validateBlock(newBlock, updatedVals) + assert.Nil(t, err) + }, css) +} + +// expects high synchrony! +func validateBlock(block *types.Block, activeVals map[string]struct{}) error { + if block.LastCommit.Size() != len(activeVals) { + return fmt.Errorf("Commit size doesn't match number of active validators. 
Got %d, expected %d", block.LastCommit.Size(), len(activeVals)) + } + + for _, vote := range block.LastCommit.Precommits { + if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok { + return fmt.Errorf("Found vote for unactive validator %X", vote.ValidatorAddress) + } + } + return nil +} + +func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*ConsensusState) { + wg := new(sync.WaitGroup) + wg.Add(n) + for i := 0; i < n; i++ { + go func(j int) { + f(j) + wg.Done() + }(i) + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + // we're running many nodes in-process, possibly in in a virtual machine, + // and spewing debug messages - making a block could take a while, + timeout := time.Second * 300 + + select { + case <-done: + case <-time.After(timeout): + for i, cs := range css { + t.Log("#################") + t.Log("Validator", i) + t.Log(cs.GetRoundState()) + t.Log("") + } + os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n")) + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + capture() + panic("Timed out waiting for all validators to commit a block") + } +} + +func capture() { + trace := make([]byte, 10240000) + count := runtime.Stack(trace, true) + fmt.Printf("Stack of %d bytes: %s\n", count, trace) +} diff --git a/consensus/replay.go b/consensus/replay.go new file mode 100644 index 000000000..3035f75d8 --- /dev/null +++ b/consensus/replay.go @@ -0,0 +1,469 @@ +package consensus + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "reflect" + //"strconv" + //"strings" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + //auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +var crc32c = crc32.MakeTable(crc32.Castagnoli) + +// Functionality to replay blocks and messages on recovery from a crash. +// There are two general failure scenarios: +// +// 1. failure during consensus +// 2. failure while applying the block +// +// The former is handled by the WAL, the latter by the proxyApp Handshake on +// restart, which ultimately hands off the work to the WAL. + +//----------------------------------------- +// 1. Recover from failure during consensus +// (by replaying messages from the WAL) +//----------------------------------------- + +// Unmarshal and apply a single message to the consensus state as if it were +// received in receiveRoutine. Lines that start with "#" are ignored. +// NOTE: receiveRoutine should not be running. +func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error { + // Skip meta messages which exist for demarcating boundaries. + if _, ok := msg.Msg.(EndHeightMessage); ok { + return nil + } + + // for logging + switch m := msg.Msg.(type) { + case types.EventDataRoundState: + cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) + // these are playback checks + ticker := time.After(time.Second * 2) + if newStepCh != nil { + select { + case mi := <-newStepCh: + m2 := mi.(types.EventDataRoundState) + if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { + return fmt.Errorf("RoundState mismatch. 
Got %v; Expected %v", m2, m) + } + case <-ticker: + return fmt.Errorf("Failed to read off newStepCh") + } + } + case msgInfo: + peerID := m.PeerID + if peerID == "" { + peerID = "local" + } + switch msg := m.Msg.(type) { + case *ProposalMessage: + p := msg.Proposal + cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", + p.BlockPartsHeader, "pol", p.POLRound, "peer", peerID) + case *BlockPartMessage: + cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) + case *VoteMessage: + v := msg.Vote + cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, + "blockID", v.BlockID, "peer", peerID) + } + + cs.handleMsg(m) + case timeoutInfo: + cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration) + cs.handleTimeout(m, cs.RoundState) + default: + return fmt.Errorf("Replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg)) + } + return nil +} + +// Replay only those messages since the last block. `timeoutRoutine` should +// run concurrently to read off tickChan. +func (cs *ConsensusState) catchupReplay(csHeight int64) error { + + // Set replayMode to true so we don't log signing errors. + cs.replayMode = true + defer func() { cs.replayMode = false }() + + // Ensure that #ENDHEIGHT for this height doesn't exist. + // NOTE: This is just a sanity check. As far as we know things work fine + // without it, and Handshake could reuse ConsensusState if it weren't for + // this check (since we can crash after writing #ENDHEIGHT). + // + // Ignore data corruption errors since this is a sanity check. + gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) + if err != nil { + return err + } + if gr != nil { + if err := gr.Close(); err != nil { + return err + } + } + if found { + return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight) + } + + // Search for last height marker. + // + // Ignore data corruption errors in previous heights because we only care about last height + gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) + if err == io.EOF { + cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1) + } else if err != nil { + return err + } + if !found { + return fmt.Errorf("Cannot replay height %d. 
WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1) + } + defer gr.Close() // nolint: errcheck + + cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) + + var msg *TimedWALMessage + dec := WALDecoder{gr} + + for { + msg, err = dec.Decode() + if err == io.EOF { + break + } else if IsDataCorruptionError(err) { + cs.Logger.Debug("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) + panic(fmt.Sprintf("data has been corrupted (%v) in last height %d of consensus WAL", err, csHeight)) + } else if err != nil { + return err + } + + // NOTE: since the priv key is set when the msgs are received + // it will attempt to eg double sign but we can just ignore it + // since the votes will be replayed and we'll get to the next step + if err := cs.readReplayMessage(msg, nil); err != nil { + return err + } + } + cs.Logger.Info("Replay: Done") + return nil +} + +//-------------------------------------------------------------------------------- + +// Parses marker lines of the form: +// #ENDHEIGHT: 12345 +/* +func makeHeightSearchFunc(height int64) auto.SearchFunc { + return func(line string) (int, error) { + line = strings.TrimRight(line, "\n") + parts := strings.Split(line, " ") + if len(parts) != 2 { + return -1, errors.New("Line did not have 2 parts") + } + i, err := strconv.Atoi(parts[1]) + if err != nil { + return -1, errors.New("Failed to parse INFO: " + err.Error()) + } + if height < i { + return 1, nil + } else if height == i { + return 0, nil + } else { + return -1, nil + } + } +}*/ + +//--------------------------------------------------- +// 2. Recover from failure while applying the block. +// (by handshaking with the app to figure out where +// we were last, and using the WAL to recover there.) +//--------------------------------------------------- + +type Handshaker struct { + stateDB dbm.DB + initialState sm.State + store sm.BlockStore + genDoc *types.GenesisDoc + logger log.Logger + + nBlocks int // number of blocks applied to the state +} + +func NewHandshaker(stateDB dbm.DB, state sm.State, + store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { + + return &Handshaker{ + stateDB: stateDB, + initialState: state, + store: store, + genDoc: genDoc, + logger: log.NewNopLogger(), + nBlocks: 0, + } +} + +func (h *Handshaker) SetLogger(l log.Logger) { + h.logger = l +} + +func (h *Handshaker) NBlocks() int { + return h.nBlocks +} + +// TODO: retry the handshake/replay if it fails ? +func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { + + // Handshake is done via ABCI Info on the query conn. + res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version}) + if err != nil { + return fmt.Errorf("Error calling Info: %v", err) + } + + blockHeight := int64(res.LastBlockHeight) + if blockHeight < 0 { + return fmt.Errorf("Got a negative last block height (%d) from the app", blockHeight) + } + appHash := res.LastBlockAppHash + + h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) + + // TODO: check app version. + + // Replay blocks up to the latest in the blockstore. 
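+	// ReplayBlocks compares the app, state, and store heights and replays
+	// whatever range of blocks the app is missing (see ReplayBlocks below).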
+	_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
+	if err != nil {
+		return fmt.Errorf("Error on replay: %v", err)
+	}
+
+	h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
+		"appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
+
+	// TODO: (on restart) replay mempool
+
+	return nil
+}
+
+// Replay all blocks since appBlockHeight and ensure the result matches the current state.
+// Returns the final AppHash or an error.
+func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {
+
+	storeBlockHeight := h.store.Height()
+	stateBlockHeight := state.LastBlockHeight
+	h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
+
+	// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
+	if appBlockHeight == 0 {
+		validators := types.TM2PB.Validators(state.Validators)
+		csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
+		req := abci.RequestInitChain{
+			Time:            h.genDoc.GenesisTime.Unix(), // TODO
+			ChainId:         h.genDoc.ChainID,
+			ConsensusParams: csParams,
+			Validators:      validators,
+			AppStateBytes:   h.genDoc.AppStateJSON,
+		}
+		res, err := proxyApp.Consensus().InitChainSync(req)
+		if err != nil {
+			return nil, err
+		}
+
+		// if the app returned validators or consensus params,
+		// update the state with them
+		if len(res.Validators) > 0 {
+			vals, err := types.PB2TM.Validators(res.Validators)
+			if err != nil {
+				return nil, err
+			}
+			state.Validators = types.NewValidatorSet(vals)
+		}
+		if res.ConsensusParams != nil {
+			state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams)
+		}
+		sm.SaveState(h.stateDB, state)
+	}
+
+	// First handle edge cases and constraints on the storeBlockHeight
+	if storeBlockHeight == 0 {
+		return appHash, checkAppHash(state, appHash)
+
+	} else if storeBlockHeight < appBlockHeight {
+		// the app should never be ahead of the store (but this is under app's control)
+		return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
+
+	} else if storeBlockHeight < stateBlockHeight {
+		// the state should never be ahead of the store (this is under tendermint's control)
+		cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
+
+	} else if storeBlockHeight > stateBlockHeight+1 {
+		// store should be at most one ahead of the state (this is under tendermint's control)
+		cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
+	}
+
+	var err error
+	// Now either store is equal to state, or one ahead.
+	// For each, consider all cases of where the app could be, given app <= store
+	if storeBlockHeight == stateBlockHeight {
+		// Tendermint ran Commit and saved the state.
+		// Either the app is asking for replay, or we're all synced up.
+		if appBlockHeight < storeBlockHeight {
+			// the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store)
+			return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false)
+
+		} else if appBlockHeight == storeBlockHeight {
+			// We're good!
+			return appHash, checkAppHash(state, appHash)
+		}
+
+	} else if storeBlockHeight == stateBlockHeight+1 {
+		// We saved the block in the store but haven't updated the state,
+		// so we'll need to replay a block using the WAL.
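+		// Given app <= store and store == state+1, three sub-cases remain:
+		// the app is behind the state, even with it, or even with the store.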
+ if appBlockHeight < stateBlockHeight { + // the app is further behind than it should be, so replay blocks + // but leave the last block to go through the WAL + return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + + } else if appBlockHeight == stateBlockHeight { + // We haven't run Commit (both the state and app are one block behind), + // so replayBlock with the real app. + // NOTE: We could instead use the cs.WAL on cs.Start, + // but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT + h.logger.Info("Replay last block using real app") + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + return state.AppHash, err + + } else if appBlockHeight == storeBlockHeight { + // We ran Commit, but didn't save the state, so replayBlock with mock app + abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight) + if err != nil { + return nil, err + } + mockApp := newMockProxyApp(appHash, abciResponses) + h.logger.Info("Replay last block using mock app") + state, err = h.replayBlock(state, storeBlockHeight, mockApp) + return state.AppHash, err + } + + } + + cmn.PanicSanity("Should never happen") + return nil, nil +} + +func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { + // App is further behind than it should be, so we need to replay blocks. + // We replay all blocks from appBlockHeight+1. + // + // Note that we don't have an old version of the state, + // so we by-pass state validation/mutation using sm.ExecCommitBlock. + // This also means we won't be saving validator sets if they change during this period. + // TODO: Load the historical information to fix this and just use state.ApplyBlock + // + // If mutateState == true, the final block is replayed with h.replayBlock() + + var appHash []byte + var err error + finalBlock := storeBlockHeight + if mutateState { + finalBlock-- + } + for i := appBlockHeight + 1; i <= finalBlock; i++ { + h.logger.Info("Applying block", "height", i) + block := h.store.LoadBlock(i) + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB) + if err != nil { + return nil, err + } + + h.nBlocks++ + } + + if mutateState { + // sync the final block + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + if err != nil { + return nil, err + } + appHash = state.AppHash + } + + return appHash, checkAppHash(state, appHash) +} + +// ApplyBlock on the proxyApp with the last block. +func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { + block := h.store.LoadBlock(height) + meta := h.store.LoadBlockMeta(height) + + blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{}) + + var err error + state, err = blockExec.ApplyBlock(state, meta.BlockID, block) + if err != nil { + return sm.State{}, err + } + + h.nBlocks++ + + return state, nil +} + +func checkAppHash(state sm.State, appHash []byte) error { + if !bytes.Equal(state.AppHash, appHash) { + panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. 
Got %X, expected %X", appHash, state.AppHash).Error()) + } + return nil +} + +//-------------------------------------------------------------------------------- +// mockProxyApp uses ABCIResponses to give the right results +// Useful because we don't want to call Commit() twice for the same block on the real app. + +func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppConnConsensus { + clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ + appHash: appHash, + abciResponses: abciResponses, + }) + cli, _ := clientCreator.NewABCIClient() + err := cli.Start() + if err != nil { + panic(err) + } + return proxy.NewAppConnConsensus(cli) +} + +type mockProxyApp struct { + abci.BaseApplication + + appHash []byte + txCount int + abciResponses *sm.ABCIResponses +} + +func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { + r := mock.abciResponses.DeliverTx[mock.txCount] + mock.txCount++ + return *r +} + +func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + mock.txCount = 0 + return *mock.abciResponses.EndBlock +} + +func (mock *mockProxyApp) Commit() abci.ResponseCommit { + return abci.ResponseCommit{Data: mock.appHash} +} diff --git a/consensus/replay_file.go b/consensus/replay_file.go new file mode 100644 index 000000000..0c0b0dcb1 --- /dev/null +++ b/consensus/replay_file.go @@ -0,0 +1,321 @@ +package consensus + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/pkg/errors" + + bc "github.com/tendermint/tendermint/blockchain" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + // event bus subscriber + subscriber = "replay-file" +) + +//-------------------------------------------------------- +// replay messages interactively or all at once + +// replay the wal file +func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) { + consensusState := newConsensusStateForReplay(config, csConfig) + + if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { + cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err)) + } +} + +// Replay msgs in file or start the console +func (cs *ConsensusState) ReplayFile(file string, console bool) error { + + if cs.IsRunning() { + return errors.New("cs is already running, cannot replay") + } + if cs.wal != nil { + return errors.New("cs wal is open, cannot replay") + } + + cs.startForReplay() + + // ensure all new step events are regenerated as expected + newStepCh := make(chan interface{}, 1) + + ctx := context.Background() + err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh) + if err != nil { + return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) + } + defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep) + + // just open the file for reading, no need to use wal + fp, err := os.OpenFile(file, os.O_RDONLY, 0600) + if err != nil { + return err + } + + pb := newPlayback(file, fp, cs, cs.state.Copy()) + defer pb.fp.Close() // nolint: errcheck + + var nextN int // apply N msgs in a row + var msg *TimedWALMessage + for { + if nextN == 0 && console { + nextN = pb.replayConsoleLoop() + } + + msg, err = 
pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil { + return err + } + + if nextN > 0 { + nextN-- + } + pb.count++ + } + return nil +} + +//------------------------------------------------ +// playback manager + +type playback struct { + cs *ConsensusState + + fp *os.File + dec *WALDecoder + count int // how many lines/msgs into the file are we + + // replays can be reset to beginning + fileName string // so we can close/reopen the file + genesisState sm.State // so the replay session knows where to restart from +} + +func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState sm.State) *playback { + return &playback{ + cs: cs, + fp: fp, + fileName: fileName, + genesisState: genState, + dec: NewWALDecoder(fp), + } +} + +// go back count steps by resetting the state and running (pb.count - count) steps +func (pb *playback) replayReset(count int, newStepCh chan interface{}) error { + pb.cs.Stop() + pb.cs.Wait() + + newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, + pb.cs.blockStore, pb.cs.mempool, pb.cs.evpool) + newCS.SetEventBus(pb.cs.eventBus) + newCS.startForReplay() + + if err := pb.fp.Close(); err != nil { + return err + } + fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0600) + if err != nil { + return err + } + pb.fp = fp + pb.dec = NewWALDecoder(fp) + count = pb.count - count + fmt.Printf("Reseting from %d to %d\n", pb.count, count) + pb.count = 0 + pb.cs = newCS + var msg *TimedWALMessage + for i := 0; i < count; i++ { + msg, err = pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil { + return err + } + pb.count++ + } + return nil +} + +func (cs *ConsensusState) startForReplay() { + cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests") + /* TODO:! 
+	// since we replay tocks we just ignore ticks
+	go func() {
+		for {
+			select {
+			case <-cs.tickChan:
+			case <-cs.Quit:
+				return
+			}
+		}
+	}()*/
+}
+
+// console function for parsing input and running commands
+func (pb *playback) replayConsoleLoop() int {
+	for {
+		fmt.Printf("> ")
+		bufReader := bufio.NewReader(os.Stdin)
+		line, more, err := bufReader.ReadLine()
+		if more {
+			cmn.Exit("input is too long")
+		} else if err != nil {
+			cmn.Exit(err.Error())
+		}
+
+		tokens := strings.Split(string(line), " ")
+		if len(tokens) == 0 {
+			continue
+		}
+
+		switch tokens[0] {
+		case "next":
+			// "next" -> replay next message
+			// "next N" -> replay next N messages
+
+			if len(tokens) == 1 {
+				return 0
+			}
+			i, err := strconv.Atoi(tokens[1])
+			if err != nil {
+				fmt.Println("next takes an integer argument")
+			} else {
+				return i
+			}
+
+		case "back":
+			// "back" -> go back one message
+			// "back N" -> go back N messages
+
+			// NOTE: "back" is not supported in the state machine design,
+			// so we restart and replay up to the requested point
+
+			ctx := context.Background()
+			// ensure all new step events are regenerated as expected
+			newStepCh := make(chan interface{}, 1)
+
+			err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
+			if err != nil {
+				cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
+			}
+			defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
+
+			if len(tokens) == 1 {
+				if err := pb.replayReset(1, newStepCh); err != nil {
+					pb.cs.Logger.Error("Replay reset error", "err", err)
+				}
+			} else {
+				i, err := strconv.Atoi(tokens[1])
+				if err != nil {
+					fmt.Println("back takes an integer argument")
+				} else if i > pb.count {
+					fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
+				} else {
+					if err := pb.replayReset(i, newStepCh); err != nil {
+						pb.cs.Logger.Error("Replay reset error", "err", err)
+					}
+				}
+			}
+
+		case "rs":
+			// "rs" -> print entire round state
+			// "rs short" -> print height/round/step
+			// "rs <field>" -> print another field of the round state
+
+			rs := pb.cs.RoundState
+			if len(tokens) == 1 {
+				fmt.Println(rs)
+			} else {
+				switch tokens[1] {
+				case "short":
+					fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step)
+				case "validators":
+					fmt.Println(rs.Validators)
+				case "proposal":
+					fmt.Println(rs.Proposal)
+				case "proposal_block":
+					fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort())
+				case "locked_round":
+					fmt.Println(rs.LockedRound)
+				case "locked_block":
+					fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort())
+				case "votes":
+					fmt.Println(rs.Votes.StringIndented("  "))
+
+				default:
+					fmt.Println("Unknown option", tokens[1])
+				}
+			}
+		case "n":
+			fmt.Println(pb.count)
+		}
+	}
+}
+
+//--------------------------------------------------------------------------------
+
+// convenience for replay mode
+func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *ConsensusState {
+	dbType := dbm.DBBackendType(config.DBBackend)
+	// Get BlockStore
+	blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir())
+	blockStore := bc.NewBlockStore(blockStoreDB)
+
+	// Get State
+	stateDB := dbm.NewDB("state", dbType, config.DBDir())
+	gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
+	if err != nil {
+		cmn.Exit(err.Error())
+	}
+	state, err := sm.MakeGenesisState(gdoc)
+	if err != nil {
+		cmn.Exit(err.Error())
+	}
+
+	// Create proxyAppConn connection 
(consensus, mempool, query) + clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) + proxyApp := proxy.NewAppConns(clientCreator, + NewHandshaker(stateDB, state, blockStore, gdoc)) + err = proxyApp.Start() + if err != nil { + cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err)) + } + + eventBus := types.NewEventBus() + if err := eventBus.Start(); err != nil { + cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err)) + } + + mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{} + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + + consensusState := NewConsensusState(csConfig, state.Copy(), blockExec, + blockStore, mempool, evpool) + + consensusState.SetEventBus(eventBus) + return consensusState +} diff --git a/consensus/replay_test.go b/consensus/replay_test.go new file mode 100644 index 000000000..da526d249 --- /dev/null +++ b/consensus/replay_test.go @@ -0,0 +1,687 @@ +package consensus + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" + crypto "github.com/tendermint/tendermint/crypto" + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/libs/log" +) + +var consensusReplayConfig *cfg.Config + +func init() { + consensusReplayConfig = ResetConfig("consensus_replay_test") +} + +// These tests ensure we can always recover from failure at any part of the consensus process. +// There are two general failure scenarios: failure during consensus, and failure while applying the block. +// Only the latter interacts with the app and store, +// but the former has to deal with restrictions on re-use of priv_validator keys. +// The `WAL Tests` are for failures during the consensus; +// the `Handshake Tests` are for failures in applying the block. +// With the help of the WAL, we can recover from it all! + +//------------------------------------------------------------------------------------------ +// WAL Tests + +// TODO: It would be better to verify explicitly which states we can recover from without the wal +// and which ones we need the wal for - then we'd also be able to only flush the +// wal writer when we need to, instead of with every message. 
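+//
+// Each crash test below restarts a fresh ConsensusState from the persisted
+// WAL and then expects a new block, which is what
+// startNewConsensusStateAndWaitForBlock verifies.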
+func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
+ logger := log.TestingLogger()
+ state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
+ privValidator := loadPrivValidator(consensusReplayConfig)
+ cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
+ cs.SetLogger(logger)
+
+ bytes, _ := ioutil.ReadFile(cs.config.WalFile())
+ // fmt.Printf("====== WAL: \n\r%s\n", bytes)
+ t.Logf("====== WAL: \n\r%X\n", bytes)
+
+ err := cs.Start()
+ require.NoError(t, err)
+ defer cs.Stop()
+
+ // This is just a signal that we haven't halted; it's not something contained
+ // in the WAL itself. Assuming the consensus state is running, replay of any
+ // WAL, including the empty one, should eventually be followed by a new
+ // block, or else something is wrong.
+ newBlockCh := make(chan interface{}, 1)
+ err = cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, newBlockCh)
+ require.NoError(t, err)
+ select {
+ case <-newBlockCh:
+ case <-time.After(60 * time.Second):
+ t.Fatalf("Timed out waiting for new block (see trace above)")
+ }
+}
+
+func sendTxs(cs *ConsensusState, ctx context.Context) {
+ for i := 0; i < 256; i++ {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ tx := []byte{byte(i)}
+ cs.mempool.CheckTx(tx, nil)
+ }
+ }
+}
+
+// TestWALCrash uses a crashing WAL to test that we can recover from any WAL failure.
+func TestWALCrash(t *testing.T) {
+ testCases := []struct {
+ name string
+ initFn func(dbm.DB, *ConsensusState, context.Context)
+ heightToStop int64
+ }{
+ {"empty block",
+ func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {},
+ 1},
+ {"block with a smaller part size",
+ func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
+ // XXX: is there a better way to change BlockPartSizeBytes?
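+ // Shrinking the part size forces a block to span several parts,
+ // so the WAL records (and can crash between) more messages per height.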
+ cs.state.ConsensusParams.BlockPartSizeBytes = 512
+ sm.SaveState(stateDB, cs.state)
+ go sendTxs(cs, ctx)
+ },
+ 1},
+ {"many non-empty blocks",
+ func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
+ go sendTxs(cs, ctx)
+ },
+ 3},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
+ })
+ }
+}
+
+func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
+ walPaniced := make(chan error)
+ crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}
+
+ i := 1
+LOOP:
+ for {
+ // fmt.Printf("====== LOOP %d\n", i)
+ t.Logf("====== LOOP %d\n", i)
+
+ // create consensus state from a clean slate
+ logger := log.NewNopLogger()
+ stateDB := dbm.NewMemDB()
+ state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
+ privValidator := loadPrivValidator(consensusReplayConfig)
+ blockDB := dbm.NewMemDB()
+ cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
+ cs.SetLogger(logger)
+
+ // start sending transactions
+ ctx, cancel := context.WithCancel(context.Background())
+ initFn(stateDB, cs, ctx)
+
+ // clean up WAL file from the previous iteration
+ walFile := cs.config.WalFile()
+ os.Remove(walFile)
+
+ // set crashing WAL
+ csWal, err := cs.OpenWAL(walFile)
+ require.NoError(t, err)
+ crashingWal.next = csWal
+ // reset the message counter
+ crashingWal.msgIndex = 1
+ cs.wal = crashingWal
+
+ // start consensus state
+ err = cs.Start()
+ require.NoError(t, err)
+
+ i++
+
+ select {
+ case err := <-walPaniced:
+ t.Logf("WAL panicked: %v", err)
+
+ // make sure we can make blocks after a crash
+ startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
+
+ // stop consensus state and transactions sender (initFn)
+ cs.Stop()
+ cancel()
+
+ // if we reached the required height, exit
+ if _, ok := err.(ReachedHeightToStopError); ok {
+ break LOOP
+ }
+ case <-time.After(10 * time.Second):
+ t.Fatal("WAL did not panic for 10 seconds (check the log)")
+ }
+ }
+}
+
+// crashingWAL is a WAL that simulates a crash during Save (before and after).
+// It remembers the message for which we last panicked
+// (lastPanicedForMsgIndex), so we don't panic for it again in subsequent
+// iterations.
+type crashingWAL struct {
+ next WAL
+ panicCh chan error
+ heightToStop int64
+
+ msgIndex int // current message index
+ lastPanicedForMsgIndex int // last message for which we panicked
+}
+
+// WALWriteError indicates a WAL crash.
+type WALWriteError struct {
+ msg string
+}
+
+func (e WALWriteError) Error() string {
+ return e.msg
+}
+
+// ReachedHeightToStopError indicates we've reached the required consensus
+// height and may exit.
+type ReachedHeightToStopError struct {
+ height int64
+}
+
+func (e ReachedHeightToStopError) Error() string {
+ return fmt.Sprintf("reached height to stop %d", e.height)
+}
+
+// Write simulates a WAL crash by sending an error to the panicCh and then
+// exiting the cs.receiveRoutine.
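+// Using runtime.Goexit (rather than panic) kills only the goroutine that
+// called Write - the consensus receiveRoutine - so the test goroutine stays
+// alive to read the error from panicCh and restart consensus.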
+func (w *crashingWAL) Write(m WALMessage) { + if endMsg, ok := m.(EndHeightMessage); ok { + if endMsg.Height == w.heightToStop { + w.panicCh <- ReachedHeightToStopError{endMsg.Height} + runtime.Goexit() + } else { + w.next.Write(m) + } + return + } + + if w.msgIndex > w.lastPanicedForMsgIndex { + w.lastPanicedForMsgIndex = w.msgIndex + _, file, line, _ := runtime.Caller(1) + w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)} + runtime.Goexit() + } else { + w.msgIndex++ + w.next.Write(m) + } +} + +func (w *crashingWAL) WriteSync(m WALMessage) { + w.Write(m) +} + +func (w *crashingWAL) Group() *auto.Group { return w.next.Group() } +func (w *crashingWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) { + return w.next.SearchForEndHeight(height, options) +} + +func (w *crashingWAL) Start() error { return w.next.Start() } +func (w *crashingWAL) Stop() error { return w.next.Stop() } +func (w *crashingWAL) Wait() { w.next.Wait() } + +//------------------------------------------------------------------------------------------ +// Handshake Tests + +const ( + NUM_BLOCKS = 6 +) + +var ( + mempool = sm.MockMempool{} + evpool = sm.MockEvidencePool{} +) + +//--------------------------------------- +// Test handshake/replay + +// 0 - all synced up +// 1 - saved block but app and state are behind +// 2 - save block and committed but state is behind +var modes = []uint{0, 1, 2} + +// Sync from scratch +func TestHandshakeReplayAll(t *testing.T) { + for _, m := range modes { + testHandshakeReplay(t, 0, m) + } +} + +// Sync many, not from scratch +func TestHandshakeReplaySome(t *testing.T) { + for _, m := range modes { + testHandshakeReplay(t, 1, m) + } +} + +// Sync from lagging by one +func TestHandshakeReplayOne(t *testing.T) { + for _, m := range modes { + testHandshakeReplay(t, NUM_BLOCKS-1, m) + } +} + +// Sync from caught up +func TestHandshakeReplayNone(t *testing.T) { + for _, m := range modes { + testHandshakeReplay(t, NUM_BLOCKS, m) + } +} + +func tempWALWithData(data []byte) string { + walFile, err := ioutil.TempFile("", "wal") + if err != nil { + panic(fmt.Errorf("failed to create temp WAL file: %v", err)) + } + _, err = walFile.Write(data) + if err != nil { + panic(fmt.Errorf("failed to write to temp WAL file: %v", err)) + } + if err := walFile.Close(); err != nil { + panic(fmt.Errorf("failed to close temp WAL file: %v", err)) + } + return walFile.Name() +} + +// Make some blocks. Start a fresh app and apply nBlocks blocks. 
Then restart the app and sync it up with the remaining blocks +func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { + config := ResetConfig("proxy_test_") + + walBody, err := WALWithNBlocks(NUM_BLOCKS) + if err != nil { + t.Fatal(err) + } + walFile := tempWALWithData(walBody) + config.Consensus.SetWalFile(walFile) + + privVal := privval.LoadFilePV(config.PrivValidatorFile()) + + wal, err := NewWAL(walFile) + if err != nil { + t.Fatal(err) + } + wal.SetLogger(log.TestingLogger()) + if err := wal.Start(); err != nil { + t.Fatal(err) + } + defer wal.Stop() + + chain, commits, err := makeBlockchainFromWAL(wal) + if err != nil { + t.Fatalf(err.Error()) + } + + stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) + store.chain = chain + store.commits = commits + + // run the chain through state.ApplyBlock to build up the tendermint state + state = buildTMStateFromChain(config, stateDB, state, chain, mode) + latestAppHash := state.AppHash + + // make a new client creator + kvstoreApp := kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "2")) + clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp) + if nBlocks > 0 { + // run nBlocks against a new client to build up the app state. + // use a throwaway tendermint state + proxyApp := proxy.NewAppConns(clientCreator2, nil) + stateDB, state, _ := stateAndStore(config, privVal.GetPubKey()) + buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode) + } + + // now start the app using the handshake - it should sync + genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + handshaker := NewHandshaker(stateDB, state, store, genDoc) + proxyApp := proxy.NewAppConns(clientCreator2, handshaker) + if err := proxyApp.Start(); err != nil { + t.Fatalf("Error starting proxy app connections: %v", err) + } + defer proxyApp.Stop() + + // get the latest app hash from the app + res, err := proxyApp.Query().InfoSync(abci.RequestInfo{""}) + if err != nil { + t.Fatal(err) + } + + // the app hash should be synced up + if !bytes.Equal(latestAppHash, res.LastBlockAppHash) { + t.Fatalf("Expected app hashes to match after handshake/replay. 
got %X, expected %X", res.LastBlockAppHash, latestAppHash)
+ }
+
+ expectedBlocksToSync := NUM_BLOCKS - nBlocks
+ if nBlocks == NUM_BLOCKS && mode > 0 {
+ expectedBlocksToSync++
+ } else if nBlocks > 0 && mode == 1 {
+ expectedBlocksToSync++
+ }
+
+ if handshaker.NBlocks() != expectedBlocksToSync {
+ t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
+ }
+}
+
+func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State {
+ testPartSize := st.ConsensusParams.BlockPartSizeBytes
+ blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
+
+ blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()}
+ newState, err := blockExec.ApplyBlock(st, blkID, blk)
+ if err != nil {
+ panic(err)
+ }
+ return newState
+}
+
+func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
+ state sm.State, chain []*types.Block, nBlocks int, mode uint) {
+ // start a new app without handshake, play nBlocks blocks
+ if err := proxyApp.Start(); err != nil {
+ panic(err)
+ }
+ defer proxyApp.Stop()
+
+ validators := types.TM2PB.Validators(state.Validators)
+ if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
+ Validators: validators,
+ }); err != nil {
+ panic(err)
+ }
+
+ switch mode {
+ case 0:
+ for i := 0; i < nBlocks; i++ {
+ block := chain[i]
+ state = applyBlock(stateDB, state, block, proxyApp)
+ }
+ case 1, 2:
+ for i := 0; i < nBlocks-1; i++ {
+ block := chain[i]
+ state = applyBlock(stateDB, state, block, proxyApp)
+ }
+
+ if mode == 2 {
+ // update the kvstore height and apphash
+ // as if we ran commit, but without saving the new tendermint state
+ state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp)
+ }
+ }
+
+}
+
+func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State {
+ // run the whole chain against this client to build up the tendermint state
+ clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1")))
+ proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
+ if err := proxyApp.Start(); err != nil {
+ panic(err)
+ }
+ defer proxyApp.Stop()
+
+ validators := types.TM2PB.Validators(state.Validators)
+ if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
+ Validators: validators,
+ }); err != nil {
+ panic(err)
+ }
+
+ switch mode {
+ case 0:
+ // sync right up
+ for _, block := range chain {
+ state = applyBlock(stateDB, state, block, proxyApp)
+ }
+
+ case 1, 2:
+ // sync up to the penultimate as if we stored the block.
+ // whether we commit or not depends on the appHash
+ for _, block := range chain[:len(chain)-1] {
+ state = applyBlock(stateDB, state, block, proxyApp)
+ }
+
+ // apply the final block to a state copy so we can
+ // get the right next appHash but keep the state back
+ applyBlock(stateDB, state, chain[len(chain)-1], proxyApp)
+ }
+
+ return state
+}
+
+//--------------------------
+// utils for making blocks
+
+func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
+ // Search for height marker
+ gr, found, err := wal.SearchForEndHeight(0, &WALSearchOptions{})
+ if err != nil {
+ return nil, nil, err
+ }
+ if !found {
+ return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
+ }
+ defer gr.Close() // nolint: errcheck
+
+ // log.Notice("Build a blockchain by reading from the WAL")
+
+ var blocks []*types.Block
+ var commits []*types.Commit
+
+ var thisBlockParts *types.PartSet
+ var thisBlockCommit *types.Commit
+ var height int64
+
+ dec := NewWALDecoder(gr)
+ for {
+ msg, err := dec.Decode()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, nil, err
+ }
+
+ piece := readPieceFromWAL(msg)
+ if piece == nil {
+ continue
+ }
+
+ switch p := piece.(type) {
+ case EndHeightMessage:
+ // if it's not the first one, we have a full block
+ if thisBlockParts != nil {
+ var block = new(types.Block)
+ _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
+ if err != nil {
+ panic(err)
+ }
+ if block.Height != height+1 {
+ panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
+ }
+ commitHeight := thisBlockCommit.Precommits[0].Height
+ if commitHeight != height+1 {
+ panic(cmn.Fmt("commit doesn't match. got height %d, expected %d", commitHeight, height+1))
+ }
+ blocks = append(blocks, block)
+ commits = append(commits, thisBlockCommit)
+ height++
+ }
+ case *types.PartSetHeader:
+ thisBlockParts = types.NewPartSetFromHeader(*p)
+ case *types.Part:
+ _, err := thisBlockParts.AddPart(p)
+ if err != nil {
+ return nil, nil, err
+ }
+ case *types.Vote:
+ if p.Type == types.VoteTypePrecommit {
+ thisBlockCommit = &types.Commit{
+ BlockID: p.BlockID,
+ Precommits: []*types.Vote{p},
+ }
+ }
+ }
+ }
+ // grab the last block too
+ var block = new(types.Block)
+ _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
+ if err != nil {
+ panic(err)
+ }
+ if block.Height != height+1 {
+ panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
+ }
+ commitHeight := thisBlockCommit.Precommits[0].Height
+ if commitHeight != height+1 {
+ panic(cmn.Fmt("commit doesn't match.
got height %d, expected %d", commitHeight, height+1)) + } + blocks = append(blocks, block) + commits = append(commits, thisBlockCommit) + return blocks, commits, nil +} + +func readPieceFromWAL(msg *TimedWALMessage) interface{} { + // for logging + switch m := msg.Msg.(type) { + case msgInfo: + switch msg := m.Msg.(type) { + case *ProposalMessage: + return &msg.Proposal.BlockPartsHeader + case *BlockPartMessage: + return msg.Part + case *VoteMessage: + return msg.Vote + } + case EndHeightMessage: + return m + } + + return nil +} + +// fresh state and mock store +func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (dbm.DB, sm.State, *mockBlockStore) { + stateDB := dbm.NewMemDB() + state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) + store := NewMockBlockStore(config, state.ConsensusParams) + return stateDB, state, store +} + +//---------------------------------- +// mock block store + +type mockBlockStore struct { + config *cfg.Config + params types.ConsensusParams + chain []*types.Block + commits []*types.Commit +} + +// TODO: NewBlockStore(db.NewMemDB) ... +func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { + return &mockBlockStore{config, params, nil, nil} +} + +func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } +func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } +func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + block := bs.chain[height-1] + return &types.BlockMeta{ + BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()}, + Header: block.Header, + } +} +func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } +func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +} +func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { + return bs.commits[height-1] +} +func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { + return bs.commits[height-1] +} + +//---------------------------------------- + +func TestInitChainUpdateValidators(t *testing.T) { + val, _ := types.RandValidator(true, 10) + vals := types.NewValidatorSet([]*types.Validator{val}) + app := &initChainApp{vals: types.TM2PB.Validators(vals)} + clientCreator := proxy.NewLocalClientCreator(app) + + config := ResetConfig("proxy_test_") + privVal := privval.LoadFilePV(config.PrivValidatorFile()) + stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) + + oldValAddr := state.Validators.Validators[0].Address + + // now start the app using the handshake - it should sync + genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + handshaker := NewHandshaker(stateDB, state, store, genDoc) + proxyApp := proxy.NewAppConns(clientCreator, handshaker) + if err := proxyApp.Start(); err != nil { + t.Fatalf("Error starting proxy app connections: %v", err) + } + defer proxyApp.Stop() + + // reload the state, check the validator set was updated + state = sm.LoadState(stateDB) + + newValAddr := state.Validators.Validators[0].Address + expectValAddr := val.Address + assert.NotEqual(t, oldValAddr, newValAddr) + assert.Equal(t, newValAddr, expectValAddr) +} + +func newInitChainApp(vals []abci.Validator) *initChainApp { + return &initChainApp{ + vals: vals, + } +} + +// returns the vals on InitChain +type initChainApp struct { + abci.BaseApplication + vals []abci.Validator +} + +func (ica *initChainApp) InitChain(req 
abci.RequestInitChain) abci.ResponseInitChain { + return abci.ResponseInitChain{ + Validators: ica.vals, + } +} diff --git a/consensus/state.go b/consensus/state.go new file mode 100644 index 000000000..e4b360e08 --- /dev/null +++ b/consensus/state.go @@ -0,0 +1,1675 @@ +package consensus + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "runtime/debug" + "sync" + "time" + + fail "github.com/ebuchman/fail-test" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + cfg "github.com/tendermint/tendermint/config" + cstypes "github.com/tendermint/tendermint/consensus/types" + tmevents "github.com/tendermint/tendermint/libs/events" + "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +//----------------------------------------------------------------------------- +// Config + +const ( + proposalHeartbeatIntervalSeconds = 2 +) + +//----------------------------------------------------------------------------- +// Errors + +var ( + ErrInvalidProposalSignature = errors.New("Error invalid proposal signature") + ErrInvalidProposalPOLRound = errors.New("Error invalid proposal POL round") + ErrAddingVote = errors.New("Error adding vote") + ErrVoteHeightMismatch = errors.New("Error vote height mismatch") +) + +//----------------------------------------------------------------------------- + +var ( + msgQueueSize = 1000 +) + +// msgs from the reactor which may update the state +type msgInfo struct { + Msg ConsensusMessage `json:"msg"` + PeerID p2p.ID `json:"peer_key"` +} + +// internally generated messages which may update the state +type timeoutInfo struct { + Duration time.Duration `json:"duration"` + Height int64 `json:"height"` + Round int `json:"round"` + Step cstypes.RoundStepType `json:"step"` +} + +func (ti *timeoutInfo) String() string { + return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step) +} + +// ConsensusState handles execution of the consensus algorithm. +// It processes votes and proposals, and upon reaching agreement, +// commits blocks to the chain and executes them against the application. +// The internal state machine receives input from peers, the internal validator, and from a timer. +type ConsensusState struct { + cmn.BaseService + + // config details + config *cfg.ConsensusConfig + privValidator types.PrivValidator // for signing votes + + // services for creating and executing blocks + // TODO: encapsulate all of this in one "BlockManager" + blockExec *sm.BlockExecutor + blockStore sm.BlockStore + mempool sm.Mempool + evpool sm.EvidencePool + + // internal state + mtx sync.Mutex + cstypes.RoundState + state sm.State // State until height-1. + + // state changes may be triggered by: msgs from peers, + // msgs from ourself, or by timeouts + peerMsgQueue chan msgInfo + internalMsgQueue chan msgInfo + timeoutTicker TimeoutTicker + + // we use eventBus to trigger msg broadcasts in the reactor, + // and to notify external subscribers, eg. 
through a websocket + eventBus *types.EventBus + + // a Write-Ahead Log ensures we can recover from any kind of crash + // and helps us avoid signing conflicting votes + wal WAL + replayMode bool // so we don't log signing errors during replay + doWALCatchup bool // determines if we even try to do the catchup + + // for tests where we want to limit the number of transitions the state makes + nSteps int + + // some functions can be overwritten for testing + decideProposal func(height int64, round int) + doPrevote func(height int64, round int) + setProposal func(proposal *types.Proposal) error + + // closed when we finish shutting down + done chan struct{} + + // synchronous pubsub between consensus state and reactor. + // state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat + evsw tmevents.EventSwitch + + // for reporting metrics + metrics *Metrics +} + +// CSOption sets an optional parameter on the ConsensusState. +type CSOption func(*ConsensusState) + +// NewConsensusState returns a new ConsensusState. +func NewConsensusState( + config *cfg.ConsensusConfig, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore sm.BlockStore, + mempool sm.Mempool, + evpool sm.EvidencePool, + options ...CSOption, +) *ConsensusState { + cs := &ConsensusState{ + config: config, + blockExec: blockExec, + blockStore: blockStore, + mempool: mempool, + peerMsgQueue: make(chan msgInfo, msgQueueSize), + internalMsgQueue: make(chan msgInfo, msgQueueSize), + timeoutTicker: NewTimeoutTicker(), + done: make(chan struct{}), + doWALCatchup: true, + wal: nilWAL{}, + evpool: evpool, + evsw: tmevents.NewEventSwitch(), + metrics: NopMetrics(), + } + // set function defaults (may be overwritten before calling Start) + cs.decideProposal = cs.defaultDecideProposal + cs.doPrevote = cs.defaultDoPrevote + cs.setProposal = cs.defaultSetProposal + + cs.updateToState(state) + // Don't call scheduleRound0 yet. + // We do that upon Start(). + cs.reconstructLastCommit(state) + cs.BaseService = *cmn.NewBaseService(nil, "ConsensusState", cs) + for _, option := range options { + option(cs) + } + return cs +} + +//---------------------------------------- +// Public interface + +// SetLogger implements Service. +func (cs *ConsensusState) SetLogger(l log.Logger) { + cs.BaseService.Logger = l + cs.timeoutTicker.SetLogger(l) +} + +// SetEventBus sets event bus. +func (cs *ConsensusState) SetEventBus(b *types.EventBus) { + cs.eventBus = b + cs.blockExec.SetEventBus(b) +} + +// WithMetrics sets the metrics. +func WithMetrics(metrics *Metrics) CSOption { + return func(cs *ConsensusState) { cs.metrics = metrics } +} + +// String returns a string. +func (cs *ConsensusState) String() string { + // better not to access shared variables + return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) +} + +// GetState returns a copy of the chain state. +func (cs *ConsensusState) GetState() sm.State { + cs.mtx.Lock() + defer cs.mtx.Unlock() + return cs.state.Copy() +} + +// GetRoundState returns a shallow copy of the internal consensus state. +func (cs *ConsensusState) GetRoundState() *cstypes.RoundState { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + rs := cs.RoundState // copy + return &rs +} + +// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino. 
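+// A minimal sketch of a hypothetical caller (e.g. an RPC handler) dumping the
+// consensus state; "w" here stands for any io.Writer and is not part of this
+// package:
+//
+//	data, err := cs.GetRoundStateJSON()
+//	if err != nil {
+//		return err
+//	}
+//	w.Write(data)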
+func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+
+ return cdc.MarshalJSON(cs.RoundState)
+}
+
+// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
+func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+
+ return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
+}
+
+// GetValidators returns a copy of the current validators.
+func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+ return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
+}
+
+// SetPrivValidator sets the private validator account for signing votes.
+func (cs *ConsensusState) SetPrivValidator(priv types.PrivValidator) {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+ cs.privValidator = priv
+}
+
+// SetTimeoutTicker sets the local timer. It may be useful to override it for testing.
+func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+ cs.timeoutTicker = timeoutTicker
+}
+
+// LoadCommit loads the commit for a given height.
+func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+ if height == cs.blockStore.Height() {
+ return cs.blockStore.LoadSeenCommit(height)
+ }
+ return cs.blockStore.LoadBlockCommit(height)
+}
+
+// OnStart implements cmn.Service.
+// It loads the latest state via the WAL, and starts the timeout and receive routines.
+func (cs *ConsensusState) OnStart() error {
+ if err := cs.evsw.Start(); err != nil {
+ return err
+ }
+
+ // we may set the WAL in testing before calling Start,
+ // so only OpenWAL if it's still the nilWAL
+ if _, ok := cs.wal.(nilWAL); ok {
+ walFile := cs.config.WalFile()
+ wal, err := cs.OpenWAL(walFile)
+ if err != nil {
+ cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error())
+ return err
+ }
+ cs.wal = wal
+ }
+
+ // we need the timeoutRoutine for replay so
+ // we don't block on the tick chan.
+ // NOTE: we will get a buildup of garbage goroutines
+ // firing on the tockChan until the receiveRoutine is started
+ // to deal with them (by that point, at most one will be valid)
+ if err := cs.timeoutTicker.Start(); err != nil {
+ return err
+ }
+
+ // we may have lost some votes if the process crashed
+ // reload from consensus log to catchup
+ if cs.doWALCatchup {
+ if err := cs.catchupReplay(cs.Height); err != nil {
+ cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "err", err.Error())
+ // NOTE: if we ever do return an error here,
+ // make sure to stop the timeoutTicker
+ }
+ }
+
+ // now start the receiveRoutine
+ go cs.receiveRoutine(0)
+
+ // schedule the first round!
+ // use GetRoundState so we don't race the receiveRoutine for access
+ cs.scheduleRound0(cs.GetRoundState())
+
+ return nil
+}
+
+// timeoutRoutine: receives requests for timeouts on tickChan and fires timeouts on tockChan
+// receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
+func (cs *ConsensusState) startRoutines(maxSteps int) {
+ err := cs.timeoutTicker.Start()
+ if err != nil {
+ cs.Logger.Error("Error starting timeout ticker", "err", err)
+ return
+ }
+ go cs.receiveRoutine(maxSteps)
+}
+
+// OnStop implements cmn.Service. It stops all routines and waits for the WAL to finish.
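+// Note that the WAL is not stopped here: receiveRoutine stops it (and closes
+// the done channel) when it observes Quit, which is what Wait below blocks on.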
+func (cs *ConsensusState) OnStop() {
+ cs.evsw.Stop()
+ cs.timeoutTicker.Stop()
+}
+
+// Wait waits for the main routine to return.
+// NOTE: be sure to Stop() the event switch and drain
+// any event channels or this may deadlock.
+func (cs *ConsensusState) Wait() {
+ <-cs.done
+}
+
+// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability.
+func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
+ wal, err := NewWAL(walFile)
+ if err != nil {
+ cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err)
+ return nil, err
+ }
+ wal.SetLogger(cs.Logger.With("wal", walFile))
+ if err := wal.Start(); err != nil {
+ return nil, err
+ }
+ return wal, nil
+}
+
+//------------------------------------------------------------
+// Public interface for passing messages into the consensus state, possibly causing a state transition.
+// If peerID == "", the msg is considered internal.
+// Messages are added to the appropriate queue (peer or internal).
+// If the queue is full, the function may block.
+// TODO: should these return anything or let callers just use events?
+
+// AddVote inputs a vote.
+func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
+ if peerID == "" {
+ cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
+ } else {
+ cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID}
+ }
+
+ // TODO: wait for event?!
+ return false, nil
+}
+
+// SetProposal inputs a proposal.
+func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
+ if peerID == "" {
+ cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
+ } else {
+ cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID}
+ }
+
+ // TODO: wait for event?!
+ return nil
+}
+
+// AddProposalBlockPart inputs a part of the proposal block.
+func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2p.ID) error {
+ if peerID == "" {
+ cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
+ } else {
+ cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID}
+ }
+
+ // TODO: wait for event?!
+ return nil
+}
+
+// SetProposalAndBlock inputs the proposal and all block parts.
+func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID p2p.ID) error {
+ if err := cs.SetProposal(proposal, peerID); err != nil {
+ return err
+ }
+ for i := 0; i < parts.Total(); i++ {
+ part := parts.GetPart(i)
+ if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------
+// internal functions for managing the state
+
+func (cs *ConsensusState) updateHeight(height int64) {
+ cs.metrics.Height.Set(float64(height))
+ cs.Height = height
+}
+
+func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType) {
+ cs.Round = round
+ cs.Step = step
+}
+
+// enterNewRound(height, 0) at cs.StartTime.
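+// scheduleRound0 schedules a RoundStepNewHeight timeout for whatever time
+// remains until rs.StartTime; a zero or negative duration fires (almost)
+// immediately.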
+func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) { + //cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime) + sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple + cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) +} + +// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan) +func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height int64, round int, step cstypes.RoundStepType) { + cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step}) +} + +// send a msg into the receiveRoutine regarding our own proposal, block part, or vote +func (cs *ConsensusState) sendInternalMessage(mi msgInfo) { + select { + case cs.internalMsgQueue <- mi: + default: + // NOTE: using the go-routine means our votes can + // be processed out of order. + // TODO: use CList here for strict determinism and + // attempt push to internalMsgQueue in receiveRoutine + cs.Logger.Info("Internal msg queue is full. Using a go-routine") + go func() { cs.internalMsgQueue <- mi }() + } +} + +// Reconstruct LastCommit from SeenCommit, which we saved along with the block, +// (which happens even before saving the state) +func (cs *ConsensusState) reconstructLastCommit(state sm.State) { + if state.LastBlockHeight == 0 { + return + } + seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) + lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators) + for _, precommit := range seenCommit.Precommits { + if precommit == nil { + continue + } + added, err := lastPrecommits.AddVote(precommit) + if !added || err != nil { + cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err)) + } + } + if !lastPrecommits.HasTwoThirdsMajority() { + cmn.PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj") + } + cs.LastCommit = lastPrecommits +} + +// Updates ConsensusState and increments height to match that of state. +// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. +func (cs *ConsensusState) updateToState(state sm.State) { + if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { + cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v", + cs.Height, state.LastBlockHeight)) + } + if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { + // This might happen when someone else is mutating cs.state. + // Someone forgot to pass in state.Copy() somewhere?! + cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", + cs.state.LastBlockHeight+1, cs.Height)) + } + + // If state isn't further out than cs.state, just ignore. + // This happens when SwitchToConsensus() is called in the reactor. + // We don't want to reset e.g. the Votes, but we still want to + // signal the new round step, because other services (eg. mempool) + // depend on having an up-to-date peer state! + if !cs.state.IsEmpty() && (state.LastBlockHeight <= cs.state.LastBlockHeight) { + cs.Logger.Info("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1) + cs.newStep() + return + } + + // Reset fields based on state. 
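+ // Everything round-scoped (proposal, locked/valid block, votes) is rebuilt
+ // below for height = state.LastBlockHeight+1; LastCommit and LastValidators
+ // carry over so the next proposer can include the commit for the last block.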
+ validators := state.Validators
+ lastPrecommits := (*types.VoteSet)(nil)
+ if cs.CommitRound > -1 && cs.Votes != nil {
+ if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() {
+ cmn.PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3")
+ }
+ lastPrecommits = cs.Votes.Precommits(cs.CommitRound)
+ }
+
+ // Next desired block height
+ height := state.LastBlockHeight + 1
+
+ // RoundState fields
+ cs.updateHeight(height)
+ cs.updateRoundStep(0, cstypes.RoundStepNewHeight)
+ if cs.CommitTime.IsZero() {
+ // "Now" makes it easier to sync up dev nodes.
+ // We add timeoutCommit to allow transactions
+ // to be gathered for the first block.
+ // An alternative solution that relies on clocks:
+ // cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
+ cs.StartTime = cs.config.Commit(time.Now())
+ } else {
+ cs.StartTime = cs.config.Commit(cs.CommitTime)
+ }
+
+ cs.Validators = validators
+ cs.Proposal = nil
+ cs.ProposalBlock = nil
+ cs.ProposalBlockParts = nil
+ cs.LockedRound = 0
+ cs.LockedBlock = nil
+ cs.LockedBlockParts = nil
+ cs.ValidRound = 0
+ cs.ValidBlock = nil
+ cs.ValidBlockParts = nil
+ cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
+ cs.CommitRound = -1
+ cs.LastCommit = lastPrecommits
+ cs.LastValidators = state.LastValidators
+
+ cs.state = state
+
+ // Finally, broadcast RoundState
+ cs.newStep()
+}
+
+func (cs *ConsensusState) newStep() {
+ rs := cs.RoundStateEvent()
+ cs.wal.Write(rs)
+ cs.nSteps++
+ // newStep is called by updateToState in NewConsensusState before the eventBus is set!
+ if cs.eventBus != nil {
+ cs.eventBus.PublishEventNewRoundStep(rs)
+ cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
+ }
+}
+
+//-----------------------------------------
+// the main go routines
+
+// receiveRoutine handles messages which may cause state transitions.
+// its argument (maxSteps) is the number of messages to process before exiting - use 0 to run forever
+// It keeps the RoundState and is the only thing that updates it.
+// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
+// ConsensusState must be locked before any internal state is updated.
+func (cs *ConsensusState) receiveRoutine(maxSteps int) {
+ defer func() {
+ if r := recover(); r != nil {
+ cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
+ }
+ }()
+
+ for {
+ if maxSteps > 0 {
+ if cs.nSteps >= maxSteps {
+ cs.Logger.Info("reached max steps.
exiting receive routine")
+ cs.nSteps = 0
+ return
+ }
+ }
+ rs := cs.RoundState
+ var mi msgInfo
+
+ select {
+ case height := <-cs.mempool.TxsAvailable():
+ cs.handleTxsAvailable(height)
+ case mi = <-cs.peerMsgQueue:
+ cs.wal.Write(mi)
+ // handles proposals, block parts, votes
+ // may generate internal events (votes, complete proposals, 2/3 majorities)
+ cs.handleMsg(mi)
+ case mi = <-cs.internalMsgQueue:
+ cs.wal.WriteSync(mi) // NOTE: fsync
+ // handles proposals, block parts, votes
+ cs.handleMsg(mi)
+ case ti := <-cs.timeoutTicker.Chan(): // tockChan:
+ cs.wal.Write(ti)
+ // if the timeout is relevant to the rs
+ // go to the next step
+ cs.handleTimeout(ti, rs)
+ case <-cs.Quit():
+
+ // NOTE: the internalMsgQueue may have signed messages from our
+ // priv_val that haven't hit the WAL, but it's ok because
+ // priv_val tracks LastSig
+
+ // close wal now that we're done writing to it
+ cs.wal.Stop()
+ cs.wal.Wait()
+
+ close(cs.done)
+ return
+ }
+ }
+}
+
+// state transitions on complete-proposal, 2/3-any, 2/3-one
+func (cs *ConsensusState) handleMsg(mi msgInfo) {
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+
+ var err error
+ msg, peerID := mi.Msg, mi.PeerID
+ switch msg := msg.(type) {
+ case *ProposalMessage:
+ // will not cause transition.
+ // once proposal is set, we can receive block parts
+ err = cs.setProposal(msg.Proposal)
+ case *BlockPartMessage:
+ // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
+ _, err = cs.addProposalBlockPart(msg, peerID)
+ if err != nil && msg.Round != cs.Round {
+ cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
+ err = nil
+ }
+ case *VoteMessage:
+ // attempt to add the vote and dupeout the validator if it's a duplicate signature
+ // if the vote gives us a 2/3-any or 2/3-one, we transition
+ err := cs.tryAddVote(msg.Vote, peerID)
+ if err == ErrAddingVote {
+ // TODO: punish peer
+ // We probably don't want to stop the peer here. The vote does not
+ // necessarily come from a malicious peer; it may simply have been
+ // broadcast by a typical peer.
+ // https://github.com/tendermint/tendermint/issues/1281
+ }
+
+ // NOTE: the vote is broadcast to peers by the reactor listening
+ // for vote events
+
+ // TODO: If rs.Height == vote.Height && rs.Round < vote.Round,
+ // the peer is sending us CatchupCommit precommits.
+ // We could make note of this and help filter in broadcastHasVoteMessage().
+ default:
+ cs.Logger.Error("Unknown msg type", "type", reflect.TypeOf(msg))
+ }
+ if err != nil {
+ cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
+ }
+}
+
+func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
+ cs.Logger.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
+
+ // timeouts must be for current height, round, step
+ if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) {
+ cs.Logger.Debug("Ignoring tock because we're ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step)
+ return
+ }
+
+ // the timeout will now cause a state transition
+ cs.mtx.Lock()
+ defer cs.mtx.Unlock()
+
+ switch ti.Step {
+ case cstypes.RoundStepNewHeight:
+ // NewRound event fired from enterNewRound.
+ // XXX: should we fire timeout here (for timeout commit)?
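+ // timeoutCommit has elapsed: start round 0 of the next height.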
+ cs.enterNewRound(ti.Height, 0) + case cstypes.RoundStepNewRound: + cs.enterPropose(ti.Height, 0) + case cstypes.RoundStepPropose: + cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()) + cs.enterPrevote(ti.Height, ti.Round) + case cstypes.RoundStepPrevoteWait: + cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) + cs.enterPrecommit(ti.Height, ti.Round) + case cstypes.RoundStepPrecommitWait: + cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) + cs.enterNewRound(ti.Height, ti.Round+1) + default: + panic(cmn.Fmt("Invalid timeout step: %v", ti.Step)) + } + +} + +func (cs *ConsensusState) handleTxsAvailable(height int64) { + cs.mtx.Lock() + defer cs.mtx.Unlock() + // we only need to do this for round 0 + cs.enterPropose(height, 0) +} + +//----------------------------------------------------------------------------- +// State functions +// Used internally by handleTimeout and handleMsg to make state transitions + +// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), +// or, if SkipTimeout==true, after receiving all precommits from (height,round-1) +// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) +// Enter: +2/3 precommits for nil at (height,round-1) +// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) +// NOTE: cs.StartTime was already set for height. +func (cs *ConsensusState) enterNewRound(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { + logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + return + } + + if now := time.Now(); cs.StartTime.After(now) { + logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now) + } + + logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + // Increment validators if necessary + validators := cs.Validators + if cs.Round < round { + validators = validators.Copy() + validators.IncrementAccum(round - cs.Round) + } + + // Setup new round + // we don't fire newStep for this step, + // but we fire an event, so update the round step first + cs.updateRoundStep(round, cstypes.RoundStepNewRound) + cs.Validators = validators + if round == 0 { + // We've already reset these upon new height, + // and meanwhile we might have received a proposal + // for round 0. + } else { + logger.Info("Resetting Proposal info") + cs.Proposal = nil + cs.ProposalBlock = nil + cs.ProposalBlockParts = nil + } + cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping + + cs.eventBus.PublishEventNewRound(cs.RoundStateEvent()) + cs.metrics.Rounds.Set(float64(round)) + + // Wait for txs to be available in the mempool + // before we enterPropose in round 0. If the last block changed the app hash, + // we may need an empty "proof" block, and enterPropose immediately. 
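+ // WaitForTxs() reflects the CreateEmptyBlocks* consensus config settings;
+ // needProofBlock overrides it when the previous block changed the app hash.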
+ waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height) + if waitForTxs { + if cs.config.CreateEmptyBlocksInterval > 0 { + cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, cstypes.RoundStepNewRound) + } + go cs.proposalHeartbeat(height, round) + } else { + cs.enterPropose(height, round) + } +} + +// needProofBlock returns true on the first height (so the genesis app hash is signed right away) +// and where the last block (height-1) caused the app hash to change +func (cs *ConsensusState) needProofBlock(height int64) bool { + if height == 1 { + return true + } + + lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) +} + +func (cs *ConsensusState) proposalHeartbeat(height int64, round int) { + counter := 0 + addr := cs.privValidator.GetAddress() + valIndex, _ := cs.Validators.GetByAddress(addr) + chainID := cs.state.ChainID + for { + rs := cs.GetRoundState() + // if we've already moved on, no need to send more heartbeats + if rs.Step > cstypes.RoundStepNewRound || rs.Round > round || rs.Height > height { + return + } + heartbeat := &types.Heartbeat{ + Height: rs.Height, + Round: rs.Round, + Sequence: counter, + ValidatorAddress: addr, + ValidatorIndex: valIndex, + } + cs.privValidator.SignHeartbeat(chainID, heartbeat) + cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat}) + cs.evsw.FireEvent(types.EventProposalHeartbeat, heartbeat) + counter++ + time.Sleep(proposalHeartbeatIntervalSeconds * time.Second) + } +} + +// Enter (CreateEmptyBlocks): from enterNewRound(height,round) +// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool +func (cs *ConsensusState) enterPropose(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { + logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + return + } + logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPropose: + cs.updateRoundStep(round, cstypes.RoundStepPropose) + cs.newStep() + + // If we have the whole proposal + POL, then goto Prevote now. 
+ // else, we'll enterPrevote when the rest of the proposal is received (in AddProposalBlockPart), + // or else after timeoutPropose + if cs.isProposalComplete() { + cs.enterPrevote(height, cs.Round) + } + }() + + // If we don't get the proposal and all block parts quick enough, enterPrevote + cs.scheduleTimeout(cs.config.Propose(round), height, round, cstypes.RoundStepPropose) + + // Nothing more to do if we're not a validator + if cs.privValidator == nil { + logger.Debug("This node is not a validator") + return + } + + // if not a validator, we're done + if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { + logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators) + return + } + logger.Debug("This node is a validator") + + if cs.isProposer() { + logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) + cs.decideProposal(height, round) + } else { + logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) + } +} + +func (cs *ConsensusState) isProposer() bool { + return bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress()) +} + +func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { + var block *types.Block + var blockParts *types.PartSet + + // Decide on block + if cs.LockedBlock != nil { + // If we're locked onto a block, just choose that. + block, blockParts = cs.LockedBlock, cs.LockedBlockParts + } else if cs.ValidBlock != nil { + // If there is valid block, choose that. + block, blockParts = cs.ValidBlock, cs.ValidBlockParts + } else { + // Create a new proposal block from state/txs from the mempool. + block, blockParts = cs.createProposalBlock() + if block == nil { // on error + return + } + } + + // Make proposal + polRound, polBlockID := cs.Votes.POLInfo() + proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) + if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { + // Set fields + /* fields set by setProposal and addBlockPart + cs.Proposal = proposal + cs.ProposalBlock = block + cs.ProposalBlockParts = blockParts + */ + + // send proposal and block parts on internal msg queue + cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + for i := 0; i < blockParts.Total(); i++ { + part := blockParts.GetPart(i) + cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) + } + cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) + cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block)) + } else { + if !cs.replayMode { + cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) + } + } +} + +// Returns true if the proposal block is complete && +// (if POLRound was proposed, we have +2/3 prevotes from there). +func (cs *ConsensusState) isProposalComplete() bool { + if cs.Proposal == nil || cs.ProposalBlock == nil { + return false + } + // we have the proposal. if there's a POLRound, + // make sure we have the prevotes from it too + if cs.Proposal.POLRound < 0 { + return true + } + // if this is false the proposer is lying or we haven't received the POL yet + return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() + +} + +// Create the next block to propose and return it. +// Returns nil block upon error. 
+// NOTE: keep it side-effect free for clarity. +func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { + var commit *types.Commit + if cs.Height == 1 { + // We're creating a proposal for the first block. + // The commit is empty, but not nil. + commit = &types.Commit{} + } else if cs.LastCommit.HasTwoThirdsMajority() { + // Make the commit from LastCommit + commit = cs.LastCommit.MakeCommit() + } else { + // This shouldn't happen. + cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.") + return + } + + // Mempool validated transactions + txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs) + block, parts := cs.state.MakeBlock(cs.Height, txs, commit) + evidence := cs.evpool.PendingEvidence() + block.AddEvidence(evidence) + return block, parts +} + +// Enter: `timeoutPropose` after entering Propose. +// Enter: proposal block and POL is ready. +// Enter: any +2/3 prevotes for future round. +// Prevote for LockedBlock if we're locked, or ProposalBlock if valid. +// Otherwise vote nil. +func (cs *ConsensusState) enterPrevote(height int64, round int) { + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { + cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + return + } + + defer func() { + // Done enterPrevote: + cs.updateRoundStep(round, cstypes.RoundStepPrevote) + cs.newStep() + }() + + // fire event for how we got here + if cs.isProposalComplete() { + cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent()) + } else { + // we received +2/3 prevotes for a future round + // TODO: catchup event? + } + + cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + // Sign and broadcast vote as necessary + cs.doPrevote(height, round) + + // Once `addVote` hits any +2/3 prevotes, we will go to PrevoteWait + // (so we have more time to try and collect +2/3 prevotes for a single block) +} + +func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + // If a block is locked, prevote that. + if cs.LockedBlock != nil { + logger.Info("enterPrevote: Block was locked") + cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + return + } + + // If ProposalBlock is nil, prevote nil. + if cs.ProposalBlock == nil { + logger.Info("enterPrevote: ProposalBlock is nil") + cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) + return + } + + // Validate proposal block + err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) + if err != nil { + // ProposalBlock is invalid, prevote nil. + logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) + cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) + return + } + + // Prevote cs.ProposalBlock + // NOTE: the proposal signature is validated when it is received, + // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) + logger.Info("enterPrevote: ProposalBlock is valid") + cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) +} + +// Enter: any +2/3 prevotes at next round. 
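+// enterPrevoteWait starts the prevote timeout: +2/3 prevotes exist for
+// *something* (not necessarily a single block), so we wait config.Prevote(round)
+// for a polka to form before precommitting.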
+func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
+ logger := cs.Logger.With("height", height, "round", round)
+
+ if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
+ logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+ return
+ }
+ if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
+ cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
+ }
+ logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+
+ defer func() {
+ // Done enterPrevoteWait:
+ cs.updateRoundStep(round, cstypes.RoundStepPrevoteWait)
+ cs.newStep()
+ }()
+
+ // Wait for some more prevotes; enterPrecommit
+ cs.scheduleTimeout(cs.config.Prevote(round), height, round, cstypes.RoundStepPrevoteWait)
+}
+
+// Enter: `timeoutPrevote` after any +2/3 prevotes.
+// Enter: +2/3 precommits for block or nil.
+// Enter: any +2/3 precommits for next round.
+// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round),
+// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
+// otherwise, precommit nil.
+func (cs *ConsensusState) enterPrecommit(height int64, round int) {
+ logger := cs.Logger.With("height", height, "round", round)
+
+ if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
+ logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+ return
+ }
+
+ logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+
+ defer func() {
+ // Done enterPrecommit:
+ cs.updateRoundStep(round, cstypes.RoundStepPrecommit)
+ cs.newStep()
+ }()
+
+ // check for a polka
+ blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()
+
+ // If we don't have a polka, we must precommit nil.
+ if !ok {
+ if cs.LockedBlock != nil {
+ logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
+ } else {
+ logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
+ }
+ cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
+ return
+ }
+
+ // At this point +2/3 prevoted for a particular block or nil.
+ cs.eventBus.PublishEventPolka(cs.RoundStateEvent())
+
+ // the latest POLRound should be this round.
+ polRound, _ := cs.Votes.POLInfo()
+ if polRound < round {
+ cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %v", round, polRound))
+ }
+
+ // +2/3 prevoted nil. Unlock and precommit nil.
+ if len(blockID.Hash) == 0 {
+ if cs.LockedBlock == nil {
+ logger.Info("enterPrecommit: +2/3 prevoted for nil.")
+ } else {
+ logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
+ cs.LockedRound = 0
+ cs.LockedBlock = nil
+ cs.LockedBlockParts = nil
+ cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
+ }
+ cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
+ return
+ }
+
+ // At this point, +2/3 prevoted for a particular block.
+
+ // If we're already locked on that block, precommit it, and update the LockedRound
+ if cs.LockedBlock.HashesTo(blockID.Hash) {
+ logger.Info("enterPrecommit: +2/3 prevoted locked block.
Relocking") + cs.LockedRound = round + cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) + cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) + return + } + + // If +2/3 prevoted for proposal block, stage and precommit it + if cs.ProposalBlock.HashesTo(blockID.Hash) { + logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) + // Validate the block. + if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { + cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) + } + cs.LockedRound = round + cs.LockedBlock = cs.ProposalBlock + cs.LockedBlockParts = cs.ProposalBlockParts + cs.eventBus.PublishEventLock(cs.RoundStateEvent()) + cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) + return + } + + // There was a polka in this round for a block we don't have. + // Fetch that block, unlock, and precommit nil. + // The +2/3 prevotes for this round is the POL for our unlock. + // TODO: In the future save the POL prevotes for justification. + cs.LockedRound = 0 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { + cs.ProposalBlock = nil + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) + } + cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) +} + +// Enter: any +2/3 precommits for next round. +func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) { + logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + return + } + if !cs.Votes.Precommits(round).HasTwoThirdsAny() { + cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) + } + logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPrecommitWait: + cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait) + cs.newStep() + }() + + // Wait for some more precommits; enterNewRound + cs.scheduleTimeout(cs.config.Precommit(round), height, round, cstypes.RoundStepPrecommitWait) + +} + +// Enter: +2/3 precommits for block +func (cs *ConsensusState) enterCommit(height int64, commitRound int) { + logger := cs.Logger.With("height", height, "commitRound", commitRound) + + if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { + logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + return + } + logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterCommit: + // keep cs.Round the same, commitRound points to the right Precommits set. + cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit) + cs.CommitRound = commitRound + cs.CommitTime = time.Now() + cs.newStep() + + // Maybe finalize immediately. + cs.tryFinalizeCommit(height) + }() + + blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority() + if !ok { + cmn.PanicSanity("RunActionCommit() expects +2/3 precommits") + } + + // The Locked* fields no longer matter. 
+ // Move them over to ProposalBlock if they match the commit hash,
+ // otherwise they'll be cleared in updateToState.
+ if cs.LockedBlock.HashesTo(blockID.Hash) {
+ logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash)
+ cs.ProposalBlock = cs.LockedBlock
+ cs.ProposalBlockParts = cs.LockedBlockParts
+ }
+
+ // If we don't have the block being committed, set up to get it.
+ if !cs.ProposalBlock.HashesTo(blockID.Hash) {
+ if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
+ logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash)
+ // We're getting the wrong block.
+ // Set up ProposalBlockParts and keep waiting.
+ cs.ProposalBlock = nil
+ cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
+ } else {
+ // We just need to keep waiting.
+ }
+ }
+}
+
+// If we have the block AND +2/3 commits for it, finalize.
+func (cs *ConsensusState) tryFinalizeCommit(height int64) {
+ logger := cs.Logger.With("height", height)
+
+ if cs.Height != height {
+ cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
+ }
+
+ blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
+ if !ok || len(blockID.Hash) == 0 {
+ logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
+ return
+ }
+ if !cs.ProposalBlock.HashesTo(blockID.Hash) {
+ // TODO: this happens every time if we're not a validator (ugly logs)
+ // TODO: ^^ wait, why does it matter that we're a validator?
+ logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
+ return
+ }
+
+ // go
+ cs.finalizeCommit(height)
+}
+
+// Increment height and goto cstypes.RoundStepNewHeight
+func (cs *ConsensusState) finalizeCommit(height int64) {
+ if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
+ cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
+ return
+ }
+
+ blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
+ block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
+
+ if !ok {
+ cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
+ }
+ if !blockParts.HasHeader(blockID.PartsHeader) {
+ cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header"))
+ }
+ if !block.HashesTo(blockID.Hash) {
+ cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
+ }
+ if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
+ cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err))
+ }
+
+ cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs),
+ "height", block.Height, "hash", block.Hash(), "root", block.AppHash)
+ cs.Logger.Info(cmn.Fmt("%v", block))
+
+ fail.Fail() // XXX
+
+ // Save to blockStore.
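+ // The height check below makes the save idempotent: during WAL replay we may get here with the block already stored.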
+ if cs.blockStore.Height() < block.Height {
+ // NOTE: the seenCommit is local justification to commit this block,
+ // but may differ from the LastCommit included in the next block
+ precommits := cs.Votes.Precommits(cs.CommitRound)
+ seenCommit := precommits.MakeCommit()
+ cs.blockStore.SaveBlock(block, blockParts, seenCommit)
+ } else {
+ // Happens during replay if we already saved the block but didn't commit
+ cs.Logger.Info("Calling finalizeCommit on already stored block", "height", block.Height)
+ }
+
+ fail.Fail() // XXX
+
+ // Write EndHeightMessage{} for this height, implying that the blockstore
+ // has saved the block.
+ //
+ // If we crash before writing this EndHeightMessage{}, we will recover by
+ // running ApplyBlock during the ABCI handshake when we restart. If we
+ // didn't save the block to the blockstore before writing
+ // EndHeightMessage{}, we'd have to change WAL replay -- currently it
+ // complains about replaying for heights where an #ENDHEIGHT entry already
+ // exists.
+ //
+ // Either way, the ConsensusState should not be resumed until we
+ // successfully call ApplyBlock (ie. later here, or in Handshake after
+ // restart).
+ cs.wal.WriteSync(EndHeightMessage{height}) // NOTE: fsync
+
+ fail.Fail() // XXX
+
+ // Create a copy of the state for staging and an event cache for txs.
+ stateCopy := cs.state.Copy()
+
+ // Execute and commit the block, update and save the state, and update the mempool.
+ // NOTE: the block.AppHash won't reflect these txs until the next block.
+ var err error
+ stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
+ if err != nil {
+ cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
+ err := cmn.Kill()
+ if err != nil {
+ cs.Logger.Error("Failed to kill this process - please do so manually", "err", err)
+ }
+ return
+ }
+
+ fail.Fail() // XXX
+
+ // must be called before we update state
+ cs.recordMetrics(height, block)
+
+ // NewHeightStep!
+ cs.updateToState(stateCopy)
+
+ fail.Fail() // XXX
+
+ // cs.StartTime is already set.
+ // Schedule Round0 to start soon.
+ cs.scheduleRound0(&cs.RoundState)
+
+ // By here,
+ // * cs.Height has been incremented to height+1
+ // * cs.Step is now cstypes.RoundStepNewHeight
+ // * cs.StartTime is set to when we will start round0.
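+ // * cs.state is now stateCopy, the state after applying this block (set in updateToState above).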
+} + +func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) { + cs.metrics.Validators.Set(float64(cs.Validators.Size())) + cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower())) + missingValidators := 0 + missingValidatorsPower := int64(0) + for i, val := range cs.Validators.Validators { + var vote *types.Vote + if i < len(block.LastCommit.Precommits) { + vote = block.LastCommit.Precommits[i] + } + if vote == nil { + missingValidators++ + missingValidatorsPower += val.VotingPower + } + } + cs.metrics.MissingValidators.Set(float64(missingValidators)) + cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower)) + cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence))) + byzantineValidatorsPower := int64(0) + for _, ev := range block.Evidence.Evidence { + if _, val := cs.Validators.GetByAddress(ev.Address()); val != nil { + byzantineValidatorsPower += val.VotingPower + } + } + cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower)) + + if height > 1 { + lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + cs.metrics.BlockIntervalSeconds.Observe( + block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), + ) + } + + cs.metrics.NumTxs.Set(float64(block.NumTxs)) + cs.metrics.BlockSizeBytes.Set(float64(block.Size())) + cs.metrics.TotalTxs.Set(float64(block.TotalTxs)) +} + +//----------------------------------------------------------------------------- + +func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { + // Already have one + // TODO: possibly catch double proposals + if cs.Proposal != nil { + return nil + } + + // Does not apply + if proposal.Height != cs.Height || proposal.Round != cs.Round { + return nil + } + + // We don't care about the proposal if we're already in cstypes.RoundStepCommit. + if cstypes.RoundStepCommit <= cs.Step { + return nil + } + + // Verify POLRound, which must be -1 or between 0 and proposal.Round exclusive. + if proposal.POLRound != -1 && + (proposal.POLRound < 0 || proposal.Round <= proposal.POLRound) { + return ErrInvalidProposalPOLRound + } + + // Verify signature + if !cs.Validators.GetProposer().PubKey.VerifyBytes(proposal.SignBytes(cs.state.ChainID), proposal.Signature) { + return ErrInvalidProposalSignature + } + + cs.Proposal = proposal + cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader) + cs.Logger.Info("Received proposal", "proposal", proposal) + return nil +} + +// NOTE: block is not necessarily valid. +// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block. +func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) { + height, round, part := msg.Height, msg.Round, msg.Part + + // Blocks might be reused, so round mismatch is OK + if cs.Height != height { + cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round) + return false, nil + } + + // We're not expecting a block part. + if cs.ProposalBlockParts == nil { + // NOTE: this can happen when we've gone to a higher round and + // then receive parts from the previous round - not necessarily a bad peer. 
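+ // The part is simply dropped and the peer is not punished (we return no error).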
+ cs.Logger.Info("Received a block part when we're not expecting any", + "height", height, "round", round, "index", part.Index, "peer", peerID) + return false, nil + } + + added, err = cs.ProposalBlockParts.AddPart(part) + if err != nil { + return added, err + } + if added && cs.ProposalBlockParts.IsComplete() { + // Added and completed! + _, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes)) + if err != nil { + return true, err + } + // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal + cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) + + // Update Valid* if we can. + prevotes := cs.Votes.Prevotes(cs.Round) + blockID, hasTwoThirds := prevotes.TwoThirdsMajority() + if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) { + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info("Updating valid block to new proposal block", + "valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash()) + cs.ValidRound = cs.Round + cs.ValidBlock = cs.ProposalBlock + cs.ValidBlockParts = cs.ProposalBlockParts + } + // TODO: In case there is +2/3 majority in Prevotes set for some + // block and cs.ProposalBlock contains different block, either + // proposer is faulty or voting power of faulty processes is more + // than 1/3. We should trigger in the future accountability + // procedure at this point. + } + + if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { + // Move onto the next step + cs.enterPrevote(height, cs.Round) + } else if cs.Step == cstypes.RoundStepCommit { + // If we're waiting on the proposal block... + cs.tryFinalizeCommit(height) + } + return true, nil + } + return added, nil +} + +// Attempt to add the vote. if its a duplicate signature, dupeout the validator +func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) error { + _, err := cs.addVote(vote, peerID) + if err != nil { + // If the vote height is off, we'll just ignore it, + // But if it's a conflicting sig, add it to the cs.evpool. + // If it's otherwise invalid, punish peer. + if err == ErrVoteHeightMismatch { + return err + } else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { + if bytes.Equal(vote.ValidatorAddress, cs.privValidator.GetAddress()) { + cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type) + return err + } + cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence) + return err + } else { + // Probably an invalid signature / Bad peer. + // Seems this can also err sometimes with "Unexpected step" - perhaps not from a bad peer ? + cs.Logger.Error("Error attempting to add vote", "err", err) + return ErrAddingVote + } + } + return nil +} + +//----------------------------------------------------------------------------- + +func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) { + cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "valIndex", vote.ValidatorIndex, "csHeight", cs.Height) + + // A precommit for the previous height? + // These come in while we wait timeoutCommit + if vote.Height+1 == cs.Height { + if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.VoteTypePrecommit) { + // TODO: give the reason .. 
+ // fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.") + return added, ErrVoteHeightMismatch + } + added, err = cs.LastCommit.AddVote(vote) + if !added { + return added, err + } + + cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) + cs.eventBus.PublishEventVote(types.EventDataVote{vote}) + cs.evsw.FireEvent(types.EventVote, vote) + + // if we can skip timeoutCommit and have all the votes now, + if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { + // go straight to new round (skip timeout commit) + // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) + cs.enterNewRound(cs.Height, 0) + } + + return + } + + // Height mismatch is ignored. + // Not necessarily a bad peer, but not favourable behaviour. + if vote.Height != cs.Height { + err = ErrVoteHeightMismatch + cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err) + return + } + + height := cs.Height + added, err = cs.Votes.AddVote(vote, peerID) + if !added { + // Either duplicate, or error upon cs.Votes.AddByIndex() + return + } + + cs.eventBus.PublishEventVote(types.EventDataVote{vote}) + cs.evsw.FireEvent(types.EventVote, vote) + + switch vote.Type { + case types.VoteTypePrevote: + prevotes := cs.Votes.Prevotes(vote.Round) + cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort()) + + // If +2/3 prevotes for a block or nil for *any* round: + if blockID, ok := prevotes.TwoThirdsMajority(); ok { + + // There was a polka! + // If we're locked but this is a recent polka, unlock. + // If it matches our ProposalBlock, update the ValidBlock + + // Unlock if `cs.LockedRound < vote.Round <= cs.Round` + // NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round + if (cs.LockedBlock != nil) && + (cs.LockedRound < vote.Round) && + (vote.Round <= cs.Round) && + !cs.LockedBlock.HashesTo(blockID.Hash) { + + cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round) + cs.LockedRound = 0 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + } + + // Update Valid* if we can. + // NOTE: our proposal block may be nil or not what received a polka.. + // TODO: we may want to still update the ValidBlock and obtain it via gossipping + if !blockID.IsZero() && + (cs.ValidRound < vote.Round) && + (vote.Round <= cs.Round) && + cs.ProposalBlock.HashesTo(blockID.Hash) { + + cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) + cs.ValidRound = vote.Round + cs.ValidBlock = cs.ProposalBlock + cs.ValidBlockParts = cs.ProposalBlockParts + } + } + + // If +2/3 prevotes for *anything* for this or future round: + if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() { + // Round-skip over to PrevoteWait or goto Precommit. + cs.enterNewRound(height, vote.Round) // if the vote is ahead of us + if prevotes.HasTwoThirdsMajority() { + cs.enterPrecommit(height, vote.Round) + } else { + cs.enterPrevote(height, vote.Round) // if the vote is ahead of us + cs.enterPrevoteWait(height, vote.Round) + } + } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { + // If the proposal is now complete, enter prevote of cs.Round. 
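+ // (a proposal with POLRound R only counts as complete once the prevotes for R show a polka; this vote may have just supplied it)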
+ if cs.isProposalComplete() { + cs.enterPrevote(height, cs.Round) + } + } + + case types.VoteTypePrecommit: + precommits := cs.Votes.Precommits(vote.Round) + cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) + blockID, ok := precommits.TwoThirdsMajority() + if ok { + if len(blockID.Hash) == 0 { + cs.enterNewRound(height, vote.Round+1) + } else { + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + cs.enterCommit(height, vote.Round) + + if cs.config.SkipTimeoutCommit && precommits.HasAll() { + // if we have all the votes now, + // go straight to new round (skip timeout commit) + // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) + cs.enterNewRound(cs.Height, 0) + } + + } + } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + cs.enterPrecommitWait(height, vote.Round) + } + default: + panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. + } + + return +} + +func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { + addr := cs.privValidator.GetAddress() + valIndex, _ := cs.Validators.GetByAddress(addr) + vote := &types.Vote{ + ValidatorAddress: addr, + ValidatorIndex: valIndex, + Height: cs.Height, + Round: cs.Round, + Timestamp: time.Now().UTC(), + Type: type_, + BlockID: types.BlockID{hash, header}, + } + err := cs.privValidator.SignVote(cs.state.ChainID, vote) + return vote, err +} + +// sign the vote and publish on internalMsgQueue +func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote { + // if we don't have a key or we're not in the validator set, do nothing + if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { + return nil + } + vote, err := cs.signVote(type_, hash, header) + if err == nil { + cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""}) + cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) + return vote + } + //if !cs.replayMode { + cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) + //} + return nil +} + +//--------------------------------------------------------- + +func CompareHRS(h1 int64, r1 int, s1 cstypes.RoundStepType, h2 int64, r2 int, s2 cstypes.RoundStepType) int { + if h1 < h2 { + return -1 + } else if h1 > h2 { + return 1 + } + if r1 < r2 { + return -1 + } else if r1 > r2 { + return 1 + } + if s1 < s2 { + return -1 + } else if s1 > s2 { + return 1 + } + return 0 +} diff --git a/consensus/state_test.go b/consensus/state_test.go new file mode 100644 index 000000000..6a14e17b5 --- /dev/null +++ b/consensus/state_test.go @@ -0,0 +1,1099 @@ +package consensus + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + cstypes "github.com/tendermint/tendermint/consensus/types" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +func init() { + config = ResetConfig("consensus_state_test") +} + +func ensureProposeTimeout(timeoutPropose int) time.Duration { + return time.Duration(timeoutPropose*2) * time.Millisecond +} + +/* + +ProposeSuite +x * TestProposerSelection0 - round robin ordering, round 0 +x * TestProposerSelection2 - round robin 
ordering, round 2++
+x * TestEnterProposeNoValidator - timeout into prevote round
+x * TestEnterPropose - finish propose without timing out (we have the proposal)
+x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevote and precommit nil
+FullRoundSuite
+x * TestFullRound1 - 1 val, full successful round
+x * TestFullRoundNil - 1 val, full round of nil
+x * TestFullRound2 - 2 vals, both required for full round
+LockSuite
+x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
+x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
+x * TestLockPOLUnlock - 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
+x * TestLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round
+x * TestLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round
+ * TestNetworkLock - once +1/3 precommits, network should be locked
+ * TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed
+SlashingSuite
+x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed
+x * TestSlashingPrecommits - a validator precommitting twice in a round gets slashed
+CatchupSuite
+ * TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote
+HaltSuite
+x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we should still commit
+
+*/
+
+//----------------------------------------------------------------------------------------------------
+// ProposeSuite
+
+func TestStateProposerSelection0(t *testing.T) {
+ cs1, vss := randConsensusState(4)
+ height, round := cs1.Height, cs1.Round
+
+ newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
+ proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
+
+ startTestRound(cs1, height, round)
+
+ // wait for new round so proposer is set
+ <-newRoundCh
+
+ // let's commit a block and ensure proposer for the next height is correct
+ prop := cs1.GetRoundState().Validators.GetProposer()
+ if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
+ t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
+ }
+
+ // wait for complete proposal
+ <-proposalCh
+
+ rs := cs1.GetRoundState()
+ signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)
+
+ // wait for new round so next validator is set
+ <-newRoundCh
+
+ prop = cs1.GetRoundState().Validators.GetProposer()
+ if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
+ panic(cmn.Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
+ }
+}
+
+// Now let's do it all again, but starting from round 2 instead of 0
+func TestStateProposerSelection2(t *testing.T) {
+ cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
+
+ newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
+
+ // this time we jump in at round 2
+ incrementRound(vss[1:]...)
+ incrementRound(vss[1:]...)
+ startTestRound(cs1, cs1.Height, 2)
+
+ <-newRoundCh // wait for the new round
+
+ // everyone just votes nil. we get a new proposer each round
+ for i := 0; i < len(vss); i++ {
+ prop := cs1.GetRoundState().Validators.GetProposer()
+ if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) {
+ panic(cmn.Fmt("expected proposer to be validator %d. 
Got %X", (i+2)%len(vss), prop.Address)) + } + + rs := cs1.GetRoundState() + signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...) + <-newRoundCh // wait for the new round event each round + + incrementRound(vss[1:]...) + } + +} + +// a non-validator should timeout into the prevote round +func TestStateEnterProposeNoPrivValidator(t *testing.T) { + cs, _ := randConsensusState(1) + cs.SetPrivValidator(nil) + height, round := cs.Height, cs.Round + + // Listen for propose timeout event + timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + + startTestRound(cs, height, round) + + // if we're not a validator, EnterPropose should timeout + ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) + select { + case <-timeoutCh: + case <-ticker.C: + panic("Expected EnterPropose to timeout") + + } + + if cs.GetRoundState().Proposal != nil { + t.Error("Expected to make no proposal, since no privValidator") + } +} + +// a validator should not timeout of the prevote round (TODO: unless the block is really big!) +func TestStateEnterProposeYesPrivValidator(t *testing.T) { + cs, _ := randConsensusState(1) + height, round := cs.Height, cs.Round + + // Listen for propose timeout event + + timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + + cs.enterNewRound(height, round) + cs.startRoutines(3) + + <-proposalCh + + // Check that Proposal, ProposalBlock, ProposalBlockParts are set. + rs := cs.GetRoundState() + if rs.Proposal == nil { + t.Error("rs.Proposal should be set") + } + if rs.ProposalBlock == nil { + t.Error("rs.ProposalBlock should be set") + } + if rs.ProposalBlockParts.Total() == 0 { + t.Error("rs.ProposalBlockParts should be set") + } + + // if we're a validator, enterPropose should not timeout + ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) + select { + case <-timeoutCh: + panic("Expected EnterPropose not to timeout") + case <-ticker.C: + + } +} + +func TestStateBadProposal(t *testing.T) { + cs1, vss := randConsensusState(2) + height, round := cs1.Height, cs1.Round + vs2 := vss[1] + + partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + + propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2) + + // make the second validator the proposer by incrementing round + round = round + 1 + incrementRound(vss[1:]...) 
+ + // make the block bad by tampering with statehash + stateHash := propBlock.AppHash + if len(stateHash) == 0 { + stateHash = make([]byte, 32) + } + stateHash[0] = byte((stateHash[0] + 1) % 255) + propBlock.AppHash = stateHash + propBlockParts := propBlock.MakePartSet(partSize) + proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{}) + if err := vs2.SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + + // start the machine + startTestRound(cs1, height, round) + + // wait for proposal + <-proposalCh + + // wait for prevote + <-voteCh + + validatePrevote(t, cs1, round, vss[0], nil) + + // add bad prevote from vs2 and wait for it + signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + <-voteCh + + // wait for precommit + <-voteCh + + validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) + signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) +} + +//---------------------------------------------------------------------------------------------------- +// FullRoundSuite + +// propose, prevote, and precommit a block +func TestStateFullRound1(t *testing.T) { + cs, vss := randConsensusState(1) + height, round := cs.Height, cs.Round + + // NOTE: buffer capacity of 0 ensures we can validate prevote and last commit + // before consensus can move to the next height (and cause a race condition) + cs.eventBus.Stop() + eventBus := types.NewEventBusWithBufferCapacity(0) + eventBus.SetLogger(log.TestingLogger().With("module", "events")) + cs.SetEventBus(eventBus) + eventBus.Start() + + voteCh := subscribe(cs.eventBus, types.EventQueryVote) + propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + + startTestRound(cs, height, round) + + <-newRoundCh + + // grab proposal + re := <-propCh + propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() + + <-voteCh // wait for prevote + validatePrevote(t, cs, round, vss[0], propBlockHash) + + <-voteCh // wait for precommit + + // we're going to roll right into new height + <-newRoundCh + + validateLastPrecommit(t, cs, vss[0], propBlockHash) +} + +// nil is proposed, so prevote and precommit nil +func TestStateFullRoundNil(t *testing.T) { + cs, vss := randConsensusState(1) + height, round := cs.Height, cs.Round + + voteCh := subscribe(cs.eventBus, types.EventQueryVote) + + cs.enterPrevote(height, round) + cs.startRoutines(4) + + <-voteCh // prevote + <-voteCh // precommit + + // should prevote and precommit nil + validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) +} + +// run through propose, prevote, precommit commit with two validators +// where the first validator has to wait for votes from the second +func TestStateFullRound2(t *testing.T) { + cs1, vss := randConsensusState(2) + vs2 := vss[1] + height, round := cs1.Height, cs1.Round + + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + + // start round and wait for propose and prevote + startTestRound(cs1, height, round) + + <-voteCh // prevote + + // we should be stuck in limbo waiting for more prevotes + rs := cs1.GetRoundState() + propBlockHash, 
propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() + + // prevote arrives from vs2: + signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2) + <-voteCh + + <-voteCh //precommit + + // the proposed block should now be locked and our precommit added + validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) + + // we should be stuck in limbo waiting for more precommits + + // precommit arrives from vs2: + signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2) + <-voteCh + + // wait to finish commit, propose in next height + <-newBlockCh +} + +//------------------------------------------------------------------------------------------ +// LockSuite + +// two validators, 4 rounds. +// two vals take turns proposing. val1 locks on first one, precommits nil on everything else +func TestStateLockNoPOL(t *testing.T) { + cs1, vss := randConsensusState(2) + vs2 := vss[1] + height := cs1.Height + + partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + + /* + Round1 (cs1, B) // B B // B B2 + */ + + // start round and wait for prevote + cs1.enterNewRound(height, 0) + cs1.startRoutines(0) + + re := <-proposalCh + rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + theBlockHash := rs.ProposalBlock.Hash() + + <-voteCh // prevote + + // we should now be stuck in limbo forever, waiting for more prevotes + // prevote arrives from vs2: + signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2) + <-voteCh // prevote + + <-voteCh // precommit + + // the proposed block should now be locked and our precommit added + validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) + + // we should now be stuck in limbo forever, waiting for more precommits + // lets add one for a different block + // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round + hash := make([]byte, len(theBlockHash)) + copy(hash, theBlockHash) + hash[0] = byte((hash[0] + 1) % 255) + signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) + <-voteCh // precommit + + // (note we're entering precommit for a second time this round) + // but with invalid args. then we enterPrecommitWait, and the timeout to new round + <-timeoutWaitCh + + /// + + <-newRoundCh + t.Log("#### ONTO ROUND 1") + /* + Round2 (cs1, B) // B B2 + */ + + incrementRound(vs2) + + // now we're on a new round and not the proposer, so wait for timeout + re = <-timeoutProposeCh + rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + + if rs.ProposalBlock != nil { + panic("Expected proposal block to be nil") + } + + // wait to finish prevote + <-voteCh + + // we should have prevoted our locked block + validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash()) + + // add a conflicting prevote from the other validator + signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + <-voteCh + + // now we're going to enter prevote again, but with invalid args + // and then prevote wait, which should timeout. 
then wait for precommit + <-timeoutWaitCh + + <-voteCh // precommit + + // the proposed block should still be locked and our precommit added + // we should precommit nil and be locked on the proposal + validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash) + + // add conflicting precommit from vs2 + // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round + signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + <-voteCh + + // (note we're entering precommit for a second time this round, but with invalid args + // then we enterPrecommitWait and timeout into NewRound + <-timeoutWaitCh + + <-newRoundCh + t.Log("#### ONTO ROUND 2") + /* + Round3 (vs2, _) // B, B2 + */ + + incrementRound(vs2) + + re = <-proposalCh + rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + + // now we're on a new round and are the proposer + if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { + panic(cmn.Fmt("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) + } + + <-voteCh // prevote + + validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash()) + + signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) + <-voteCh + + <-timeoutWaitCh // prevote wait + <-voteCh // precommit + + validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal + + signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height + <-voteCh + + <-timeoutWaitCh + + // before we time out into new round, set next proposal block + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) + if prop == nil || propBlock == nil { + t.Fatal("Failed to create proposal block with vs2") + } + + incrementRound(vs2) + + <-newRoundCh + t.Log("#### ONTO ROUND 3") + /* + Round4 (vs2, C) // B C // B C + */ + + // now we're on a new round and not the proposer + // so set the proposal block + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil { + t.Fatal(err) + } + + <-proposalCh + <-voteCh // prevote + + // prevote for locked block (not proposal) + validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) + + signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + <-voteCh + + <-timeoutWaitCh + <-voteCh + + validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + + signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height + <-voteCh +} + +// 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka +func TestStateLockPOLRelock(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) + + // everything done 
from perspective of cs1
+
+ /*
+ Round1 (cs1, B) // B B B B// B nil B nil
+
+ eg. vs2 and vs4 didn't see the 2/3 prevotes
+ */
+
+ // start round and wait for propose and prevote
+ startTestRound(cs1, cs1.Height, 0)
+
+ <-newRoundCh
+ re := <-proposalCh
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ theBlockHash := rs.ProposalBlock.Hash()
+
+ <-voteCh // prevote
+
+ signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
+ // prevotes
+ discardFromChan(voteCh, 3)
+
+ <-voteCh // our precommit
+ // the proposed block should now be locked and our precommit added
+ validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)
+
+ // add precommits from the rest
+ signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
+ signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
+ // precommits
+ discardFromChan(voteCh, 3)
+
+ // before we time out to the new round, set the new proposal
+ prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
+ propBlockParts := propBlock.MakePartSet(partSize)
+ propBlockHash := propBlock.Hash()
+
+ incrementRound(vs2, vs3, vs4)
+
+ // timeout to new round
+ <-timeoutWaitCh
+
+ // XXX: this isn't guaranteed to get there before the timeoutPropose ...
+ if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
+ t.Fatal(err)
+ }
+
+ <-newRoundCh
+ t.Log("### ONTO ROUND 1")
+
+ /*
+ Round2 (vs2, C) // B C C C // C C C _)
+
+ cs1 changes lock!
+ */
+
+ // now we're on a new round and not the proposer
+ // but we should receive the proposal
+ select {
+ case <-proposalCh:
+ case <-timeoutProposeCh:
+ <-proposalCh
+ }
+
+ // go to prevote, prevote for locked block (not proposal), move on
+ <-voteCh
+ validatePrevote(t, cs1, 0, vss[0], theBlockHash)
+
+ // now let's add prevotes from everyone else for the new block
+ signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
+ // prevotes
+ discardFromChan(voteCh, 3)
+
+ // now either we go to PrevoteWait or Precommit
+ select {
+ case <-timeoutWaitCh: // we're in PrevoteWait, go to Precommit
+ // XXX: there's no guarantee we see the polka, this might be a precommit for nil,
+ // in which case the test fails!
+ <-voteCh
+ case <-voteCh: // we went straight to Precommit
+ }
+
+ // we should have unlocked and locked on the new block
+ validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
+
+ signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
+ discardFromChan(voteCh, 2)
+
+ be := <-newBlockCh
+ b := be.(types.EventDataNewBlockHeader)
+ re = <-newRoundCh
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ if rs.Height != 2 {
+ panic("Expected height to increment")
+ }
+
+ if !bytes.Equal(b.Header.Hash(), propBlockHash) {
+ panic("Expected new block to be proposal block")
+ }
+}
+
+// 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
+func TestStateLockPOLUnlock(t *testing.T) {
+ cs1, vss := randConsensusState(4)
+ vs2, vs3, vs4 := vss[1], vss[2], vss[3]
+
+ partSize := cs1.state.ConsensusParams.BlockPartSizeBytes
+
+ proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
+ timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
+ timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
+ newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
+ unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
+ voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
+
+ // everything done from perspective of cs1
+
+ /*
+ Round1 (cs1, B) // B B B B // B nil B nil
+
+ eg. didn't see the 2/3 prevotes
+ */
+
+ // start round and wait for propose and prevote
+ startTestRound(cs1, cs1.Height, 0)
+ <-newRoundCh
+ re := <-proposalCh
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ theBlockHash := rs.ProposalBlock.Hash()
+
+ <-voteCh // prevote
+
+ signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
+
+ <-voteCh // precommit
+
+ // the proposed block should now be locked and our precommit added
+ validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)
+
+ rs = cs1.GetRoundState()
+
+ // add precommits from the rest
+ signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
+ signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
+
+ // before we time out into new round, set next proposal block
+ prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
+ propBlockParts := propBlock.MakePartSet(partSize)
+
+ incrementRound(vs2, vs3, vs4)
+
+ // timeout to new round
+ re = <-timeoutWaitCh
+ rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ lockedBlockHash := rs.LockedBlock.Hash()
+
+ // XXX: this isn't guaranteed to get there before the timeoutPropose ...
+ if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
+ t.Fatal(err)
+ }
+
+ <-newRoundCh
+ t.Log("#### ONTO ROUND 1")
+ /*
+ Round2 (vs2, C) // B nil nil nil // nil nil nil _
+
+ cs1 unlocks!
+ */
+
+ // now we're on a new round and not the proposer,
+ // but we should receive the proposal
+ select {
+ case <-proposalCh:
+ case <-timeoutProposeCh:
+ <-proposalCh
+ }
+
+ // go to prevote, prevote for locked block (not proposal)
+ <-voteCh
+ validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)
+ // now let's add prevotes from everyone else for nil (a polka!)
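+ // (a nil polka at a later round is precisely the unlock condition)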
+ signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + // the polka makes us unlock and precommit nil + <-unlockCh + <-voteCh // precommit + + // we should have unlocked and committed nil + // NOTE: since we don't relock on nil, the lock round is 0 + validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil) + + signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) + <-newRoundCh +} + +// 4 vals +// a polka at round 1 but we miss it +// then a polka at round 2 that we lock on +// then we see the polka from round 1 but shouldn't unlock +func TestStateLockPOLSafety1(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round and wait for propose and prevote + startTestRound(cs1, cs1.Height, 0) + <-newRoundCh + re := <-proposalCh + rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + propBlock := rs.ProposalBlock + + <-voteCh // prevote + + validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) + + // the others sign a polka but we don't see it + prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + + // before we time out into new round, set next proposer + // and next proposal block + /* + _, v1 := cs1.Validators.GetByAddress(vss[0].Address) + v1.VotingPower = 1 + if updated := cs1.Validators.Update(v1); !updated { + panic("failed to update validator") + }*/ + + t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) + + // we do see them precommit nil + signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + incrementRound(vs2, vs3, vs4) + + //XXX: this isnt guaranteed to get there before the timeoutPropose ... + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + + <-newRoundCh + t.Log("### ONTO ROUND 1") + /*Round2 + // we timeout and prevote our lock + // a polka happened but we didn't see it! 
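+ // (those round-0 polka prevotes were saved in the prevotes variable above and are replayed later, in round 2)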
+ */ + + // now we're on a new round and not the proposer, + // but we should receive the proposal + select { + case re = <-proposalCh: + case <-timeoutProposeCh: + re = <-proposalCh + } + + rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + + if rs.LockedBlock != nil { + panic("we should not be locked!") + } + t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) + // go to prevote, prevote for proposal block + <-voteCh + validatePrevote(t, cs1, 1, vss[0], propBlockHash) + + // now we see the others prevote for it, so we should lock on it + signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + + <-voteCh // precommit + + // we should have precommitted + validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) + + signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) + + <-timeoutWaitCh + + incrementRound(vs2, vs3, vs4) + + <-newRoundCh + + t.Log("### ONTO ROUND 2") + /*Round3 + we see the polka from round 1 but we shouldn't unlock! + */ + + // timeout of propose + <-timeoutProposeCh + + // finish prevote + <-voteCh + + // we should prevote what we're locked on + validatePrevote(t, cs1, 2, vss[0], propBlockHash) + + newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + + // add prevotes from the earlier round + addVotes(cs1, prevotes...) + + t.Log("Done adding prevotes!") + + ensureNoNewStep(newStepCh) +} + +// 4 vals. +// polka P0 at R0, P1 at R1, and P2 at R2, +// we lock on P0 at R0, don't see P1, and unlock using P2 at R2 +// then we should make sure we don't lock using P1 + +// What we want: +// dont see P0, lock on P1 at R1, dont unlock using P0 at R2 +func TestStateLockPOLSafety2(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // the block for R0: gets polkad but we miss it + // (even though we signed it, shhh) + _, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round) + propBlockHash0 := propBlock0.Hash() + propBlockParts0 := propBlock0.MakePartSet(partSize) + + // the others sign a polka but we don't see it + prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) + + // the block for round 1 + prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) + propBlockHash1 := propBlock1.Hash() + propBlockParts1 := propBlock1.MakePartSet(partSize) + propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()} + + incrementRound(vs2, vs3, vs4) + + cs1.updateRoundStep(0, cstypes.RoundStepPrecommitWait) + + t.Log("### ONTO Round 1") + // jump in at round 1 + height := cs1.Height + startTestRound(cs1, height, 1) + <-newRoundCh + + if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { + t.Fatal(err) + } + <-proposalCh + + <-voteCh // prevote + + signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) + + <-voteCh // precommit + // the proposed block should now be locked and our 
precommit added + validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1) + + // add precommits from the rest + signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3) + + incrementRound(vs2, vs3, vs4) + + // timeout of precommit wait to new round + <-timeoutWaitCh + + // in round 2 we see the polkad block from round 0 + newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1) + if err := vs3.SignProposal(config.ChainID(), newProp); err != nil { + t.Fatal(err) + } + if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { + t.Fatal(err) + } + + // Add the pol votes + addVotes(cs1, prevotes...) + + <-newRoundCh + t.Log("### ONTO Round 2") + /*Round2 + // now we see the polka from round 1, but we shouldnt unlock + */ + + select { + case <-timeoutProposeCh: + <-proposalCh + case <-proposalCh: + } + + select { + case <-unlockCh: + panic("validator unlocked using an old polka") + case <-voteCh: + // prevote our locked block + } + validatePrevote(t, cs1, 2, vss[0], propBlockHash1) + +} + +//------------------------------------------------------------------------------------------ +// SlashingSuite +// TODO: Slashing + +/* +func TestStateSlashingPrevotes(t *testing.T) { + cs1, vss := randConsensusState(2) + vs2 := vss[1] + + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round and wait for propose and prevote + startTestRound(cs1, cs1.Height, 0) + <-newRoundCh + re := <-proposalCh + <-voteCh // prevote + + rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + + // we should now be stuck in limbo forever, waiting for more prevotes + // add one for a different block should cause us to go into prevote wait + hash := rs.ProposalBlock.Hash() + hash[0] = byte(hash[0]+1) % 255 + signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2) + + <-timeoutWaitCh + + // NOTE: we have to send the vote for different block first so we don't just go into precommit round right + // away and ignore more prevotes (and thus fail to slash!) 
+
+ // add the conflicting vote
+ signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
+
+ // XXX: Check for existence of Dupeout info
+}
+
+func TestStateSlashingPrecommits(t *testing.T) {
+ cs1, vss := randConsensusState(2)
+ vs2 := vss[1]
+
+
+ proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
+ timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
+ newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
+ voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
+
+ // start round and wait for propose and prevote
+ startTestRound(cs1, cs1.Height, 0)
+ <-newRoundCh
+ re := <-proposalCh
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ <-voteCh // prevote
+
+ // add prevote from vs2
+ signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
+
+ <-voteCh // precommit
+
+ // we should now be stuck in limbo forever, waiting for more precommits
+ // adding one for a different block should cause us to go into precommit wait
+ hash := rs.ProposalBlock.Hash()
+ hash[0] = byte(hash[0]+1) % 255
+ signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2)
+
+ // NOTE: we have to send the vote for different block first so we don't just go into precommit round right
+ // away and ignore more prevotes (and thus fail to slash!)
+
+ // add precommit from vs2
+ signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
+
+ // XXX: Check for existence of Dupeout info
+}
+*/
+
+//------------------------------------------------------------------------------------------
+// CatchupSuite
+
+//------------------------------------------------------------------------------------------
+// HaltSuite
+
+// 4 vals.
+// we receive a final precommit after going into next round, but others might have gone to commit already!
+func TestStateHalt1(t *testing.T) {
+ cs1, vss := randConsensusState(4)
+ vs2, vs3, vs4 := vss[1], vss[2], vss[3]
+
+ partSize := cs1.state.ConsensusParams.BlockPartSizeBytes
+
+ proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
+ timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
+ newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
+ newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
+ voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
+
+ // start round and wait for propose and prevote
+ startTestRound(cs1, cs1.Height, 0)
+ <-newRoundCh
+ re := <-proposalCh
+ rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
+ propBlock := rs.ProposalBlock
+ propBlockParts := propBlock.MakePartSet(partSize)
+
+ <-voteCh // prevote
+
+ signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4)
+ <-voteCh // precommit
+
+ // the proposed block should now be locked and our precommit added
+ validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash())
+
+ // add precommits from the rest
+ signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didn't receive proposal
+ signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3)
+ // we receive this later, but vs3 might receive it earlier and with ours will go to commit!
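+ // sign vs4's precommit now, but hold it back; it is delivered via addVotes once we have moved to the next round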
+ precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header()) + + incrementRound(vs2, vs3, vs4) + + // timeout to new round + <-timeoutWaitCh + re = <-newRoundCh + rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + + t.Log("### ONTO ROUND 1") + /*Round2 + // we timeout and prevote our lock + // a polka happened but we didn't see it! + */ + + // go to prevote, prevote for locked block + <-voteCh // prevote + validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash()) + + // now we receive the precommit from the previous round + addVotes(cs1, precommit4) + + // receiving that precommit should take us straight to commit + <-newBlockCh + re = <-newRoundCh + rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + + if rs.Height != 2 { + panic("expected height to increment") + } +} + +// subscribe subscribes test client to the given query and returns a channel with cap = 1. +func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} { + out := make(chan interface{}, 1) + err := eventBus.Subscribe(context.Background(), testSubscriber, q, out) + if err != nil { + panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + } + return out +} + +// discardFromChan reads n values from the channel. +func discardFromChan(ch <-chan interface{}, n int) { + for i := 0; i < n; i++ { + <-ch + } +} diff --git a/consensus/ticker.go b/consensus/ticker.go new file mode 100644 index 000000000..a1e2174c3 --- /dev/null +++ b/consensus/ticker.go @@ -0,0 +1,134 @@ +package consensus + +import ( + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +var ( + tickTockBufferSize = 10 +) + +// TimeoutTicker is a timer that schedules timeouts +// conditional on the height/round/step in the timeoutInfo. +// The timeoutInfo.Duration may be non-positive. +type TimeoutTicker interface { + Start() error + Stop() error + Chan() <-chan timeoutInfo // on which to receive a timeout + ScheduleTimeout(ti timeoutInfo) // reset the timer + + SetLogger(log.Logger) +} + +// timeoutTicker wraps time.Timer, +// scheduling timeouts only for greater height/round/step +// than what it's already seen. +// Timeouts are scheduled along the tickChan, +// and fired on the tockChan. +type timeoutTicker struct { + cmn.BaseService + + timer *time.Timer + tickChan chan timeoutInfo // for scheduling timeouts + tockChan chan timeoutInfo // for notifying about them +} + +// NewTimeoutTicker returns a new TimeoutTicker. +func NewTimeoutTicker() TimeoutTicker { + tt := &timeoutTicker{ + timer: time.NewTimer(0), + tickChan: make(chan timeoutInfo, tickTockBufferSize), + tockChan: make(chan timeoutInfo, tickTockBufferSize), + } + tt.BaseService = *cmn.NewBaseService(nil, "TimeoutTicker", tt) + tt.stopTimer() // don't want to fire until the first scheduled timeout + return tt +} + +// OnStart implements cmn.Service. It starts the timeout routine. +func (t *timeoutTicker) OnStart() error { + + go t.timeoutRoutine() + + return nil +} + +// OnStop implements cmn.Service. It stops the timeout routine. +func (t *timeoutTicker) OnStop() { + t.BaseService.OnStop() + t.stopTimer() +} + +// Chan returns a channel on which timeouts are sent. +func (t *timeoutTicker) Chan() <-chan timeoutInfo { + return t.tockChan +} + +// ScheduleTimeout schedules a new timeout by sending on the internal tickChan. +// The timeoutRoutine is always available to read from tickChan, so this won't block. 
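+// (tickChan is buffered with tickTockBufferSize slots as an additional guard.)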
+// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
+func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
+	t.tickChan <- ti
+}
+
+//-------------------------------------------------------------
+
+// stop the timer and drain if necessary
+func (t *timeoutTicker) stopTimer() {
+	// Stop() returns false if it was already fired or was stopped
+	if !t.timer.Stop() {
+		select {
+		case <-t.timer.C:
+		default:
+			t.Logger.Debug("Timer already stopped")
+		}
+	}
+}
+
+// send on tickChan to start a new timer.
+// timers are interrupted and replaced by new ticks from later steps
+// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
+func (t *timeoutTicker) timeoutRoutine() {
+	t.Logger.Debug("Starting timeout routine")
+	var ti timeoutInfo
+	for {
+		select {
+		case newti := <-t.tickChan:
+			t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti)
+
+			// ignore tickers for old height/round/step
+			if newti.Height < ti.Height {
+				continue
+			} else if newti.Height == ti.Height {
+				if newti.Round < ti.Round {
+					continue
+				} else if newti.Round == ti.Round {
+					if ti.Step > 0 && newti.Step <= ti.Step {
+						continue
+					}
+				}
+			}
+
+			// stop the last timer
+			t.stopTimer()
+
+			// update timeoutInfo and reset timer
+			// NOTE time.Timer allows duration to be non-positive
+			ti = newti
+			t.timer.Reset(ti.Duration)
+			t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
+		case <-t.timer.C:
+			t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
+			// goroutine here guarantees timeoutRoutine doesn't block.
+			// Determinism comes from playback in the receiveRoutine.
+			// We can eliminate it by merging the timeoutRoutine into receiveRoutine
+			// and managing the timeouts ourselves with a millisecond ticker
+			go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
+		case <-t.Quit():
+			return
+		}
+	}
+}
diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go
new file mode 100644
index 000000000..70a38668f
--- /dev/null
+++ b/consensus/types/height_vote_set.go
@@ -0,0 +1,261 @@
+package types
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+type RoundVoteSet struct {
+	Prevotes   *types.VoteSet
+	Precommits *types.VoteSet
+}
+
+var (
+	GotVoteFromUnwantedRoundError = errors.New("Peer has sent a vote that does not match our round for more than one round")
+)
+
+/*
+Keeps track of all VoteSets from round 0 to round 'round'.
+
+Also keeps track of up to one RoundVoteSet greater than
+'round' from each peer, to facilitate catchup syncing of commits.
+
+A commit is +2/3 precommits for a block at a round,
+but the round is not known in advance, so when a peer
+provides a precommit for a round greater than mtx.round,
+we create a new entry in roundVoteSets but also remember the
+peer to prevent abuse.
+We let each peer provide us with up to 2 unexpected "catchup" rounds:
+one for their LastCommit round, and another for the official commit round.
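+
+Illustrative example (not from the original docs): if our round is 1 and a
+peer first sends a precommit for round 5, we create the round-5 RoundVoteSet
+and remember round 5 for that peer; a second unexpected round from the same
+peer is also accepted, but a third is rejected with
+GotVoteFromUnwantedRoundError (see AddVote below).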
+*/ +type HeightVoteSet struct { + chainID string + height int64 + valSet *types.ValidatorSet + + mtx sync.Mutex + round int // max tracked round + roundVoteSets map[int]RoundVoteSet // keys: [0...round] + peerCatchupRounds map[p2p.ID][]int // keys: peer.ID; values: at most 2 rounds +} + +func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { + hvs := &HeightVoteSet{ + chainID: chainID, + } + hvs.Reset(height, valSet) + return hvs +} + +func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + + hvs.height = height + hvs.valSet = valSet + hvs.roundVoteSets = make(map[int]RoundVoteSet) + hvs.peerCatchupRounds = make(map[p2p.ID][]int) + + hvs.addRound(0) + hvs.round = 0 +} + +func (hvs *HeightVoteSet) Height() int64 { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + return hvs.height +} + +func (hvs *HeightVoteSet) Round() int { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + return hvs.round +} + +// Create more RoundVoteSets up to round. +func (hvs *HeightVoteSet) SetRound(round int) { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + if hvs.round != 0 && (round < hvs.round+1) { + cmn.PanicSanity("SetRound() must increment hvs.round") + } + for r := hvs.round + 1; r <= round; r++ { + if _, ok := hvs.roundVoteSets[r]; ok { + continue // Already exists because peerCatchupRounds. + } + hvs.addRound(r) + } + hvs.round = round +} + +func (hvs *HeightVoteSet) addRound(round int) { + if _, ok := hvs.roundVoteSets[round]; ok { + cmn.PanicSanity("addRound() for an existing round") + } + // log.Debug("addRound(round)", "round", round) + prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrevote, hvs.valSet) + precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrecommit, hvs.valSet) + hvs.roundVoteSets[round] = RoundVoteSet{ + Prevotes: prevotes, + Precommits: precommits, + } +} + +// Duplicate votes return added=false, err=nil. +// By convention, peerID is "" if origin is self. +func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + if !types.IsVoteTypeValid(vote.Type) { + return + } + voteSet := hvs.getVoteSet(vote.Round, vote.Type) + if voteSet == nil { + if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 { + hvs.addRound(vote.Round) + voteSet = hvs.getVoteSet(vote.Round, vote.Type) + hvs.peerCatchupRounds[peerID] = append(rndz, vote.Round) + } else { + // punish peer + err = GotVoteFromUnwantedRoundError + return + } + } + added, err = voteSet.AddVote(vote) + return +} + +func (hvs *HeightVoteSet) Prevotes(round int) *types.VoteSet { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + return hvs.getVoteSet(round, types.VoteTypePrevote) +} + +func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + return hvs.getVoteSet(round, types.VoteTypePrecommit) +} + +// Last round and blockID that has +2/3 prevotes for a particular block or nil. +// Returns -1 if no such round exists. 
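+// For example (illustrative): if rounds 0..3 are tracked and round 2 is the
+// highest round with +2/3 prevotes for a single block B, POLInfo returns
+// (2, B's BlockID); if no round has such a majority, it returns (-1, types.BlockID{}).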
+func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + for r := hvs.round; r >= 0; r-- { + rvs := hvs.getVoteSet(r, types.VoteTypePrevote) + polBlockID, ok := rvs.TwoThirdsMajority() + if ok { + return r, polBlockID + } + } + return -1, types.BlockID{} +} + +func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet { + rvs, ok := hvs.roundVoteSets[round] + if !ok { + return nil + } + switch type_ { + case types.VoteTypePrevote: + return rvs.Prevotes + case types.VoteTypePrecommit: + return rvs.Precommits + default: + cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", type_)) + return nil + } +} + +// If a peer claims that it has 2/3 majority for given blockKey, call this. +// NOTE: if there are too many peers, or too much peer churn, +// this can cause memory issues. +// TODO: implement ability to remove peers too +func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + if !types.IsVoteTypeValid(type_) { + return fmt.Errorf("SetPeerMaj23: Invalid vote type %v", type_) + } + voteSet := hvs.getVoteSet(round, type_) + if voteSet == nil { + return nil // something we don't know about yet + } + return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID) +} + +//--------------------------------------------------------- +// string and json + +func (hvs *HeightVoteSet) String() string { + return hvs.StringIndented("") +} + +func (hvs *HeightVoteSet) StringIndented(indent string) string { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + vsStrings := make([]string, 0, (len(hvs.roundVoteSets)+1)*2) + // rounds 0 ~ hvs.round inclusive + for round := 0; round <= hvs.round; round++ { + voteSetString := hvs.roundVoteSets[round].Prevotes.StringShort() + vsStrings = append(vsStrings, voteSetString) + voteSetString = hvs.roundVoteSets[round].Precommits.StringShort() + vsStrings = append(vsStrings, voteSetString) + } + // all other peer catchup rounds + for round, roundVoteSet := range hvs.roundVoteSets { + if round <= hvs.round { + continue + } + voteSetString := roundVoteSet.Prevotes.StringShort() + vsStrings = append(vsStrings, voteSetString) + voteSetString = roundVoteSet.Precommits.StringShort() + vsStrings = append(vsStrings, voteSetString) + } + return cmn.Fmt(`HeightVoteSet{H:%v R:0~%v +%s %v +%s}`, + hvs.height, hvs.round, + indent, strings.Join(vsStrings, "\n"+indent+" "), + indent) +} + +func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) { + hvs.mtx.Lock() + defer hvs.mtx.Unlock() + + allVotes := hvs.toAllRoundVotes() + return cdc.MarshalJSON(allVotes) +} + +func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes { + totalRounds := hvs.round + 1 + allVotes := make([]roundVotes, totalRounds) + // rounds 0 ~ hvs.round inclusive + for round := 0; round < totalRounds; round++ { + allVotes[round] = roundVotes{ + Round: round, + Prevotes: hvs.roundVoteSets[round].Prevotes.VoteStrings(), + PrevotesBitArray: hvs.roundVoteSets[round].Prevotes.BitArrayString(), + Precommits: hvs.roundVoteSets[round].Precommits.VoteStrings(), + PrecommitsBitArray: hvs.roundVoteSets[round].Precommits.BitArrayString(), + } + } + // TODO: all other peer catchup rounds + return allVotes +} + +type roundVotes struct { + Round int `json:"round"` + Prevotes []string `json:"prevotes"` + PrevotesBitArray string `json:"prevotes_bit_array"` + Precommits []string `json:"precommits"` + PrecommitsBitArray string `json:"precommits_bit_array"` +} diff --git 
a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go new file mode 100644 index 000000000..0de656000 --- /dev/null +++ b/consensus/types/height_vote_set_test.go @@ -0,0 +1,69 @@ +package types + +import ( + "testing" + "time" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var config *cfg.Config // NOTE: must be reset for each _test.go file + +func init() { + config = cfg.ResetTestRoot("consensus_height_vote_set_test") +} + +func TestPeerCatchupRounds(t *testing.T) { + valSet, privVals := types.RandValidatorSet(10, 1) + + hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) + + vote999_0 := makeVoteHR(t, 1, 999, privVals, 0) + added, err := hvs.AddVote(vote999_0, "peer1") + if !added || err != nil { + t.Error("Expected to successfully add vote from peer", added, err) + } + + vote1000_0 := makeVoteHR(t, 1, 1000, privVals, 0) + added, err = hvs.AddVote(vote1000_0, "peer1") + if !added || err != nil { + t.Error("Expected to successfully add vote from peer", added, err) + } + + vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0) + added, err = hvs.AddVote(vote1001_0, "peer1") + if err != GotVoteFromUnwantedRoundError { + t.Errorf("Expected GotVoteFromUnwantedRoundError, but got %v", err) + } + if added { + t.Error("Expected to *not* add vote from peer, too many catchup rounds.") + } + + added, err = hvs.AddVote(vote1001_0, "peer2") + if !added || err != nil { + t.Error("Expected to successfully add vote from another peer") + } + +} + +func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote { + privVal := privVals[valIndex] + vote := &types.Vote{ + ValidatorAddress: privVal.GetAddress(), + ValidatorIndex: valIndex, + Height: height, + Round: round, + Timestamp: time.Now().UTC(), + Type: types.VoteTypePrecommit, + BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}}, + } + chainID := config.ChainID() + err := privVal.SignVote(chainID, vote) + if err != nil { + panic(cmn.Fmt("Error signing vote: %v", err)) + return nil + } + return vote +} diff --git a/consensus/types/peer_round_state.go b/consensus/types/peer_round_state.go new file mode 100644 index 000000000..7a5d69b8e --- /dev/null +++ b/consensus/types/peer_round_state.go @@ -0,0 +1,57 @@ +package types + +import ( + "fmt" + "time" + + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +//----------------------------------------------------------------------------- + +// PeerRoundState contains the known state of a peer. +// NOTE: Read-only when returned by PeerState.GetRoundState(). +type PeerRoundState struct { + Height int64 `json:"height"` // Height peer is at + Round int `json:"round"` // Round peer is at, -1 if unknown. + Step RoundStepType `json:"step"` // Step peer is at + StartTime time.Time `json:"start_time"` // Estimated start of round 0 at this height + Proposal bool `json:"proposal"` // True if peer has proposal for this round + ProposalBlockPartsHeader types.PartSetHeader `json:"proposal_block_parts_header"` // + ProposalBlockParts *cmn.BitArray `json:"proposal_block_parts"` // + ProposalPOLRound int `json:"proposal_pol_round"` // Proposal's POL round. -1 if none. + ProposalPOL *cmn.BitArray `json:"proposal_pol"` // nil until ProposalPOLMessage received. 
+ Prevotes *cmn.BitArray `json:"prevotes"` // All votes peer has for this round + Precommits *cmn.BitArray `json:"precommits"` // All precommits peer has for this round + LastCommitRound int `json:"last_commit_round"` // Round of commit for last height. -1 if none. + LastCommit *cmn.BitArray `json:"last_commit"` // All commit precommits of commit for last height. + CatchupCommitRound int `json:"catchup_commit_round"` // Round that we have commit for. Not necessarily unique. -1 if none. + CatchupCommit *cmn.BitArray `json:"catchup_commit"` // All commit precommits peer has for this height & CatchupCommitRound +} + +// String returns a string representation of the PeerRoundState +func (prs PeerRoundState) String() string { + return prs.StringIndented("") +} + +// StringIndented returns a string representation of the PeerRoundState +func (prs PeerRoundState) StringIndented(indent string) string { + return fmt.Sprintf(`PeerRoundState{ +%s %v/%v/%v @%v +%s Proposal %v -> %v +%s POL %v (round %v) +%s Prevotes %v +%s Precommits %v +%s LastCommit %v (round %v) +%s Catchup %v (round %v) +%s}`, + indent, prs.Height, prs.Round, prs.Step, prs.StartTime, + indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts, + indent, prs.ProposalPOL, prs.ProposalPOLRound, + indent, prs.Prevotes, + indent, prs.Precommits, + indent, prs.LastCommit, prs.LastCommitRound, + indent, prs.CatchupCommit, prs.CatchupCommitRound, + indent) +} diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go new file mode 100644 index 000000000..cca560ccf --- /dev/null +++ b/consensus/types/round_state.go @@ -0,0 +1,164 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +//----------------------------------------------------------------------------- +// RoundStepType enum type + +// RoundStepType enumerates the state of the consensus state machine +type RoundStepType uint8 // These must be numeric, ordered. + +// RoundStepType +const ( + RoundStepNewHeight = RoundStepType(0x01) // Wait til CommitTime + timeoutCommit + RoundStepNewRound = RoundStepType(0x02) // Setup new round and go to RoundStepPropose + RoundStepPropose = RoundStepType(0x03) // Did propose, gossip proposal + RoundStepPrevote = RoundStepType(0x04) // Did prevote, gossip prevotes + RoundStepPrevoteWait = RoundStepType(0x05) // Did receive any +2/3 prevotes, start timeout + RoundStepPrecommit = RoundStepType(0x06) // Did precommit, gossip precommits + RoundStepPrecommitWait = RoundStepType(0x07) // Did receive any +2/3 precommits, start timeout + RoundStepCommit = RoundStepType(0x08) // Entered commit state machine + // NOTE: RoundStepNewHeight acts as RoundStepCommitWait. +) + +// String returns a string +func (rs RoundStepType) String() string { + switch rs { + case RoundStepNewHeight: + return "RoundStepNewHeight" + case RoundStepNewRound: + return "RoundStepNewRound" + case RoundStepPropose: + return "RoundStepPropose" + case RoundStepPrevote: + return "RoundStepPrevote" + case RoundStepPrevoteWait: + return "RoundStepPrevoteWait" + case RoundStepPrecommit: + return "RoundStepPrecommit" + case RoundStepPrecommitWait: + return "RoundStepPrecommitWait" + case RoundStepCommit: + return "RoundStepCommit" + default: + return "RoundStepUnknown" // Cannot panic. + } +} + +//----------------------------------------------------------------------------- + +// RoundState defines the internal consensus state. +// NOTE: Not thread safe. 
Should only be manipulated by functions downstream
+// of the cs.receiveRoutine
+type RoundState struct {
+	Height             int64               `json:"height"` // Height we are working on
+	Round              int                 `json:"round"`
+	Step               RoundStepType       `json:"step"`
+	StartTime          time.Time           `json:"start_time"`
+	CommitTime         time.Time           `json:"commit_time"` // Subjective time when +2/3 precommits for Block at Round were found
+	Validators         *types.ValidatorSet `json:"validators"`
+	Proposal           *types.Proposal     `json:"proposal"`
+	ProposalBlock      *types.Block        `json:"proposal_block"`
+	ProposalBlockParts *types.PartSet      `json:"proposal_block_parts"`
+	LockedRound        int                 `json:"locked_round"`
+	LockedBlock        *types.Block        `json:"locked_block"`
+	LockedBlockParts   *types.PartSet      `json:"locked_block_parts"`
+	ValidRound         int                 `json:"valid_round"`       // Last known round with POL for non-nil valid block.
+	ValidBlock         *types.Block        `json:"valid_block"`       // Last known block of POL mentioned above.
+	ValidBlockParts    *types.PartSet      `json:"valid_block_parts"` // Last known block parts of POL mentioned above.
+	Votes              *HeightVoteSet      `json:"votes"`
+	CommitRound        int                 `json:"commit_round"` //
+	LastCommit         *types.VoteSet      `json:"last_commit"` // Last precommits at Height-1
+	LastValidators     *types.ValidatorSet `json:"last_validators"`
+}
+
+// Compressed version of the RoundState for use in RPC
+type RoundStateSimple struct {
+	HeightRoundStep   string          `json:"height/round/step"`
+	StartTime         time.Time       `json:"start_time"`
+	ProposalBlockHash cmn.HexBytes    `json:"proposal_block_hash"`
+	LockedBlockHash   cmn.HexBytes    `json:"locked_block_hash"`
+	ValidBlockHash    cmn.HexBytes    `json:"valid_block_hash"`
+	Votes             json.RawMessage `json:"height_vote_set"`
+}
+
+// Compress the RoundState to RoundStateSimple
+func (rs *RoundState) RoundStateSimple() RoundStateSimple {
+	votesJSON, err := rs.Votes.MarshalJSON()
+	if err != nil {
+		panic(err)
+	}
+	return RoundStateSimple{
+		HeightRoundStep:   fmt.Sprintf("%d/%d/%d", rs.Height, rs.Round, rs.Step),
+		StartTime:         rs.StartTime,
+		ProposalBlockHash: rs.ProposalBlock.Hash(),
+		LockedBlockHash:   rs.LockedBlock.Hash(),
+		ValidBlockHash:    rs.ValidBlock.Hash(),
+		Votes:             votesJSON,
+	}
+}
+
+// RoundStateEvent returns the H/R/S of the RoundState as an event.
+func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { + // XXX: copy the RoundState + // if we want to avoid this, we may need synchronous events after all + rsCopy := *rs + edrs := types.EventDataRoundState{ + Height: rs.Height, + Round: rs.Round, + Step: rs.Step.String(), + RoundState: &rsCopy, + } + return edrs +} + +// String returns a string +func (rs *RoundState) String() string { + return rs.StringIndented("") +} + +// StringIndented returns a string +func (rs *RoundState) StringIndented(indent string) string { + return fmt.Sprintf(`RoundState{ +%s H:%v R:%v S:%v +%s StartTime: %v +%s CommitTime: %v +%s Validators: %v +%s Proposal: %v +%s ProposalBlock: %v %v +%s LockedRound: %v +%s LockedBlock: %v %v +%s ValidRound: %v +%s ValidBlock: %v %v +%s Votes: %v +%s LastCommit: %v +%s LastValidators:%v +%s}`, + indent, rs.Height, rs.Round, rs.Step, + indent, rs.StartTime, + indent, rs.CommitTime, + indent, rs.Validators.StringIndented(indent+" "), + indent, rs.Proposal, + indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(), + indent, rs.LockedRound, + indent, rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort(), + indent, rs.ValidRound, + indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(), + indent, rs.Votes.StringIndented(indent+" "), + indent, rs.LastCommit.StringShort(), + indent, rs.LastValidators.StringIndented(indent+" "), + indent) +} + +// StringShort returns a string +func (rs *RoundState) StringShort() string { + return fmt.Sprintf(`RoundState{H:%v R:%v S:%v ST:%v}`, + rs.Height, rs.Round, rs.Step, rs.StartTime) +} diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go new file mode 100644 index 000000000..080178f24 --- /dev/null +++ b/consensus/types/round_state_test.go @@ -0,0 +1,95 @@ +package types + +import ( + "testing" + "time" + + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func BenchmarkRoundStateDeepCopy(b *testing.B) { + b.StopTimer() + + // Random validators + nval, ntxs := 100, 100 + vset, _ := types.RandValidatorSet(nval, 1) + precommits := make([]*types.Vote, nval) + blockID := types.BlockID{ + Hash: cmn.RandBytes(20), + PartsHeader: types.PartSetHeader{ + Hash: cmn.RandBytes(20), + }, + } + sig := crypto.SignatureEd25519{} + for i := 0; i < nval; i++ { + precommits[i] = &types.Vote{ + ValidatorAddress: types.Address(cmn.RandBytes(20)), + Timestamp: time.Now(), + BlockID: blockID, + Signature: sig, + } + } + txs := make([]types.Tx, ntxs) + for i := 0; i < ntxs; i++ { + txs[i] = cmn.RandBytes(100) + } + // Random block + block := &types.Block{ + Header: &types.Header{ + ChainID: cmn.RandStr(12), + Time: time.Now(), + LastBlockID: blockID, + LastCommitHash: cmn.RandBytes(20), + DataHash: cmn.RandBytes(20), + ValidatorsHash: cmn.RandBytes(20), + ConsensusHash: cmn.RandBytes(20), + AppHash: cmn.RandBytes(20), + LastResultsHash: cmn.RandBytes(20), + EvidenceHash: cmn.RandBytes(20), + }, + Data: &types.Data{ + Txs: txs, + }, + Evidence: types.EvidenceData{}, + LastCommit: &types.Commit{ + BlockID: blockID, + Precommits: precommits, + }, + } + parts := block.MakePartSet(4096) + // Random Proposal + proposal := &types.Proposal{ + Timestamp: time.Now(), + BlockPartsHeader: types.PartSetHeader{ + Hash: cmn.RandBytes(20), + }, + POLBlockID: blockID, + Signature: sig, + } + // Random HeightVoteSet + // TODO: hvs := + + rs := &RoundState{ + 
StartTime:          time.Now(),
+		CommitTime:         time.Now(),
+		Validators:         vset,
+		Proposal:           proposal,
+		ProposalBlock:      block,
+		ProposalBlockParts: parts,
+		LockedBlock:        block,
+		LockedBlockParts:   parts,
+		ValidBlock:         block,
+		ValidBlockParts:    parts,
+		Votes:              nil, // TODO
+		LastCommit:         nil, // TODO
+		LastValidators:     vset,
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		amino.DeepCopy(rs)
+	}
+}
diff --git a/consensus/types/wire.go b/consensus/types/wire.go
new file mode 100644
index 000000000..6342d7eba
--- /dev/null
+++ b/consensus/types/wire.go
@@ -0,0 +1,12 @@
+package types
+
+import (
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	crypto.RegisterAmino(cdc)
+}
diff --git a/consensus/version.go b/consensus/version.go
new file mode 100644
index 000000000..5c74a16db
--- /dev/null
+++ b/consensus/version.go
@@ -0,0 +1,13 @@
+package consensus
+
+import (
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// kind of arbitrary
+var Spec = "1"     // async
+var Major = "0"    //
+var Minor = "2"    // replay refactor
+var Revision = "2" // validation -> commit
+
+var Version = cmn.Fmt("v%s/%s.%s.%s", Spec, Major, Minor, Revision)
diff --git a/consensus/wal.go b/consensus/wal.go
new file mode 100644
index 000000000..8c4c10bc7
--- /dev/null
+++ b/consensus/wal.go
@@ -0,0 +1,323 @@
+package consensus
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"path/filepath"
+	"time"
+
+	"github.com/pkg/errors"
+
+	amino "github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/types"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+const (
+	// must be greater than params.BlockGossip.BlockPartSizeBytes + a few bytes
+	maxMsgSizeBytes = 1024 * 1024 // 1MB
+)
+
+//--------------------------------------------------------
+// types and functions for saving consensus messages
+
+type TimedWALMessage struct {
+	Time time.Time  `json:"time"` // for debugging purposes
+	Msg  WALMessage `json:"msg"`
+}
+
+// EndHeightMessage marks the end of the given height inside WAL.
+// @internal used by scripts/wal2json util.
+type EndHeightMessage struct {
+	Height int64 `json:"height"`
+}
+
+type WALMessage interface{}
+
+func RegisterWALMessages(cdc *amino.Codec) {
+	cdc.RegisterInterface((*WALMessage)(nil), nil)
+	cdc.RegisterConcrete(types.EventDataRoundState{}, "tendermint/wal/EventDataRoundState", nil)
+	cdc.RegisterConcrete(msgInfo{}, "tendermint/wal/MsgInfo", nil)
+	cdc.RegisterConcrete(timeoutInfo{}, "tendermint/wal/TimeoutInfo", nil)
+	cdc.RegisterConcrete(EndHeightMessage{}, "tendermint/wal/EndHeightMessage", nil)
+}
+
+//--------------------------------------------------------
+// Simple write-ahead logger
+
+// WAL is an interface for any write-ahead logger.
+type WAL interface {
+	Write(WALMessage)
+	WriteSync(WALMessage)
+	Group() *auto.Group
+	SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error)
+
+	Start() error
+	Stop() error
+	Wait()
+}
+
+// Write ahead logger writes msgs to disk before they are processed.
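+// Each entry is written as a TimedWALMessage framed by WALEncoder (see below).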
+// Can be used for crash-recovery and deterministic replay
+// TODO: currently the wal is overwritten during replay catchup
+// give it a mode so it's either reading or appending - must read to end to start appending again
+type baseWAL struct {
+	cmn.BaseService
+
+	group *auto.Group
+
+	enc *WALEncoder
+}
+
+func NewWAL(walFile string) (*baseWAL, error) {
+	err := cmn.EnsureDir(filepath.Dir(walFile), 0700)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to ensure WAL directory is in place")
+	}
+
+	group, err := auto.OpenGroup(walFile)
+	if err != nil {
+		return nil, err
+	}
+	wal := &baseWAL{
+		group: group,
+		enc:   NewWALEncoder(group),
+	}
+	wal.BaseService = *cmn.NewBaseService(nil, "baseWAL", wal)
+	return wal, nil
+}
+
+func (wal *baseWAL) Group() *auto.Group {
+	return wal.group
+}
+
+func (wal *baseWAL) OnStart() error {
+	size, err := wal.group.Head.Size()
+	if err != nil {
+		return err
+	} else if size == 0 {
+		wal.WriteSync(EndHeightMessage{0})
+	}
+	err = wal.group.Start()
+	return err
+}
+
+func (wal *baseWAL) OnStop() {
+	wal.group.Stop()
+	wal.group.Close()
+}
+
+// Write is called in newStep and for each receive on the
+// peerMsgQueue and the timeoutTicker.
+// NOTE: does not call fsync()
+func (wal *baseWAL) Write(msg WALMessage) {
+	if wal == nil {
+		return
+	}
+
+	// Write the wal message
+	if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil {
+		panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
+	}
+}
+
+// WriteSync is called when we receive a msg from ourselves
+// so that we write to disk before sending signed messages.
+// NOTE: calls fsync()
+func (wal *baseWAL) WriteSync(msg WALMessage) {
+	if wal == nil {
+		return
+	}
+
+	wal.Write(msg)
+	if err := wal.group.Flush(); err != nil {
+		panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
+	}
+}
+
+// WALSearchOptions are optional arguments to SearchForEndHeight.
+type WALSearchOptions struct {
+	// IgnoreDataCorruptionErrors set to true will result in skipping data corruption errors.
+	IgnoreDataCorruptionErrors bool
+}
+
+// SearchForEndHeight searches for the EndHeightMessage with the given height
+// and returns an auto.GroupReader, whether it was found or not, and an error.
+// The group reader will be nil if found equals false.
+//
+// CONTRACT: caller must close group reader.
+func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
+	var msg *TimedWALMessage
+	lastHeightFound := int64(-1)
+
+	// NOTE: starting from the last file in the group because we're usually
+	// searching for the last height. See replay.go
+	min, max := wal.group.MinIndex(), wal.group.MaxIndex()
+	wal.Logger.Debug("Searching for height", "height", height, "min", min, "max", max)
+	for index := max; index >= min; index-- {
+		gr, err = wal.group.NewReader(index)
+		if err != nil {
+			return nil, false, err
+		}
+
+		dec := NewWALDecoder(gr)
+		for {
+			msg, err = dec.Decode()
+			if err == io.EOF {
+				// OPTIMISATION: no need to look for height in older files if we've seen h < height
+				if lastHeightFound > 0 && lastHeightFound < height {
+					gr.Close()
+					return nil, false, nil
+				}
+				// check next file
+				break
+			}
+			if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
+				wal.Logger.Debug("Corrupted entry. Skipping...", "err", err)
+				// do nothing
+				continue
+			} else if err != nil {
+				gr.Close()
+				return nil, false, err
+			}
+
+			if m, ok := msg.Msg.(EndHeightMessage); ok {
+				lastHeightFound = m.Height
+				if m.Height == height { // found
+					wal.Logger.Debug("Found", "height", height, "index", index)
+					return gr, true, nil
+				}
+			}
+		}
+		gr.Close()
+	}
+
+	return nil, false, nil
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// A WALEncoder writes custom-encoded WAL messages to an output stream.
+//
+// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-amino encoded)
+type WALEncoder struct {
+	wr io.Writer
+}
+
+// NewWALEncoder returns a new encoder that writes to wr.
+func NewWALEncoder(wr io.Writer) *WALEncoder {
+	return &WALEncoder{wr}
+}
+
+// Encode writes the custom encoding of v to the stream.
+func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
+	data := cdc.MustMarshalBinaryBare(v)
+
+	crc := crc32.Checksum(data, crc32c)
+	length := uint32(len(data))
+	totalLength := 8 + int(length)
+
+	msg := make([]byte, totalLength)
+	binary.BigEndian.PutUint32(msg[0:4], crc)
+	binary.BigEndian.PutUint32(msg[4:8], length)
+	copy(msg[8:], data)
+
+	_, err := enc.wr.Write(msg)
+
+	return err
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// IsDataCorruptionError returns true if data has been corrupted inside WAL.
+func IsDataCorruptionError(err error) bool {
+	_, ok := err.(DataCorruptionError)
+	return ok
+}
+
+// DataCorruptionError is an error that occurs if data on disk was corrupted.
+type DataCorruptionError struct {
+	cause error
+}
+
+func (e DataCorruptionError) Error() string {
+	return fmt.Sprintf("DataCorruptionError[%v]", e.cause)
+}
+
+func (e DataCorruptionError) Cause() error {
+	return e.cause
+}
+
+// A WALDecoder reads and decodes custom-encoded WAL messages from an input
+// stream. See WALEncoder for the format used.
+//
+// It will also compare the checksums and make sure data size is equal to the
+// length from the header. If that is not the case, an error will be returned.
+type WALDecoder struct {
+	rd io.Reader
+}
+
+// NewWALDecoder returns a new decoder that reads from rd.
+func NewWALDecoder(rd io.Reader) *WALDecoder {
+	return &WALDecoder{rd}
+}
+
+// Decode reads the next custom-encoded value from its reader and returns it.
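+// It expects the framing produced by WALEncoder: a big-endian crc32c checksum
+// (4 bytes), a big-endian length (4 bytes), then `length` bytes of go-amino
+// encoded TimedWALMessage.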
+func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
+	b := make([]byte, 4)
+
+	// NOTE: io.ReadFull is used instead of a bare Read so that a partial read
+	// from the underlying reader cannot be mistaken for a complete header field.
+	_, err := io.ReadFull(dec.rd, b)
+	if err == io.EOF {
+		return nil, err
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to read checksum: %v", err)
+	}
+	crc := binary.BigEndian.Uint32(b)
+
+	b = make([]byte, 4)
+	_, err = io.ReadFull(dec.rd, b)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read length: %v", err)
+	}
+	length := binary.BigEndian.Uint32(b)
+
+	if length > maxMsgSizeBytes {
+		return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
+	}
+
+	data := make([]byte, length)
+	_, err = io.ReadFull(dec.rd, data)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read data: %v", err)
+	}
+
+	// check checksum before decoding data
+	actualCRC := crc32.Checksum(data, crc32c)
+	if actualCRC != crc {
+		return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
+	}
+
+	var res = new(TimedWALMessage) // nolint: gosimple
+	err = cdc.UnmarshalBinaryBare(data, res)
+	if err != nil {
+		return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)}
+	}
+
+	return res, err
+}
+
+type nilWAL struct{}
+
+func (nilWAL) Write(m WALMessage)     {}
+func (nilWAL) WriteSync(m WALMessage) {}
+func (nilWAL) Group() *auto.Group     { return nil }
+func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
+	return nil, false, nil
+}
+func (nilWAL) Start() error { return nil }
+func (nilWAL) Stop() error  { return nil }
+func (nilWAL) Wait()        {}
diff --git a/consensus/wal_fuzz.go b/consensus/wal_fuzz.go
new file mode 100644
index 000000000..e15097c30
--- /dev/null
+++ b/consensus/wal_fuzz.go
@@ -0,0 +1,31 @@
+// +build gofuzz
+
+package consensus
+
+import (
+	"bytes"
+	"io"
+)
+
+func Fuzz(data []byte) int {
+	dec := NewWALDecoder(bytes.NewReader(data))
+	for {
+		msg, err := dec.Decode()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			if msg != nil {
+				panic("msg != nil on error")
+			}
+			return 0
+		}
+		var w bytes.Buffer
+		enc := NewWALEncoder(&w)
+		err = enc.Encode(msg)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return 1
+}
diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go
new file mode 100644
index 000000000..f3a365809
--- /dev/null
+++ b/consensus/wal_generator.go
@@ -0,0 +1,205 @@
+package consensus
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	bc "github.com/tendermint/tendermint/blockchain"
+	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/privval"
+	"github.com/tendermint/tendermint/proxy"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/types"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+// WALWithNBlocks generates a consensus WAL. It does this by spinning up a
+// stripped down version of node (proxy app, event bus, consensus state) with a
+// persistent kvstore application and a special consensus wal instance
+// (byteBufferWAL), and waits until numBlocks are created. Then it returns the
+// WAL contents. If the node fails to produce the given numBlocks, it returns
+// an error.
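+//
+// Illustrative usage (cf. TestWALSearchForEndHeight in wal_test.go):
+//
+//	walBody, err := WALWithNBlocks(6)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	walFile := tempWALWithData(walBody)
+//	wal, err := NewWAL(walFile)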
+func WALWithNBlocks(numBlocks int) (data []byte, err error) { + config := getConfig() + + app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) + + logger := log.TestingLogger().With("wal_generator", "wal_generator") + logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks) + + ///////////////////////////////////////////////////////////////////////////// + // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS + // NOTE: we can't import node package because of circular dependency + privValidatorFile := config.PrivValidatorFile() + privValidator := privval.LoadOrGenFilePV(privValidatorFile) + genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + if err != nil { + return nil, errors.Wrap(err, "failed to read genesis file") + } + stateDB := db.NewMemDB() + blockStoreDB := db.NewMemDB() + state, err := sm.MakeGenesisState(genDoc) + if err != nil { + return nil, errors.Wrap(err, "failed to make genesis state") + } + blockStore := bc.NewBlockStore(blockStoreDB) + handshaker := NewHandshaker(stateDB, state, blockStore, genDoc) + proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker) + proxyApp.SetLogger(logger.With("module", "proxy")) + if err := proxyApp.Start(); err != nil { + return nil, errors.Wrap(err, "failed to start proxy app connections") + } + defer proxyApp.Stop() + eventBus := types.NewEventBus() + eventBus.SetLogger(logger.With("module", "events")) + if err := eventBus.Start(); err != nil { + return nil, errors.Wrap(err, "failed to start event bus") + } + defer eventBus.Stop() + mempool := sm.MockMempool{} + evpool := sm.MockEvidencePool{} + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) + consensusState.SetLogger(logger) + consensusState.SetEventBus(eventBus) + if privValidator != nil { + consensusState.SetPrivValidator(privValidator) + } + // END OF COPY PASTE + ///////////////////////////////////////////////////////////////////////////// + + // set consensus wal to buffered WAL, which will write all incoming msgs to buffer + var b bytes.Buffer + wr := bufio.NewWriter(&b) + numBlocksWritten := make(chan struct{}) + wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) + // see wal.go#103 + wal.Write(EndHeightMessage{0}) + consensusState.wal = wal + + if err := consensusState.Start(); err != nil { + return nil, errors.Wrap(err, "failed to start consensus state") + } + + select { + case <-numBlocksWritten: + consensusState.Stop() + wr.Flush() + return b.Bytes(), nil + case <-time.After(1 * time.Minute): + consensusState.Stop() + return []byte{}, fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) + } +} + +// f**ing long, but unique for each test +func makePathname() string { + // get path + p, err := os.Getwd() + if err != nil { + panic(err) + } + // fmt.Println(p) + sep := string(filepath.Separator) + return strings.Replace(p, sep, "_", -1) +} + +func randPort() int { + // returns between base and base + spread + base, spread := 20000, 20000 + return base + cmn.RandIntn(spread) +} + +func makeAddrs() (string, string, string) { + start := randPort() + return fmt.Sprintf("tcp://0.0.0.0:%d", start), + fmt.Sprintf("tcp://0.0.0.0:%d", start+1), + fmt.Sprintf("tcp://0.0.0.0:%d", start+2) +} + +// getConfig returns a config for test cases +func 
getConfig() *cfg.Config {
+	pathname := makePathname()
+	c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt()))
+
+	// and we use random ports to run in parallel
+	tm, rpc, grpc := makeAddrs()
+	c.P2P.ListenAddress = tm
+	c.RPC.ListenAddress = rpc
+	c.RPC.GRPCListenAddress = grpc
+	return c
+}
+
+// byteBufferWAL is a WAL which writes all msgs to a byte buffer. Writing stops
+// when the heightToStop is reached. Client will be notified via
+// signalWhenStopsTo channel.
+type byteBufferWAL struct {
+	enc               *WALEncoder
+	stopped           bool
+	heightToStop      int64
+	signalWhenStopsTo chan<- struct{}
+
+	logger log.Logger
+}
+
+// needed for determinism
+var fixedTime, _ = time.Parse(time.RFC3339, "2017-01-02T15:04:05Z")
+
+func newByteBufferWAL(logger log.Logger, enc *WALEncoder, nBlocks int64, signalStop chan<- struct{}) *byteBufferWAL {
+	return &byteBufferWAL{
+		enc:               enc,
+		heightToStop:      nBlocks,
+		signalWhenStopsTo: signalStop,
+		logger:            logger,
+	}
+}
+
+// Write writes the message to the internal buffer, except when heightToStop is
+// reached, in which case it will signal the caller via signalWhenStopsTo and
+// skip writing.
+func (w *byteBufferWAL) Write(m WALMessage) {
+	if w.stopped {
+		w.logger.Debug("WAL already stopped. Not writing message", "msg", m)
+		return
+	}
+
+	if endMsg, ok := m.(EndHeightMessage); ok {
+		w.logger.Debug("WAL write end height message", "height", endMsg.Height, "stopHeight", w.heightToStop)
+		if endMsg.Height == w.heightToStop {
+			w.logger.Debug("Stopping WAL at height", "height", endMsg.Height)
+			w.signalWhenStopsTo <- struct{}{}
+			w.stopped = true
+			return
+		}
+	}
+
+	w.logger.Debug("WAL Write Message", "msg", m)
+	err := w.enc.Encode(&TimedWALMessage{fixedTime, m})
+	if err != nil {
+		panic(fmt.Sprintf("failed to encode the msg %v", m))
+	}
+}
+
+func (w *byteBufferWAL) WriteSync(m WALMessage) {
+	w.Write(m)
+}
+
+func (w *byteBufferWAL) Group() *auto.Group {
+	panic("not implemented")
+}
+func (w *byteBufferWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
+	return nil, false, nil
+}
+
+func (w *byteBufferWAL) Start() error { return nil }
+func (w *byteBufferWAL) Stop() error  { return nil }
+func (w *byteBufferWAL) Wait()        {}
diff --git a/consensus/wal_test.go b/consensus/wal_test.go
new file mode 100644
index 000000000..3ecb4fe8f
--- /dev/null
+++ b/consensus/wal_test.go
@@ -0,0 +1,133 @@
+package consensus
+
+import (
+	"bytes"
+	"crypto/rand"
+	// "sync"
+	"testing"
+	"time"
+
+	"github.com/tendermint/tendermint/consensus/types"
+	tmtypes "github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestWALEncoderDecoder(t *testing.T) {
+	now := time.Now()
+	msgs := []TimedWALMessage{
+		TimedWALMessage{Time: now, Msg: EndHeightMessage{0}},
+		TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
+	}
+
+	b := new(bytes.Buffer)
+
+	for _, msg := range msgs {
+		b.Reset()
+
+		enc := NewWALEncoder(b)
+		err := enc.Encode(&msg)
+		require.NoError(t, err)
+
+		dec := NewWALDecoder(b)
+		decoded, err := dec.Decode()
+		require.NoError(t, err)
+
+		assert.Equal(t, msg.Time.UTC(), decoded.Time)
+		assert.Equal(t, msg.Msg, decoded.Msg)
+	}
+}
+
+func TestWALSearchForEndHeight(t *testing.T) {
+	walBody, err := WALWithNBlocks(6)
+	if err != nil {
+		t.Fatal(err)
+	}
+	walFile := tempWALWithData(walBody)
+
+	wal, err := 
NewWAL(walFile) + if err != nil { + t.Fatal(err) + } + + h := int64(3) + gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) + assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h)) + assert.True(t, found, cmn.Fmt("expected to find end height for %d", h)) + assert.NotNil(t, gr, "expected group not to be nil") + defer gr.Close() + + dec := NewWALDecoder(gr) + msg, err := dec.Decode() + assert.NoError(t, err, "expected to decode a message") + rs, ok := msg.Msg.(tmtypes.EventDataRoundState) + assert.True(t, ok, "expected message of type EventDataRoundState") + assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) +} + +/* +var initOnce sync.Once + +func registerInterfacesOnce() { + initOnce.Do(func() { + var _ = wire.RegisterInterface( + struct{ WALMessage }{}, + wire.ConcreteType{[]byte{}, 0x10}, + ) + }) +} +*/ + +func nBytes(n int) []byte { + buf := make([]byte, n) + n, _ = rand.Read(buf) + return buf[:n] +} + +func benchmarkWalDecode(b *testing.B, n int) { + // registerInterfacesOnce() + + buf := new(bytes.Buffer) + enc := NewWALEncoder(buf) + + data := nBytes(n) + enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)}) + + encoded := buf.Bytes() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + buf.Write(encoded) + dec := NewWALDecoder(buf) + if _, err := dec.Decode(); err != nil { + b.Fatal(err) + } + } + b.ReportAllocs() +} + +func BenchmarkWalDecode512B(b *testing.B) { + benchmarkWalDecode(b, 512) +} + +func BenchmarkWalDecode10KB(b *testing.B) { + benchmarkWalDecode(b, 10*1024) +} +func BenchmarkWalDecode100KB(b *testing.B) { + benchmarkWalDecode(b, 100*1024) +} +func BenchmarkWalDecode1MB(b *testing.B) { + benchmarkWalDecode(b, 1024*1024) +} +func BenchmarkWalDecode10MB(b *testing.B) { + benchmarkWalDecode(b, 10*1024*1024) +} +func BenchmarkWalDecode100MB(b *testing.B) { + benchmarkWalDecode(b, 100*1024*1024) +} +func BenchmarkWalDecode1GB(b *testing.B) { + benchmarkWalDecode(b, 1024*1024*1024) +} diff --git a/consensus/wire.go b/consensus/wire.go new file mode 100644 index 000000000..5f231c0c7 --- /dev/null +++ b/consensus/wire.go @@ -0,0 +1,14 @@ +package consensus + +import ( + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" +) + +var cdc = amino.NewCodec() + +func init() { + RegisterConsensusMessages(cdc) + RegisterWALMessages(cdc) + crypto.RegisterAmino(cdc) +} diff --git a/crypto/CHANGELOG.md b/crypto/CHANGELOG.md new file mode 100644 index 000000000..dd7c1039f --- /dev/null +++ b/crypto/CHANGELOG.md @@ -0,0 +1,154 @@ +# Changelog + +## 0.9.0 + +BREAKING CHANGES + +- `priv.PubKey()` no longer returns an error. Any applicable errors (such as when fetching the public key from a hardware wallet) should be checked and returned when constructing the private key. + +## 0.8.0 + +**TBD** + +## 0.7.0 + +**May 30th, 2018** + +BREAKING CHANGES + +No breaking changes compared to 0.6.2, but making up for the version bump that +should have happened in 0.6.1. 
+ +We also bring in the `tmlibs/merkle` package with breaking changes: + +- change the hash function from RIPEMD160 to tmhash (first 20-bytes of SHA256) +- remove unused funcs and unexport SimpleMap + +FEATURES + +- [xchacha20poly1305] New authenticated encryption module +- [merkle] Moved in from tmlibs +- [merkle/tmhash] New hash function: the first 20-bytes of SHA256 + +IMPROVEMENTS + +- Remove some dead code +- Use constant-time compare for signatures + +BUG FIXES + +- Fix MixEntropy weakness +- Fix PrivKeyEd25519.Generate() + +## 0.6.2 (April 9, 2018) + +IMPROVEMENTS + +- Update for latest go-amino + +## 0.6.1 (March 26, 2018) + +BREAKING CHANGES + +- Encoding uses MarshalBinaryBare rather than MarshalBinary (which auto-length-prefixes) for pub/priv/sig. + +## 0.6.0 (March 2, 2018) + +BREAKING CHANGES + +- Update Amino names from "com.tendermint/..." to "tendermint/" + +## 0.5.0 (March 2, 2018) + +BREAKING CHANGES + +- nano: moved to `_nano` now while we're having build issues +- bcrypt: moved to `keys/bcrypt` +- hd: moved to `keys/hd`; `BTC` added to some function names; other function cleanup +- keys/cryptostore: moved to `keys`, renamed to `keybase`, and completely refactored +- keys: moved BIP39 related code to `keys/words` + +FEATURE + +- `Address` is a type alias for `cmn.HexBytes` + +BUG FIX + +- PrivKey comparisons done in constant time + +## 0.4.1 (October 27, 2017) + +This release removes support for bcrypt as it was merged too soon without an upgrade plan +for existing keys. + +REVERTS THE FOLLOWING COMMITS: + +- Parameterize and lower bcrypt cost - dfc4cdd2d71513e4a9922d679c74f36357c4c862 +- Upgrade keys to use bcrypt with salts (#38) - 8e7f0e7701f92206679ad093d013b9b162427631 + +## 0.4.0 (October 27, 2017) + +BREAKING CHANGES: + +- `keys`: use bcrypt plus salt + +FEATURES: + +- add support for signing via Ledger Nano + +IMPROVEMENTS: + +- linting and comments + +## 0.3.0 (September 22, 2017) + +BREAKING CHANGES: + +- Remove `cmd` and `keys/tx` packages altogether: move it to the cosmos-sdk +- `cryptostore.Generator` takes a secret +- Remove `String()` from `Signature` interface + +FEATURES: + +- `keys`: add CRC16 error correcting code + +IMPROVEMENTS: + +- Allow no passwords on keys for development convenience + + +## 0.2.1 (June 21, 2017) + +- Improve keys command + - No password prompts in non-interactive mode (echo 'foobar' | keys new foo) + - Added support for seed phrases + - Seed phrase now returned on `keys new` + - Add `keys restore` to restore private key from key phrase + - Checksum to verify typos in the seed phrase (rather than just a useless key) + - Add `keys delete` to remove a key if needed + +## 0.2.0 (May 18, 2017) + +BREAKING CHANGES: + +- [hd] The following functions no longer take a `coin string` as argument: `ComputeAddress`, `AddrFromPubKeyBytes`, `ComputeAddressForPrivKey`, `ComputeWIF`, `WIFFromPrivKeyBytes` +- Changes to `PrivKey`, `PubKey`, and `Signature` (denoted `Xxx` below): + - interfaces are renamed `XxxInner`, and are not for use outside the package, though they must be exposed for sake of serialization. 
+ - `Xxx` is now a struct that wraps the corresponding `XxxInner` interface + +FEATURES: + +- `github.com/tendermint/go-keys -> github.com/tendermint/go-crypto/keys` - command and lib for generating and managing encrypted keys +- [hd] New function `WIFFromPrivKeyBytes(privKeyBytes []byte, compress bool) string` +- Changes to `PrivKey`, `PubKey`, and `Signature` (denoted `Xxx` below): + - Expose a new method `Unwrap() XxxInner` on the `Xxx` struct which returns the corresponding `XxxInner` interface + - Expose a new method `Wrap() Xxx` on the `XxxInner` interface which returns the corresponding `Xxx` struct + +IMPROVEMENTS: + +- Update to use new `tmlibs` repository + +## 0.1.0 (April 14, 2017) + +Initial release + diff --git a/crypto/README.md b/crypto/README.md new file mode 100644 index 000000000..32afde699 --- /dev/null +++ b/crypto/README.md @@ -0,0 +1,25 @@ +# crypto + +crypto is the cryptographic package adapted for Tendermint's uses + +## Importing it +`import "github.com/tendermint/tendermint/crypto"` + +## Binary encoding + +For Binary encoding, please refer to the [Tendermint encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md). + +## JSON Encoding + +crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported. + +```go +Example Amino:JSON encodings: + +crypto.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="} +crypto.SignatureEd25519 - {"type":"6BF5903DA1DB28","value":"77sQNZOrf7ltExpf7AV1WaYPCHbyRLgjBsoWVzcduuLk+jIGmYk+s5R6Emm29p12HeiNAuhUJgdFGmwkpeGJCA=="} +crypto.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="} +crypto.PrivKeySecp256k1 - {"type":"019E82E1B0F798","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="} +crypto.SignatureSecp256k1 - {"type":"6D1EA416E1FEE8","value":"MEUCIQCIg5TqS1l7I+MKTrSPIuUN2+4m5tA29dcauqn3NhEJ2wIgICaZ+lgRc5aOTVahU/XoLopXKn8BZcl0bnuYWLvohR8="} +crypto.PubKeySecp256k1 - {"type":"F8CCEAEB5AE980","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"} +``` diff --git a/crypto/amino.go b/crypto/amino.go new file mode 100644 index 000000000..6a8703fc9 --- /dev/null +++ b/crypto/amino.go @@ -0,0 +1,37 @@ +package crypto + +import ( + amino "github.com/tendermint/go-amino" +) + +var cdc = amino.NewCodec() + +func init() { + // NOTE: It's important that there be no conflicts here, + // as that would change the canonical representations, + // and therefore change the address. + // TODO: Add feature to go-amino to ensure that there + // are no conflicts. + RegisterAmino(cdc) +} + +// RegisterAmino registers all crypto related types in the given (amino) codec. 
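+//
+// For example (as consensus/wire.go above does), a package-level codec can
+// reuse these registrations:
+//
+//	var cdc = amino.NewCodec()
+//
+//	func init() {
+//		crypto.RegisterAmino(cdc)
+//	}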
+func RegisterAmino(cdc *amino.Codec) { + cdc.RegisterInterface((*PubKey)(nil), nil) + cdc.RegisterConcrete(PubKeyEd25519{}, + "tendermint/PubKeyEd25519", nil) + cdc.RegisterConcrete(PubKeySecp256k1{}, + "tendermint/PubKeySecp256k1", nil) + + cdc.RegisterInterface((*PrivKey)(nil), nil) + cdc.RegisterConcrete(PrivKeyEd25519{}, + "tendermint/PrivKeyEd25519", nil) + cdc.RegisterConcrete(PrivKeySecp256k1{}, + "tendermint/PrivKeySecp256k1", nil) + + cdc.RegisterInterface((*Signature)(nil), nil) + cdc.RegisterConcrete(SignatureEd25519{}, + "tendermint/SignatureEd25519", nil) + cdc.RegisterConcrete(SignatureSecp256k1{}, + "tendermint/SignatureSecp256k1", nil) +} diff --git a/crypto/armor.go b/crypto/armor.go new file mode 100644 index 000000000..4146048ad --- /dev/null +++ b/crypto/armor.go @@ -0,0 +1,39 @@ +package crypto + +import ( + "bytes" + "fmt" + "io/ioutil" + + "golang.org/x/crypto/openpgp/armor" +) + +func EncodeArmor(blockType string, headers map[string]string, data []byte) string { + buf := new(bytes.Buffer) + w, err := armor.Encode(buf, blockType, headers) + if err != nil { + panic(fmt.Errorf("could not encode ascii armor: %s", err)) + } + _, err = w.Write(data) + if err != nil { + panic(fmt.Errorf("could not encode ascii armor: %s", err)) + } + err = w.Close() + if err != nil { + panic(fmt.Errorf("could not encode ascii armor: %s", err)) + } + return buf.String() +} + +func DecodeArmor(armorStr string) (blockType string, headers map[string]string, data []byte, err error) { + buf := bytes.NewBufferString(armorStr) + block, err := armor.Decode(buf) + if err != nil { + return "", nil, nil, err + } + data, err = ioutil.ReadAll(block.Body) + if err != nil { + return "", nil, nil, err + } + return block.Type, block.Header, data, nil +} diff --git a/crypto/armor_test.go b/crypto/armor_test.go new file mode 100644 index 000000000..5eae87c00 --- /dev/null +++ b/crypto/armor_test.go @@ -0,0 +1,20 @@ +package crypto + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSimpleArmor(t *testing.T) { + blockType := "MINT TEST" + data := []byte("somedata") + armorStr := EncodeArmor(blockType, nil, data) + + // Decode armorStr and test for equivalence. + blockType2, _, data2, err := DecodeArmor(armorStr) + require.Nil(t, err, "%+v", err) + assert.Equal(t, blockType, blockType2) + assert.Equal(t, data, data2) +} diff --git a/crypto/doc.go b/crypto/doc.go new file mode 100644 index 000000000..544e0df36 --- /dev/null +++ b/crypto/doc.go @@ -0,0 +1,45 @@ +// crypto is a customized/convenience cryptography package for supporting +// Tendermint. + +// It wraps select functionality of equivalent functions in the +// Go standard library, for easy usage with our libraries. + +// Keys: + +// All key generation functions return an instance of the PrivKey interface +// which implements methods + +// AssertIsPrivKeyInner() +// Bytes() []byte +// Sign(msg []byte) Signature +// PubKey() PubKey +// Equals(PrivKey) bool +// Wrap() PrivKey + +// From the above method we can: +// a) Retrieve the public key if needed + +// pubKey := key.PubKey() + +// For example: +// privKey, err := crypto.GenPrivKeyEd25519() +// if err != nil { +// ... +// } +// pubKey := privKey.PubKey() +// ... 
+// // And then you can use the private and public key +// doSomething(privKey, pubKey) + +// We also provide hashing wrappers around algorithms: + +// Sha256 +// sum := crypto.Sha256([]byte("This is Tendermint")) +// fmt.Printf("%x\n", sum) + +// Ripemd160 +// sum := crypto.Ripemd160([]byte("This is consensus")) +// fmt.Printf("%x\n", sum) +package crypto + +// TODO: Add more docs in here diff --git a/crypto/encode_test.go b/crypto/encode_test.go new file mode 100644 index 000000000..16555bf71 --- /dev/null +++ b/crypto/encode_test.go @@ -0,0 +1,119 @@ +package crypto + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type byter interface { + Bytes() []byte +} + +func checkAminoBinary(t *testing.T, src byter, dst interface{}, size int) { + // Marshal to binary bytes. + bz, err := cdc.MarshalBinaryBare(src) + require.Nil(t, err, "%+v", err) + // Make sure this is compatible with current (Bytes()) encoding. + assert.Equal(t, src.Bytes(), bz, "Amino binary vs Bytes() mismatch") + // Make sure we have the expected length. + if size != -1 { + assert.Equal(t, size, len(bz), "Amino binary size mismatch") + } + // Unmarshal. + err = cdc.UnmarshalBinaryBare(bz, dst) + require.Nil(t, err, "%+v", err) +} + +func checkAminoJSON(t *testing.T, src interface{}, dst interface{}, isNil bool) { + // Marshal to JSON bytes. + js, err := cdc.MarshalJSON(src) + require.Nil(t, err, "%+v", err) + if isNil { + assert.Equal(t, string(js), `null`) + } else { + assert.Contains(t, string(js), `"type":`) + assert.Contains(t, string(js), `"value":`) + } + // Unmarshal. + err = cdc.UnmarshalJSON(js, dst) + require.Nil(t, err, "%+v", err) +} + +func ExamplePrintRegisteredTypes() { + cdc.PrintTypes(os.Stdout) + // Output: | Type | Name | Prefix | Length | Notes | + //| ---- | ---- | ------ | ----- | ------ | + //| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | + //| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | + //| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | + //| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | + //| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | | + //| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | | +} + +func TestKeyEncodings(t *testing.T) { + cases := []struct { + privKey PrivKey + privSize, pubSize int // binary sizes + }{ + { + privKey: GenPrivKeyEd25519(), + privSize: 69, + pubSize: 37, + }, + { + privKey: GenPrivKeySecp256k1(), + privSize: 37, + pubSize: 38, + }, + } + + for _, tc := range cases { + + // Check (de/en)codings of PrivKeys. + var priv2, priv3 PrivKey + checkAminoBinary(t, tc.privKey, &priv2, tc.privSize) + assert.EqualValues(t, tc.privKey, priv2) + checkAminoJSON(t, tc.privKey, &priv3, false) // TODO also check Prefix bytes. + assert.EqualValues(t, tc.privKey, priv3) + + // Check (de/en)codings of Signatures. + var sig1, sig2, sig3 Signature + sig1, err := tc.privKey.Sign([]byte("something")) + assert.NoError(t, err) + checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways. + assert.EqualValues(t, sig1, sig2) + checkAminoJSON(t, sig1, &sig3, false) // TODO also check Prefix bytes. + assert.EqualValues(t, sig1, sig3) + + // Check (de/en)codings of PubKeys. 
+		pubKey := tc.privKey.PubKey()
+		var pub2, pub3 PubKey
+		checkAminoBinary(t, pubKey, &pub2, tc.pubSize)
+		assert.EqualValues(t, pubKey, pub2)
+		checkAminoJSON(t, pubKey, &pub3, false) // TODO also check Prefix bytes.
+		assert.EqualValues(t, pubKey, pub3)
+	}
+}
+
+func TestNilEncodings(t *testing.T) {
+
+	// Check nil Signature.
+	var a, b Signature
+	checkAminoJSON(t, &a, &b, true)
+	assert.EqualValues(t, a, b)
+
+	// Check nil PubKey.
+	var c, d PubKey
+	checkAminoJSON(t, &c, &d, true)
+	assert.EqualValues(t, c, d)
+
+	// Check nil PrivKey.
+	var e, f PrivKey
+	checkAminoJSON(t, &e, &f, true)
+	assert.EqualValues(t, e, f)
+
+}
diff --git a/crypto/example_test.go b/crypto/example_test.go
new file mode 100644
index 000000000..904e1c610
--- /dev/null
+++ b/crypto/example_test.go
@@ -0,0 +1,35 @@
+// Copyright 2017 Tendermint. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package crypto_test
+
+import (
+	"fmt"
+
+	"github.com/tendermint/tendermint/crypto"
+)
+
+func ExampleSha256() {
+	sum := crypto.Sha256([]byte("This is Tendermint"))
+	fmt.Printf("%x\n", sum)
+	// Output:
+	// f91afb642f3d1c87c17eb01aae5cb65c242dfdbe7cf1066cc260f4ce5d33b94e
+}
+
+func ExampleRipemd160() {
+	sum := crypto.Ripemd160([]byte("This is Tendermint"))
+	fmt.Printf("%x\n", sum)
+	// Output:
+	// 051e22663e8f0fd2f2302f1210f954adff009005
+}
diff --git a/crypto/hash.go b/crypto/hash.go
new file mode 100644
index 000000000..165b1e153
--- /dev/null
+++ b/crypto/hash.go
@@ -0,0 +1,18 @@
+package crypto
+
+import (
+	"crypto/sha256"
+	"golang.org/x/crypto/ripemd160"
+)
+
+func Sha256(bytes []byte) []byte {
+	hasher := sha256.New()
+	hasher.Write(bytes)
+	return hasher.Sum(nil)
+}
+
+func Ripemd160(bytes []byte) []byte {
+	hasher := ripemd160.New()
+	hasher.Write(bytes)
+	return hasher.Sum(nil)
+}
diff --git a/crypto/hkdfchacha20poly1305/hkdfchachapoly.go b/crypto/hkdfchacha20poly1305/hkdfchachapoly.go
new file mode 100644
index 000000000..ab3b9df3a
--- /dev/null
+++ b/crypto/hkdfchacha20poly1305/hkdfchachapoly.go
@@ -0,0 +1,105 @@
+// Package hkdfchacha20poly1305 creates an AEAD using hkdf, chacha20, and poly1305.
+// When sealing and opening, the hkdf is used to obtain the nonce and subkey for
+// chacha20. Other than the change in how the subkey and nonce for chacha
+// are obtained, this is the same as chacha20poly1305.
+package hkdfchacha20poly1305
+
+import (
+	"crypto/cipher"
+	"crypto/sha256"
+	"errors"
+	"io"
+
+	"golang.org/x/crypto/chacha20poly1305"
+	"golang.org/x/crypto/hkdf"
+)
+
+type hkdfchacha20poly1305 struct {
+	key [KeySize]byte
+}
+
+const (
+	// KeySize is the size of the key used by this AEAD, in bytes.
+	KeySize = 32
+	// NonceSize is the size of the nonce used with this AEAD, in bytes.
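+	// Note that the standard chacha20poly1305 nonce is only 12 bytes; the
+	// 24-byte nonce here is an HKDF input rather than a raw cipher nonce.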
+ NonceSize = 24 + // TagSize is the size added from poly1305 + TagSize = 16 + // MaxPlaintextSize is the max size that can be passed into a single call of Seal + MaxPlaintextSize = (1 << 38) - 64 + // MaxCiphertextSize is the max size that can be passed into a single call of Open, + // this differs from plaintext size due to the tag + MaxCiphertextSize = (1 << 38) - 48 + // HkdfInfo is the parameter used internally for Hkdf's info parameter. + HkdfInfo = "TENDERMINT_SECRET_CONNECTION_FRAME_KEY_DERIVE" +) + +//New xChaChapoly1305 AEAD with 24 byte nonces +func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(hkdfchacha20poly1305) + copy(ret.key[:], key) + return ret, nil + +} +func (c *hkdfchacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *hkdfchacha20poly1305) Overhead() int { + return TagSize +} + +func (c *hkdfchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("hkdfchacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > MaxPlaintextSize { + panic("hkdfchacha20poly1305: plaintext too large") + } + + subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce) + + aead, err := chacha20poly1305.New(subKey[:]) + if err != nil { + panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305") + } + + return aead.Seal(dst, chachaNonce[:], plaintext, additionalData) +} + +func (c *hkdfchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, errors.New("hkdfchacha20poly1305: bad nonce length passed to Open") + } + if uint64(len(ciphertext)) > MaxCiphertextSize { + return nil, errors.New("hkdfchacha20poly1305: ciphertext too large") + } + + subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce) + + aead, err := chacha20poly1305.New(subKey[:]) + if err != nil { + panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305") + } + + return aead.Open(dst, chachaNonce[:], ciphertext, additionalData) +} + +func getSubkeyAndChachaNonceFromHkdf(cKey *[32]byte, nonce *[]byte) ( + subKey [KeySize]byte, chachaNonce [chacha20poly1305.NonceSize]byte) { + hash := sha256.New + hkdf := hkdf.New(hash, (*cKey)[:], *nonce, []byte(HkdfInfo)) + _, err := io.ReadFull(hkdf, subKey[:]) + if err != nil { + panic("hkdfchacha20poly1305: failed to read subkey from hkdf") + } + _, err = io.ReadFull(hkdf, chachaNonce[:]) + if err != nil { + panic("hkdfchacha20poly1305: failed to read chachaNonce from hkdf") + } + return +} diff --git a/crypto/hkdfchacha20poly1305/hkdfchachapoly_test.go b/crypto/hkdfchacha20poly1305/hkdfchachapoly_test.go new file mode 100644 index 000000000..854a312e6 --- /dev/null +++ b/crypto/hkdfchacha20poly1305/hkdfchachapoly_test.go @@ -0,0 +1,143 @@ +package hkdfchacha20poly1305 + +import ( + "bytes" + cr "crypto/rand" + "encoding/hex" + mr "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Test that a test vector we generated is valid. 
(Ensures backwards +// compatibility) +func TestVector(t *testing.T) { + key, _ := hex.DecodeString("56f8de45d3c294c7675bcaf457bdd4b71c380b9b2408ce9412b348d0f08b69ee") + aead, err := New(key[:]) + if err != nil { + t.Fatal(err) + } + cts := []string{"e20a8bf42c535ac30125cfc52031577f0b", + "657695b37ba30f67b25860d90a6f1d00d8", + "e9aa6f3b7f625d957fd50f05bcdf20d014", + "8a00b3b5a6014e0d2033bebc5935086245", + "aadd74867b923879e6866ea9e03c009039", + "fc59773c2c864ee3b4cc971876b3c7bed4", + "caec14e3a9a52ce1a2682c6737defa4752", + "0b89511ffe490d2049d6950494ee51f919", + "7de854ea71f43ca35167a07566c769083d", + "cd477327f4ea4765c71e311c5fec1edbfb"} + + for i := 0; i < 10; i++ { + ct, _ := hex.DecodeString(cts[i]) + + byteArr := []byte{byte(i)} + nonce := make([]byte, 24) + nonce[0] = byteArr[0] + + // Test that we get the expected plaintext on open + plaintext, err := aead.Open(nil, nonce, ct, byteArr) + if err != nil { + t.Errorf("%dth Open failed", i) + continue + } + assert.Equal(t, byteArr, plaintext) + // Test that sealing yields the expected ciphertext + ciphertext := aead.Seal(nil, nonce, plaintext, byteArr) + assert.Equal(t, ct, ciphertext) + } +} + +// The following test is taken from +// https://github.com/golang/crypto/blob/master/chacha20poly1305/chacha20poly1305_test.go#L69 +// It requires the below copyright notice, where "this source code" refers to the following function. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at the bottom of this file. +func TestRandom(t *testing.T) { + // Some random tests to verify Open(Seal) == Plaintext + for i := 0; i < 256; i++ { + var nonce [24]byte + var key [32]byte + + al := mr.Intn(128) + pl := mr.Intn(16384) + ad := make([]byte, al) + plaintext := make([]byte, pl) + cr.Read(key[:]) + cr.Read(nonce[:]) + cr.Read(ad) + cr.Read(plaintext) + + aead, err := New(key[:]) + if err != nil { + t.Fatal(err) + } + + ct := aead.Seal(nil, nonce[:], plaintext, ad) + + plaintext2, err := aead.Open(nil, nonce[:], ct, ad) + if err != nil { + t.Errorf("Random #%d: Open failed", i) + continue + } + + if !bytes.Equal(plaintext, plaintext2) { + t.Errorf("Random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext) + continue + } + + if len(ad) > 0 { + alterAdIdx := mr.Intn(len(ad)) + ad[alterAdIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("Random #%d: Open was successful after altering additional data", i) + } + ad[alterAdIdx] ^= 0x80 + } + + alterNonceIdx := mr.Intn(aead.NonceSize()) + nonce[alterNonceIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("Random #%d: Open was successful after altering nonce", i) + } + nonce[alterNonceIdx] ^= 0x80 + + alterCtIdx := mr.Intn(len(ct)) + ct[alterCtIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("Random #%d: Open was successful after altering ciphertext", i) + } + ct[alterCtIdx] ^= 0x80 + } +} + +// AFOREMENTIONED LICENCE +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/crypto/merkle/README.md b/crypto/merkle/README.md
new file mode 100644
index 000000000..c44978368
--- /dev/null
+++ b/crypto/merkle/README.md
@@ -0,0 +1,4 @@
+## Simple Merkle Tree
+
+Use this simple Merkle tree logic for smaller, static data structures that don't require
+immutable snapshots or mutability; for instance, the transactions and validator signatures of a block can be hashed this way.
diff --git a/crypto/merkle/doc.go b/crypto/merkle/doc.go
new file mode 100644
index 000000000..865c30217
--- /dev/null
+++ b/crypto/merkle/doc.go
@@ -0,0 +1,31 @@
+/*
+Package merkle computes a deterministic minimal height Merkle tree hash.
+If the number of items is not a power of two, some leaves
+will be at different levels. The algorithm tries to keep both sides of
+the tree the same size, but the left side may be one greater.
+
+Use this for short deterministic trees, such as the validator list.
+For larger datasets, use IAVLTree.
+
+Be aware that the current implementation by itself does not prevent
+second pre-image attacks. Hence, use this library with caution.
+Otherwise you might run into similar issues as, e.g., in early Bitcoin:
+https://bitcointalk.org/?topic=102395
+
+                        *
+                       / \
+                     /     \
+                   /         \
+                 /             \
+                *               *
+               / \             / \
+              /   \           /   \
+             /     \         /     \
+            *       *       *       h6
+           / \     / \     / \
+          h0  h1  h2  h3  h4  h5
+
+TODO(ismail): add 2nd pre-image protection or clarify further how we use this
+and why this is secure.
+
+*/
+package merkle
diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go
new file mode 100644
index 000000000..ba4b9309a
--- /dev/null
+++ b/crypto/merkle/simple_map.go
@@ -0,0 +1,88 @@
+package merkle
+
+import (
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// Merkle tree from a map.
+// Leaves are `hash(key) | hash(value)`.
+// Leaves are sorted before Merkle hashing.
+type simpleMap struct {
+	kvs    cmn.KVPairs
+	sorted bool
+}
+
+func newSimpleMap() *simpleMap {
+	return &simpleMap{
+		kvs:    nil,
+		sorted: false,
+	}
+}
+
+// Set hashes the value and appends the (key, value-hash) pair to the kv pairs.
+func (sm *simpleMap) Set(key string, value Hasher) {
+	sm.sorted = false
+
+	// The value is hashed, so you can
+	// check for equality with a cached value hash
+	// and decide whether or not to fetch it.
+ vhash := value.Hash() + + sm.kvs = append(sm.kvs, cmn.KVPair{ + Key: []byte(key), + Value: vhash, + }) +} + +// Hash Merkle root hash of items sorted by key +// (UNSTABLE: and by value too if duplicate key). +func (sm *simpleMap) Hash() []byte { + sm.Sort() + return hashKVPairs(sm.kvs) +} + +func (sm *simpleMap) Sort() { + if sm.sorted { + return + } + sm.kvs.Sort() + sm.sorted = true +} + +// Returns a copy of sorted KVPairs. +// NOTE these contain the hashed key and value. +func (sm *simpleMap) KVPairs() cmn.KVPairs { + sm.Sort() + kvs := make(cmn.KVPairs, len(sm.kvs)) + copy(kvs, sm.kvs) + return kvs +} + +//---------------------------------------- + +// A local extension to KVPair that can be hashed. +// Key and value are length prefixed and concatenated, +// then hashed. +type KVPair cmn.KVPair + +func (kv KVPair) Hash() []byte { + hasher := tmhash.New() + err := encodeByteSlice(hasher, kv.Key) + if err != nil { + panic(err) + } + err = encodeByteSlice(hasher, kv.Value) + if err != nil { + panic(err) + } + return hasher.Sum(nil) +} + +func hashKVPairs(kvs cmn.KVPairs) []byte { + kvsH := make([]Hasher, len(kvs)) + for i, kvp := range kvs { + kvsH[i] = KVPair(kvp) + } + return SimpleHashFromHashers(kvsH) +} diff --git a/crypto/merkle/simple_map_test.go b/crypto/merkle/simple_map_test.go new file mode 100644 index 000000000..34febcf16 --- /dev/null +++ b/crypto/merkle/simple_map_test.go @@ -0,0 +1,54 @@ +package merkle + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/tmhash" +) + +type strHasher string + +func (str strHasher) Hash() []byte { + return tmhash.Sum([]byte(str)) +} + +func TestSimpleMap(t *testing.T) { + { + db := newSimpleMap() + db.Set("key1", strHasher("value1")) + assert.Equal(t, "fa9bc106ffd932d919bee935ceb6cf2b3dd72d8f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := newSimpleMap() + db.Set("key1", strHasher("value2")) + assert.Equal(t, "e00e7dcfe54e9fafef5111e813a587f01ba9c3e8", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := newSimpleMap() + db.Set("key1", strHasher("value1")) + db.Set("key2", strHasher("value2")) + assert.Equal(t, "eff12d1c703a1022ab509287c0f196130123d786", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := newSimpleMap() + db.Set("key2", strHasher("value2")) // NOTE: out of order + db.Set("key1", strHasher("value1")) + assert.Equal(t, "eff12d1c703a1022ab509287c0f196130123d786", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := newSimpleMap() + db.Set("key1", strHasher("value1")) + db.Set("key2", strHasher("value2")) + db.Set("key3", strHasher("value3")) + assert.Equal(t, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := newSimpleMap() + db.Set("key2", strHasher("value2")) // NOTE: out of order + db.Set("key1", strHasher("value1")) + db.Set("key3", strHasher("value3")) + assert.Equal(t, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } +} diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go new file mode 100644 index 000000000..2541b6d38 --- /dev/null +++ b/crypto/merkle/simple_proof.go @@ -0,0 +1,160 @@ +package merkle + +import ( + "bytes" + "fmt" +) + +// SimpleProof represents a simple merkle proof. +type SimpleProof struct { + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. +} + +// SimpleProofsFromHashers computes inclusion proof for given items. 
+// proofs[0] is the proof for items[0]. +func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromHashers(items) + rootHash = rootSPN.Hash + proofs = make([]*SimpleProof, len(items)) + for i, trail := range trails { + proofs[i] = &SimpleProof{ + Aunts: trail.FlattenAunts(), + } + } + return +} + +// SimpleProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values +// in the underlying key-value pairs. +// The keys are sorted before the proofs are computed. +func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) { + sm := newSimpleMap() + for k, v := range m { + sm.Set(k, v) + } + sm.Sort() + kvs := sm.kvs + kvsH := make([]Hasher, 0, len(kvs)) + for _, kvp := range kvs { + kvsH = append(kvsH, KVPair(kvp)) + } + + rootHash, proofList := SimpleProofsFromHashers(kvsH) + proofs = make(map[string]*SimpleProof) + keys = make([]string, len(proofList)) + for i, kvp := range kvs { + proofs[string(kvp.Key)] = proofList[i] + keys[i] = string(kvp.Key) + } + return +} + +// Verify that leafHash is a leaf hash of the simple-merkle-tree +// which hashes to rootHash. +func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { + computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) + return computedHash != nil && bytes.Equal(computedHash, rootHash) +} + +// String implements the stringer interface for SimpleProof. +// It is a wrapper around StringIndented. +func (sp *SimpleProof) String() string { + return sp.StringIndented("") +} + +// StringIndented generates a canonical string representation of a SimpleProof. +func (sp *SimpleProof) StringIndented(indent string) string { + return fmt.Sprintf(`SimpleProof{ +%s Aunts: %X +%s}`, + indent, sp.Aunts, + indent) +} + +// Use the leafHash and innerHashes to get the root merkle hash. +// If the length of the innerHashes slice isn't exactly correct, the result is nil. +// Recursive impl. +func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { + if index >= total || index < 0 || total <= 0 { + return nil + } + switch total { + case 0: + panic("Cannot call computeHashFromAunts() with 0 total") + case 1: + if len(innerHashes) != 0 { + return nil + } + return leafHash + default: + if len(innerHashes) == 0 { + return nil + } + numLeft := (total + 1) / 2 + if index < numLeft { + leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if leftHash == nil { + return nil + } + return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) + } + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if rightHash == nil { + return nil + } + return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) + } +} + +// SimpleProofNode is a helper structure to construct merkle proof. +// The node and the tree is thrown away afterwards. +// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. +// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or +// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. 
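+// FlattenAunts (below) walks the Parent pointers from a leaf node to collect
+// the Aunts for that leaf's SimpleProof.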
+type SimpleProofNode struct { + Hash []byte + Parent *SimpleProofNode + Left *SimpleProofNode // Left sibling (only one of Left,Right is set) + Right *SimpleProofNode // Right sibling (only one of Left,Right is set) +} + +// FlattenAunts will return the inner hashes for the item corresponding to the leaf, +// starting from a leaf SimpleProofNode. +func (spn *SimpleProofNode) FlattenAunts() [][]byte { + // Nonrecursive impl. + innerHashes := [][]byte{} + for spn != nil { + if spn.Left != nil { + innerHashes = append(innerHashes, spn.Left.Hash) + } else if spn.Right != nil { + innerHashes = append(innerHashes, spn.Right.Hash) + } else { + break + } + spn = spn.Parent + } + return innerHashes +} + +// trails[0].Hash is the leaf hash for items[0]. +// trails[i].Parent.Parent....Parent == root for all i. +func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) { + // Recursive impl. + switch len(items) { + case 0: + return nil, nil + case 1: + trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} + return []*SimpleProofNode{trail}, trail + default: + lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:]) + rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) + root := &SimpleProofNode{rootHash, nil, nil, nil} + leftRoot.Parent = root + leftRoot.Right = rightRoot + rightRoot.Parent = root + rightRoot.Left = leftRoot + return append(lefts, rights...), root + } +} diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go new file mode 100644 index 000000000..46a075909 --- /dev/null +++ b/crypto/merkle/simple_tree.go @@ -0,0 +1,58 @@ +package merkle + +import ( + "github.com/tendermint/tendermint/crypto/tmhash" +) + +// SimpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right). +func SimpleHashFromTwoHashes(left, right []byte) []byte { + var hasher = tmhash.New() + err := encodeByteSlice(hasher, left) + if err != nil { + panic(err) + } + err = encodeByteSlice(hasher, right) + if err != nil { + panic(err) + } + return hasher.Sum(nil) +} + +// SimpleHashFromHashers computes a Merkle tree from items that can be hashed. +func SimpleHashFromHashers(items []Hasher) []byte { + hashes := make([][]byte, len(items)) + for i, item := range items { + hash := item.Hash() + hashes[i] = hash + } + return simpleHashFromHashes(hashes) +} + +// SimpleHashFromMap computes a Merkle tree from sorted map. +// Like calling SimpleHashFromHashers with +// `item = []byte(Hash(key) | Hash(value))`, +// sorted by `item`. +func SimpleHashFromMap(m map[string]Hasher) []byte { + sm := newSimpleMap() + for k, v := range m { + sm.Set(k, v) + } + return sm.Hash() +} + +//---------------------------------------------------------------- + +// Expects hashes! +func simpleHashFromHashes(hashes [][]byte) []byte { + // Recursive impl. + switch len(hashes) { + case 0: + return nil + case 1: + return hashes[0] + default: + left := simpleHashFromHashes(hashes[:(len(hashes)+1)/2]) + right := simpleHashFromHashes(hashes[(len(hashes)+1)/2:]) + return SimpleHashFromTwoHashes(left, right) + } +} diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go new file mode 100644 index 000000000..488e0c907 --- /dev/null +++ b/crypto/merkle/simple_tree_test.go @@ -0,0 +1,88 @@ +package merkle + +import ( + "bytes" + + cmn "github.com/tendermint/tendermint/libs/common" + . 
"github.com/tendermint/tendermint/libs/test" + + "github.com/tendermint/tendermint/crypto/tmhash" + "testing" +) + +type testItem []byte + +func (tI testItem) Hash() []byte { + return []byte(tI) +} + +func TestSimpleProof(t *testing.T) { + + total := 100 + + items := make([]Hasher, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmn.RandBytes(tmhash.Size)) + } + + rootHash := SimpleHashFromHashers(items) + + rootHash2, proofs := SimpleProofsFromHashers(items) + + if !bytes.Equal(rootHash, rootHash2) { + t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) + } + + // For each item, check the trail. + for i, item := range items { + itemHash := item.Hash() + proof := proofs[i] + + // Verify success + ok := proof.Verify(i, total, itemHash, rootHash) + if !ok { + t.Errorf("Verification failed for index %v.", i) + } + + // Wrong item index should make it fail + { + ok = proof.Verify((i+1)%total, total, itemHash, rootHash) + if ok { + t.Errorf("Expected verification to fail for wrong index %v.", i) + } + } + + // Trail too long should make it fail + origAunts := proof.Aunts + proof.Aunts = append(proof.Aunts, cmn.RandBytes(32)) + { + ok = proof.Verify(i, total, itemHash, rootHash) + if ok { + t.Errorf("Expected verification to fail for wrong trail length.") + } + } + proof.Aunts = origAunts + + // Trail too short should make it fail + proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1] + { + ok = proof.Verify(i, total, itemHash, rootHash) + if ok { + t.Errorf("Expected verification to fail for wrong trail length.") + } + } + proof.Aunts = origAunts + + // Mutating the itemHash should make it fail. + ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash) + if ok { + t.Errorf("Expected verification to fail for mutated leaf hash") + } + + // Mutating the rootHash should make it fail. + ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash)) + if ok { + t.Errorf("Expected verification to fail for mutated root hash") + } + } +} diff --git a/crypto/merkle/types.go b/crypto/merkle/types.go new file mode 100644 index 000000000..2fcb3f39d --- /dev/null +++ b/crypto/merkle/types.go @@ -0,0 +1,38 @@ +package merkle + +import ( + "io" + + amino "github.com/tendermint/go-amino" +) + +// Tree is a Merkle tree interface. +type Tree interface { + Size() (size int) + Height() (height int8) + Has(key []byte) (has bool) + Proof(key []byte) (value []byte, proof []byte, exists bool) // TODO make it return an index + Get(key []byte) (index int, value []byte, exists bool) + GetByIndex(index int) (key []byte, value []byte) + Set(key []byte, value []byte) (updated bool) + Remove(key []byte) (value []byte, removed bool) + HashWithCount() (hash []byte, count int) + Hash() (hash []byte) + Save() (hash []byte) + Load(hash []byte) + Copy() Tree + Iterate(func(key []byte, value []byte) (stop bool)) (stopped bool) + IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) +} + +// Hasher represents a hashable piece of data which can be hashed in the Tree. 
+type Hasher interface { + Hash() []byte +} + +//----------------------------------------------------------------------- + +// Uvarint length prefixed byteslice +func encodeByteSlice(w io.Writer, bz []byte) (err error) { + return amino.EncodeByteSlice(w, bz) +} diff --git a/crypto/priv_key.go b/crypto/priv_key.go new file mode 100644 index 000000000..dbfe64c33 --- /dev/null +++ b/crypto/priv_key.go @@ -0,0 +1,164 @@ +package crypto + +import ( + "crypto/subtle" + + secp256k1 "github.com/btcsuite/btcd/btcec" + "github.com/tendermint/ed25519" + "github.com/tendermint/ed25519/extra25519" +) + +func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) { + err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey) + return +} + +//---------------------------------------- + +type PrivKey interface { + Bytes() []byte + Sign(msg []byte) (Signature, error) + PubKey() PubKey + Equals(PrivKey) bool +} + +//------------------------------------- + +var _ PrivKey = PrivKeyEd25519{} + +// Implements PrivKey +type PrivKeyEd25519 [64]byte + +func (privKey PrivKeyEd25519) Bytes() []byte { + return cdc.MustMarshalBinaryBare(privKey) +} + +func (privKey PrivKeyEd25519) Sign(msg []byte) (Signature, error) { + privKeyBytes := [64]byte(privKey) + signatureBytes := ed25519.Sign(&privKeyBytes, msg) + return SignatureEd25519(*signatureBytes), nil +} + +func (privKey PrivKeyEd25519) PubKey() PubKey { + privKeyBytes := [64]byte(privKey) + pubBytes := *ed25519.MakePublicKey(&privKeyBytes) + return PubKeyEd25519(pubBytes) +} + +// Equals - you probably don't need to use this. +// Runs in constant time based on length of the keys. +func (privKey PrivKeyEd25519) Equals(other PrivKey) bool { + if otherEd, ok := other.(PrivKeyEd25519); ok { + return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1 + } else { + return false + } +} + +func (privKey PrivKeyEd25519) ToCurve25519() *[32]byte { + keyCurve25519 := new([32]byte) + privKeyBytes := [64]byte(privKey) + extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes) + return keyCurve25519 +} + +// Deterministically generates new priv-key bytes from key. +func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 { + bz, err := cdc.MarshalBinaryBare(struct { + PrivKey [64]byte + Index int + }{privKey, index}) + if err != nil { + panic(err) + } + newBytes := Sha256(bz) + newKey := new([64]byte) + copy(newKey[:32], newBytes) + ed25519.MakePublicKey(newKey) + return PrivKeyEd25519(*newKey) +} + +func GenPrivKeyEd25519() PrivKeyEd25519 { + privKeyBytes := new([64]byte) + copy(privKeyBytes[:32], CRandBytes(32)) + ed25519.MakePublicKey(privKeyBytes) + return PrivKeyEd25519(*privKeyBytes) +} + +// NOTE: secret should be the output of a KDF like bcrypt, +// if it's derived from user input. +func GenPrivKeyEd25519FromSecret(secret []byte) PrivKeyEd25519 { + privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes. 
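+	// The 32-byte hash becomes the first half of the 64-byte ed25519 private
+	// key; MakePublicKey then derives the public key into the second half.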
+ privKeyBytes := new([64]byte) + copy(privKeyBytes[:32], privKey32) + ed25519.MakePublicKey(privKeyBytes) + return PrivKeyEd25519(*privKeyBytes) +} + +//------------------------------------- + +var _ PrivKey = PrivKeySecp256k1{} + +// Implements PrivKey +type PrivKeySecp256k1 [32]byte + +func (privKey PrivKeySecp256k1) Bytes() []byte { + return cdc.MustMarshalBinaryBare(privKey) +} + +func (privKey PrivKeySecp256k1) Sign(msg []byte) (Signature, error) { + priv__, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) + sig__, err := priv__.Sign(Sha256(msg)) + if err != nil { + return nil, err + } + return SignatureSecp256k1(sig__.Serialize()), nil +} + +func (privKey PrivKeySecp256k1) PubKey() PubKey { + _, pub__ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) + var pub PubKeySecp256k1 + copy(pub[:], pub__.SerializeCompressed()) + return pub +} + +// Equals - you probably don't need to use this. +// Runs in constant time based on length of the keys. +func (privKey PrivKeySecp256k1) Equals(other PrivKey) bool { + if otherSecp, ok := other.(PrivKeySecp256k1); ok { + return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1 + } else { + return false + } +} + +/* +// Deterministically generates new priv-key bytes from key. +func (key PrivKeySecp256k1) Generate(index int) PrivKeySecp256k1 { + newBytes := cdc.BinarySha256(struct { + PrivKey [64]byte + Index int + }{key, index}) + var newKey [64]byte + copy(newKey[:], newBytes) + return PrivKeySecp256k1(newKey) +} +*/ + +func GenPrivKeySecp256k1() PrivKeySecp256k1 { + privKeyBytes := [32]byte{} + copy(privKeyBytes[:], CRandBytes(32)) + priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKeyBytes[:]) + copy(privKeyBytes[:], priv.Serialize()) + return PrivKeySecp256k1(privKeyBytes) +} + +// NOTE: secret should be the output of a KDF like bcrypt, +// if it's derived from user input. +func GenPrivKeySecp256k1FromSecret(secret []byte) PrivKeySecp256k1 { + privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes. + priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey32) + privKeyBytes := [32]byte{} + copy(privKeyBytes[:], priv.Serialize()) + return PrivKeySecp256k1(privKeyBytes) +} diff --git a/crypto/priv_key_test.go b/crypto/priv_key_test.go new file mode 100644 index 000000000..c1ae33ed1 --- /dev/null +++ b/crypto/priv_key_test.go @@ -0,0 +1,60 @@ +package crypto_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto" +) + +func TestGeneratePrivKey(t *testing.T) { + testPriv := crypto.GenPrivKeyEd25519() + testGenerate := testPriv.Generate(1) + signBytes := []byte("something to sign") + pub := testGenerate.PubKey() + sig, err := testGenerate.Sign(signBytes) + assert.NoError(t, err) + assert.True(t, pub.VerifyBytes(signBytes, sig)) +} + +/* + +type BadKey struct { + PrivKeyEd25519 +} + +func TestReadPrivKey(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // garbage in, garbage out + garbage := []byte("hjgewugfbiewgofwgewr") + XXX This test wants to register BadKey globally to crypto, + but we don't want to support that. + _, err := PrivKeyFromBytes(garbage) + require.Error(err) + + edKey := GenPrivKeyEd25519() + badKey := BadKey{edKey} + + cases := []struct { + key PrivKey + valid bool + }{ + {edKey, true}, + {badKey, false}, + } + + for i, tc := range cases { + data := tc.key.Bytes() + fmt.Println(">>>", data) + key, err := PrivKeyFromBytes(data) + fmt.Printf("!!! 
%#v\n", key, err) + if tc.valid { + assert.NoError(err, "%d", i) + assert.Equal(tc.key, key, "%d", i) + } else { + assert.Error(err, "%d: %#v", i, key) + } + } +} +*/ diff --git a/crypto/pub_key.go b/crypto/pub_key.go new file mode 100644 index 000000000..588c54113 --- /dev/null +++ b/crypto/pub_key.go @@ -0,0 +1,153 @@ +package crypto + +import ( + "bytes" + "crypto/sha256" + "fmt" + + "golang.org/x/crypto/ripemd160" + + secp256k1 "github.com/btcsuite/btcd/btcec" + + "github.com/tendermint/ed25519" + "github.com/tendermint/ed25519/extra25519" + + cmn "github.com/tendermint/tendermint/libs/common" + + "github.com/tendermint/tendermint/crypto/tmhash" +) + +// An address is a []byte, but hex-encoded even in JSON. +// []byte leaves us the option to change the address length. +// Use an alias so Unmarshal methods (with ptr receivers) are available too. +type Address = cmn.HexBytes + +func PubKeyFromBytes(pubKeyBytes []byte) (pubKey PubKey, err error) { + err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey) + return +} + +//---------------------------------------- + +type PubKey interface { + Address() Address + Bytes() []byte + VerifyBytes(msg []byte, sig Signature) bool + Equals(PubKey) bool +} + +//------------------------------------- + +var _ PubKey = PubKeyEd25519{} + +const PubKeyEd25519Size = 32 + +// Implements PubKeyInner +type PubKeyEd25519 [PubKeyEd25519Size]byte + +// Address is the SHA256-20 of the raw pubkey bytes. +func (pubKey PubKeyEd25519) Address() Address { + return Address(tmhash.Sum(pubKey[:])) +} + +func (pubKey PubKeyEd25519) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(pubKey) + if err != nil { + panic(err) + } + return bz +} + +func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool { + // make sure we use the same algorithm to sign + sig, ok := sig_.(SignatureEd25519) + if !ok { + return false + } + pubKeyBytes := [PubKeyEd25519Size]byte(pubKey) + sigBytes := [SignatureEd25519Size]byte(sig) + return ed25519.Verify(&pubKeyBytes, msg, &sigBytes) +} + +// For use with golang/crypto/nacl/box +// If error, returns nil. +func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { + keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey) + ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes) + if !ok { + return nil + } + return keyCurve25519 +} + +func (pubKey PubKeyEd25519) String() string { + return fmt.Sprintf("PubKeyEd25519{%X}", pubKey[:]) +} + +func (pubKey PubKeyEd25519) Equals(other PubKey) bool { + if otherEd, ok := other.(PubKeyEd25519); ok { + return bytes.Equal(pubKey[:], otherEd[:]) + } else { + return false + } +} + +//------------------------------------- + +var _ PubKey = PubKeySecp256k1{} + +const PubKeySecp256k1Size = 33 + +// Implements PubKey. +// Compressed pubkey (just the x-cord), +// prefixed with 0x02 or 0x03, depending on the y-cord. 
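+// (This is the standard 33-byte SEC1 compressed encoding.)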
+type PubKeySecp256k1 [PubKeySecp256k1Size]byte + +// Implements Bitcoin style addresses: RIPEMD160(SHA256(pubkey)) +func (pubKey PubKeySecp256k1) Address() Address { + hasherSHA256 := sha256.New() + hasherSHA256.Write(pubKey[:]) // does not error + sha := hasherSHA256.Sum(nil) + + hasherRIPEMD160 := ripemd160.New() + hasherRIPEMD160.Write(sha) // does not error + return Address(hasherRIPEMD160.Sum(nil)) +} + +func (pubKey PubKeySecp256k1) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(pubKey) + if err != nil { + panic(err) + } + return bz +} + +func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig_ Signature) bool { + // and assert same algorithm to sign and verify + sig, ok := sig_.(SignatureSecp256k1) + if !ok { + return false + } + + pub__, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256()) + if err != nil { + return false + } + sig__, err := secp256k1.ParseDERSignature(sig[:], secp256k1.S256()) + if err != nil { + return false + } + return sig__.Verify(Sha256(msg), pub__) +} + +func (pubKey PubKeySecp256k1) String() string { + return fmt.Sprintf("PubKeySecp256k1{%X}", pubKey[:]) +} + +func (pubKey PubKeySecp256k1) Equals(other PubKey) bool { + if otherSecp, ok := other.(PubKeySecp256k1); ok { + return bytes.Equal(pubKey[:], otherSecp[:]) + } else { + return false + } +} diff --git a/crypto/pub_key_test.go b/crypto/pub_key_test.go new file mode 100644 index 000000000..7b856cf18 --- /dev/null +++ b/crypto/pub_key_test.go @@ -0,0 +1,50 @@ +package crypto + +import ( + "encoding/hex" + "testing" + + "github.com/btcsuite/btcutil/base58" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type keyData struct { + priv string + pub string + addr string +} + +var secpDataTable = []keyData{ + { + priv: "a96e62ed3955e65be32703f12d87b6b5cf26039ecfa948dc5107a495418e5330", + pub: "02950e1cdfcb133d6024109fd489f734eeb4502418e538c28481f22bce276f248c", + addr: "1CKZ9Nx4zgds8tU7nJHotKSDr4a9bYJCa3", + }, +} + +func TestPubKeySecp256k1Address(t *testing.T) { + for _, d := range secpDataTable { + privB, _ := hex.DecodeString(d.priv) + pubB, _ := hex.DecodeString(d.pub) + addrBbz, _, _ := base58.CheckDecode(d.addr) + addrB := Address(addrBbz) + + var priv PrivKeySecp256k1 + copy(priv[:], privB) + + pubKey := priv.PubKey() + pubT, _ := pubKey.(PubKeySecp256k1) + pub := pubT[:] + addr := pubKey.Address() + + assert.Equal(t, pub, pubB, "Expected pub keys to match") + assert.Equal(t, addr, addrB, "Expected addresses to match") + } +} + +func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) { + pk, err := PubKeyFromBytes([]byte("foo")) + require.NotNil(t, err, "expecting a non-nil error") + require.Nil(t, pk, "expecting an empty public key on error") +} diff --git a/crypto/random.go b/crypto/random.go new file mode 100644 index 000000000..5c5057d30 --- /dev/null +++ b/crypto/random.go @@ -0,0 +1,108 @@ +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + crand "crypto/rand" + "crypto/sha256" + "encoding/hex" + "io" + "sync" + + . "github.com/tendermint/tendermint/libs/common" +) + +var gRandInfo *randInfo + +func init() { + gRandInfo = &randInfo{} + gRandInfo.MixEntropy(randBytes(32)) // Init +} + +// Mix additional bytes of randomness, e.g. from hardware, user-input, etc. +// It is OK to call it multiple times. It does not diminish security. 
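+// Internally, seeds are folded into an AES-CTR stream which is XORed with
+// the OS entropy from crypto/rand (see randInfo below).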
+func MixEntropy(seedBytes []byte) { + gRandInfo.MixEntropy(seedBytes) +} + +// This only uses the OS's randomness +func randBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + PanicCrisis(err) + } + return b +} + +// This uses the OS and the Seed(s). +func CRandBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := gRandInfo.Read(b) + if err != nil { + PanicCrisis(err) + } + return b +} + +// CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long. +// +// Note: CRandHex(24) gives 96 bits of randomness that +// are usually strong enough for most purposes. +func CRandHex(numDigits int) string { + return hex.EncodeToString(CRandBytes(numDigits / 2)) +} + +// Returns a crand.Reader mixed with user-supplied entropy +func CReader() io.Reader { + return gRandInfo +} + +//-------------------------------------------------------------------------------- + +type randInfo struct { + mtx sync.Mutex + seedBytes [32]byte + cipherAES256 cipher.Block + streamAES256 cipher.Stream + reader io.Reader +} + +// You can call this as many times as you'd like. +// XXX TODO review +func (ri *randInfo) MixEntropy(seedBytes []byte) { + ri.mtx.Lock() + defer ri.mtx.Unlock() + // Make new ri.seedBytes using passed seedBytes and current ri.seedBytes: + // ri.seedBytes = sha256( seedBytes || ri.seedBytes ) + h := sha256.New() + h.Write(seedBytes) + h.Write(ri.seedBytes[:]) + hashBytes := h.Sum(nil) + hashBytes32 := [32]byte{} + copy(hashBytes32[:], hashBytes) + ri.seedBytes = xorBytes32(ri.seedBytes, hashBytes32) + // Create new cipher.Block + var err error + ri.cipherAES256, err = aes.NewCipher(ri.seedBytes[:]) + if err != nil { + PanicSanity("Error creating AES256 cipher: " + err.Error()) + } + // Create new stream + ri.streamAES256 = cipher.NewCTR(ri.cipherAES256, randBytes(aes.BlockSize)) + // Create new reader + ri.reader = &cipher.StreamReader{S: ri.streamAES256, R: crand.Reader} +} + +func (ri *randInfo) Read(b []byte) (n int, err error) { + ri.mtx.Lock() + defer ri.mtx.Unlock() + return ri.reader.Read(b) +} + +func xorBytes32(bytesA [32]byte, bytesB [32]byte) (res [32]byte) { + for i, b := range bytesA { + res[i] = b ^ bytesB[i] + } + return res +} diff --git a/crypto/signature.go b/crypto/signature.go new file mode 100644 index 000000000..ae447da64 --- /dev/null +++ b/crypto/signature.go @@ -0,0 +1,90 @@ +package crypto + +import ( + "fmt" + + "crypto/subtle" + + . 
"github.com/tendermint/tendermint/libs/common" +) + +func SignatureFromBytes(pubKeyBytes []byte) (pubKey Signature, err error) { + err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey) + return +} + +//---------------------------------------- + +type Signature interface { + Bytes() []byte + IsZero() bool + Equals(Signature) bool +} + +//------------------------------------- + +var _ Signature = SignatureEd25519{} + +const SignatureEd25519Size = 64 + +// Implements Signature +type SignatureEd25519 [SignatureEd25519Size]byte + +func (sig SignatureEd25519) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(sig) + if err != nil { + panic(err) + } + return bz +} + +func (sig SignatureEd25519) IsZero() bool { return len(sig) == 0 } + +func (sig SignatureEd25519) String() string { return fmt.Sprintf("/%X.../", Fingerprint(sig[:])) } + +func (sig SignatureEd25519) Equals(other Signature) bool { + if otherEd, ok := other.(SignatureEd25519); ok { + return subtle.ConstantTimeCompare(sig[:], otherEd[:]) == 1 + } else { + return false + } +} + +func SignatureEd25519FromBytes(data []byte) Signature { + var sig SignatureEd25519 + copy(sig[:], data) + return sig +} + +//------------------------------------- + +var _ Signature = SignatureSecp256k1{} + +// Implements Signature +type SignatureSecp256k1 []byte + +func (sig SignatureSecp256k1) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(sig) + if err != nil { + panic(err) + } + return bz +} + +func (sig SignatureSecp256k1) IsZero() bool { return len(sig) == 0 } + +func (sig SignatureSecp256k1) String() string { return fmt.Sprintf("/%X.../", Fingerprint(sig[:])) } + +func (sig SignatureSecp256k1) Equals(other Signature) bool { + if otherSecp, ok := other.(SignatureSecp256k1); ok { + return subtle.ConstantTimeCompare(sig[:], otherSecp[:]) == 1 + } else { + return false + } +} + +func SignatureSecp256k1FromBytes(data []byte) Signature { + sig := make(SignatureSecp256k1, len(data)) + copy(sig[:], data) + return sig +} diff --git a/crypto/signature_test.go b/crypto/signature_test.go new file mode 100644 index 000000000..d6ae2b7a9 --- /dev/null +++ b/crypto/signature_test.go @@ -0,0 +1,46 @@ +package crypto + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSignAndValidateEd25519(t *testing.T) { + + privKey := GenPrivKeyEd25519() + pubKey := privKey.PubKey() + + msg := CRandBytes(128) + sig, err := privKey.Sign(msg) + require.Nil(t, err) + + // Test the signature + assert.True(t, pubKey.VerifyBytes(msg, sig)) + + // Mutate the signature, just one bit. + sigEd := sig.(SignatureEd25519) + sigEd[7] ^= byte(0x01) + sig = sigEd + + assert.False(t, pubKey.VerifyBytes(msg, sig)) +} + +func TestSignAndValidateSecp256k1(t *testing.T) { + privKey := GenPrivKeySecp256k1() + pubKey := privKey.PubKey() + + msg := CRandBytes(128) + sig, err := privKey.Sign(msg) + require.Nil(t, err) + + assert.True(t, pubKey.VerifyBytes(msg, sig)) + + // Mutate the signature, just one bit. + sigEd := sig.(SignatureSecp256k1) + sigEd[3] ^= byte(0x01) + sig = sigEd + + assert.False(t, pubKey.VerifyBytes(msg, sig)) +} diff --git a/crypto/symmetric.go b/crypto/symmetric.go new file mode 100644 index 000000000..62379c15f --- /dev/null +++ b/crypto/symmetric.go @@ -0,0 +1,51 @@ +package crypto + +import ( + "errors" + + . "github.com/tendermint/tendermint/libs/common" + "golang.org/x/crypto/nacl/secretbox" +) + +const nonceLen = 24 +const secretLen = 32 + +// secret must be 32 bytes long. 
Use something like Sha256(Bcrypt(passphrase)) +// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. +// NOTE: call crypto.MixEntropy() first. +func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { + if len(secret) != secretLen { + PanicSanity(Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + } + nonce := CRandBytes(nonceLen) + nonceArr := [nonceLen]byte{} + copy(nonceArr[:], nonce) + secretArr := [secretLen]byte{} + copy(secretArr[:], secret) + ciphertext = make([]byte, nonceLen+secretbox.Overhead+len(plaintext)) + copy(ciphertext, nonce) + secretbox.Seal(ciphertext[nonceLen:nonceLen], plaintext, &nonceArr, &secretArr) + return ciphertext +} + +// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) +// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. +func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { + if len(secret) != secretLen { + PanicSanity(Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + } + if len(ciphertext) <= secretbox.Overhead+nonceLen { + return nil, errors.New("Ciphertext is too short") + } + nonce := ciphertext[:nonceLen] + nonceArr := [nonceLen]byte{} + copy(nonceArr[:], nonce) + secretArr := [secretLen]byte{} + copy(secretArr[:], secret) + plaintext = make([]byte, len(ciphertext)-nonceLen-secretbox.Overhead) + _, ok := secretbox.Open(plaintext[:0], ciphertext[nonceLen:], &nonceArr, &secretArr) + if !ok { + return nil, errors.New("Ciphertext decryption failed") + } + return plaintext, nil +} diff --git a/crypto/symmetric_test.go b/crypto/symmetric_test.go new file mode 100644 index 000000000..d92bff1aa --- /dev/null +++ b/crypto/symmetric_test.go @@ -0,0 +1,42 @@ +package crypto + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "golang.org/x/crypto/bcrypt" +) + +func TestSimple(t *testing.T) { + + MixEntropy([]byte("someentropy")) + + plaintext := []byte("sometext") + secret := []byte("somesecretoflengththirtytwo===32") + ciphertext := EncryptSymmetric(plaintext, secret) + plaintext2, err := DecryptSymmetric(ciphertext, secret) + + require.Nil(t, err, "%+v", err) + assert.Equal(t, plaintext, plaintext2) +} + +func TestSimpleWithKDF(t *testing.T) { + + MixEntropy([]byte("someentropy")) + + plaintext := []byte("sometext") + secretPass := []byte("somesecret") + secret, err := bcrypt.GenerateFromPassword(secretPass, 12) + if err != nil { + t.Error(err) + } + secret = Sha256(secret) + + ciphertext := EncryptSymmetric(plaintext, secret) + plaintext2, err := DecryptSymmetric(ciphertext, secret) + + require.Nil(t, err, "%+v", err) + assert.Equal(t, plaintext, plaintext2) +} diff --git a/crypto/tmhash/hash.go b/crypto/tmhash/hash.go new file mode 100644 index 000000000..1b29d8680 --- /dev/null +++ b/crypto/tmhash/hash.go @@ -0,0 +1,48 @@ +package tmhash + +import ( + "crypto/sha256" + "hash" +) + +const ( + Size = 20 + BlockSize = sha256.BlockSize +) + +type sha256trunc struct { + sha256 hash.Hash +} + +func (h sha256trunc) Write(p []byte) (n int, err error) { + return h.sha256.Write(p) +} +func (h sha256trunc) Sum(b []byte) []byte { + shasum := h.sha256.Sum(b) + return shasum[:Size] +} + +func (h sha256trunc) Reset() { + h.sha256.Reset() +} + +func (h sha256trunc) Size() int { + return Size +} + +func (h sha256trunc) BlockSize() int { + return h.sha256.BlockSize() +} + +// New returns a new hash.Hash. 
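+// It writes like SHA256, but Sum truncates the digest to the first Size (20) bytes.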
+func New() hash.Hash { + return sha256trunc{ + sha256: sha256.New(), + } +} + +// Sum returns the first 20 bytes of SHA256 of the bz. +func Sum(bz []byte) []byte { + hash := sha256.Sum256(bz) + return hash[:Size] +} diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go new file mode 100644 index 000000000..27938039a --- /dev/null +++ b/crypto/tmhash/hash_test.go @@ -0,0 +1,23 @@ +package tmhash_test + +import ( + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/tmhash" +) + +func TestHash(t *testing.T) { + testVector := []byte("abc") + hasher := tmhash.New() + hasher.Write(testVector) + bz := hasher.Sum(nil) + + hasher = sha256.New() + hasher.Write(testVector) + bz2 := hasher.Sum(nil) + bz2 = bz2[:20] + + assert.Equal(t, bz, bz2) +} diff --git a/crypto/version.go b/crypto/version.go new file mode 100644 index 000000000..77c0bed8a --- /dev/null +++ b/crypto/version.go @@ -0,0 +1,3 @@ +package crypto + +const Version = "0.9.0-dev" diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 000000000..61862e5c1 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,68 @@ +version: '3' + +services: + node0: + container_name: node0 + image: "tendermint/localnode" + ports: + - "26656-26657:26656-26657" + environment: + - ID=0 + - LOG=$${LOG:-tendermint.log} + volumes: + - ./build:/tendermint:Z + networks: + localnet: + ipv4_address: 192.167.10.2 + + node1: + container_name: node1 + image: "tendermint/localnode" + ports: + - "26659-26660:26656-26657" + environment: + - ID=1 + - LOG=$${LOG:-tendermint.log} + volumes: + - ./build:/tendermint:Z + networks: + localnet: + ipv4_address: 192.167.10.3 + + node2: + container_name: node2 + image: "tendermint/localnode" + environment: + - ID=2 + - LOG=$${LOG:-tendermint.log} + ports: + - "26661-26662:26656-26657" + volumes: + - ./build:/tendermint:Z + networks: + localnet: + ipv4_address: 192.167.10.4 + + node3: + container_name: node3 + image: "tendermint/localnode" + environment: + - ID=3 + - LOG=$${LOG:-tendermint.log} + ports: + - "26663-26664:26656-26657" + volumes: + - ./build:/tendermint:Z + networks: + localnet: + ipv4_address: 192.167.10.5 + +networks: + localnet: + driver: bridge + ipam: + driver: default + config: + - + subnet: 192.167.10.0/16 + diff --git a/docs/.python-version b/docs/.python-version new file mode 100644 index 000000000..9bbf49249 --- /dev/null +++ b/docs/.python-version @@ -0,0 +1 @@ +2.7.14 diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md new file mode 100644 index 000000000..162fc1acd --- /dev/null +++ b/docs/DOCS_README.md @@ -0,0 +1,19 @@ +# Documentation Maintenance Overview + +The documentation found in this directory is hosted at: + +- https://tendermint.com/docs/ + +and built using [VuePress](https://vuepress.vuejs.org/) from the tendermint website repo: + +- https://github.com/tendermint/tendermint.com + +which has a [configuration file](https://github.com/tendermint/tendermint.com/blob/develop/docs/.vuepress/config.js) for displaying +the Table of Contents that lists all the documentation. 
+
+The `README.md` in this directory is the landing page for
+website documentation, and the following folders are intentionally
+omitted:
+
+- `architecture/` ==> contains Architecture Decision Records
+- `spec/` ==> contains the detailed specification
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 000000000..442c9be65
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,23 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = python -msphinx
+SPHINXPROJ = Tendermint
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+install:
+	@pip install -r requirements.txt
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 000000000..16ea708ad
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,27 @@
+# Tendermint
+
+Welcome to the Tendermint Core documentation! The introduction below provides
+an overview to help you navigate to your area of interest.
+
+## Introduction
+
+Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
+transition machine - written in any programming language - and securely
+replicates it on many machines. In other words, a blockchain.
+
+Tendermint requires an application running over the Application Blockchain
+Interface (ABCI), and comes packaged with an example application.
+Follow the [installation instructions](./introduction/install) to get up and running
+quickly. For more details, see the [using tendermint](./tendermint-core/using-tendermint)
+section and those that follow.
+
+## Networks
+
+Testnets can be set up manually on one or more machines, or automatically on one
+or more machines, using a variety of methods described in the [deploy testnets
+section](./networks/deploy-testnets).
+
+## Application Development
+
+The first step to building an application on Tendermint is to [install
+ABCI-CLI](./app-dev/getting-started) and play with the example applications.
diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md
new file mode 100644
index 000000000..4f9019fda
--- /dev/null
+++ b/docs/app-dev/abci-cli.md
@@ -0,0 +1,354 @@
+# Using ABCI-CLI
+
+To facilitate testing and debugging of ABCI servers and simple apps, we
+built a CLI, the `abci-cli`, for sending ABCI messages from the command
+line.
+
+## Install
+
+Make sure you [have Go installed](https://golang.org/doc/install).
+
+Next, install the `abci-cli` tool and example applications:
+
+```
+go get github.com/tendermint/tendermint
+```
+
+Then, to get the vendored dependencies and install the tool, run:
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+make get_tools
+make get_vendor_deps
+make install_abci
+```
+
+Now run `abci-cli` to see the list of commands:
+
+```
+Usage:
+  abci-cli [command]
+
+Available Commands:
+  batch       Run a batch of abci commands against an application
+  check_tx    Validate a tx
+  commit      Commit the application state and return the Merkle root hash
+  console     Start an interactive abci console for multiple commands
+  counter     ABCI demo example
+  deliver_tx  Deliver a new tx to the application
+  kvstore     ABCI demo example
+  echo        Have the application echo a message
+  help        Help about any command
+  info        Get some info about the application
+  query       Query the application state
+  set_option  Set an option on the application
+
+Flags:
+      --abci string      socket or grpc (default "socket")
+      --address string   address of application socket (default "tcp://127.0.0.1:26658")
+  -h, --help             help for abci-cli
+  -v, --verbose          print the command and results as if it were a console session
+
+Use "abci-cli [command] --help" for more information about a command.
+```
+
+## KVStore - First Example
+
+The `abci-cli` tool lets us send ABCI messages to our application, to
+help build and debug it.
+
+The most important messages are `deliver_tx`, `check_tx`, and `commit`,
+but there are others for convenience, configuration, and information
+purposes.
+
+We'll start a kvstore application, which was installed at the same time
+as `abci-cli` above. The kvstore just stores transactions in a Merkle
+tree.
+
+Its code can be found
+[here](https://github.com/tendermint/tendermint/blob/develop/abci/cmd/abci-cli/abci-cli.go)
+and looks like:
+
+```
+func cmdKVStore(cmd *cobra.Command, args []string) error {
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+
+	// Create the application - in memory or persisted to disk
+	var app types.Application
+	if flagPersist == "" {
+		app = kvstore.NewKVStoreApplication()
+	} else {
+		app = kvstore.NewPersistentKVStoreApplication(flagPersist)
+		app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
+	}
+
+	// Start the listener
+	srv, err := server.NewServer(flagAddrD, flagAbci, app)
+	if err != nil {
+		return err
+	}
+	srv.SetLogger(logger.With("module", "abci-server"))
+	if err := srv.Start(); err != nil {
+		return err
+	}
+
+	// Wait forever
+	cmn.TrapSignal(func() {
+		// Cleanup
+		srv.Stop()
+	})
+	return nil
+}
+```
+
+Start by running:
+
+```
+abci-cli kvstore
+```
+
+And in another terminal, run
+
+```
+abci-cli echo hello
+abci-cli info
+```
+
+You'll see something like:
+
+```
+-> data: hello
+-> data.hex: 68656C6C6F
+```
+
+and:
+
+```
+-> data: {"size":0}
+-> data.hex: 7B2273697A65223A307D
+```
+
+An ABCI application must provide two things:
+
+- a socket server
+- a handler for ABCI messages
+
+When we run the `abci-cli` tool we open a new connection to the
+application's socket server, send the given ABCI message, and wait for a
+response.
+
+The server may be generic for a particular language, and we provide a
+[reference implementation in
+Golang](https://github.com/tendermint/tendermint/tree/develop/abci/server). See the
+[list of other ABCI implementations](./ecosystem.html) for servers in
+other languages.
+
+The handler is specific to the application, and may be arbitrary, so
+long as it is deterministic and conforms to the ABCI interface
+specification.
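+
+To make that concrete, here is a minimal sketch of such a handler in Go.
+It assumes the `types.BaseApplication` helper (which provides no-op
+implementations of every ABCI message) and the `server` package used in
+`cmdKVStore` above; `MinimalApp` itself is hypothetical:
+
+```
+package main
+
+import (
+	"github.com/tendermint/tendermint/abci/server"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// MinimalApp embeds BaseApplication, so we only override the messages
+// we care about - here, just Info.
+type MinimalApp struct {
+	types.BaseApplication
+}
+
+func (app *MinimalApp) Info(req types.RequestInfo) types.ResponseInfo {
+	return types.ResponseInfo{Data: "minimal-app"}
+}
+
+func main() {
+	// Listen on the default ABCI socket address.
+	srv, err := server.NewServer("tcp://127.0.0.1:26658", "socket", &MinimalApp{})
+	if err != nil {
+		panic(err)
+	}
+	if err := srv.Start(); err != nil {
+		panic(err)
+	}
+	cmn.TrapSignal(func() {
+		srv.Stop()
+	})
+}
+```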
+
+So when we run `abci-cli info`, we open a new connection to the ABCI
+server, which calls the `Info()` method on the application, which tells
+us the number of transactions in our Merkle tree.
+
+Now, since every command opens a new connection, we provide the
+`abci-cli console` and `abci-cli batch` commands, to allow multiple ABCI
+messages to be sent over a single connection.
+
+Running `abci-cli console` should drop you into an interactive console
+for speaking ABCI messages to your application.
+
+Try running these commands:
+
+```
+> echo hello
+-> code: OK
+-> data: hello
+-> data.hex: 0x68656C6C6F
+
+> info
+-> code: OK
+-> data: {"size":0}
+-> data.hex: 0x7B2273697A65223A307D
+
+> commit
+-> code: OK
+-> data.hex: 0x0000000000000000
+
+> deliver_tx "abc"
+-> code: OK
+
+> info
+-> code: OK
+-> data: {"size":1}
+-> data.hex: 0x7B2273697A65223A317D
+
+> commit
+-> code: OK
+-> data.hex: 0x0200000000000000
+
+> query "abc"
+-> code: OK
+-> log: exists
+-> height: 0
+-> value: abc
+-> value.hex: 616263
+
+> deliver_tx "def=xyz"
+-> code: OK
+
+> commit
+-> code: OK
+-> data.hex: 0x0400000000000000
+
+> query "def"
+-> code: OK
+-> log: exists
+-> height: 0
+-> value: xyz
+-> value.hex: 78797A
+```
+
+Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
+we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
+
+Similarly, you could put the commands in a file and run
+`abci-cli --verbose batch < myfile`.
+
+## Counter - Another Example
+
+Now that we've got the hang of it, let's try another application, the
+"counter" app.
+
+Like the kvstore app, its code can be found
+[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
+and looks like:
+
+```
+func cmdCounter(cmd *cobra.Command, args []string) error {
+
+	app := counter.NewCounterApplication(flagSerial)
+
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+
+	// Start the listener
+	srv, err := server.NewServer(flagAddrC, flagAbci, app)
+	if err != nil {
+		return err
+	}
+	srv.SetLogger(logger.With("module", "abci-server"))
+	if err := srv.Start(); err != nil {
+		return err
+	}
+
+	// Wait forever
+	cmn.TrapSignal(func() {
+		// Cleanup
+		srv.Stop()
+	})
+	return nil
+}
+```
+
+The counter app doesn't use a Merkle tree; it just counts how many times
+we've sent a transaction, asked for a hash, or committed the state. The
+result of `commit` is just the number of transactions sent.
+
+This application has two modes: `serial=off` and `serial=on`.
+
+When `serial=on`, transactions must be a big-endian encoded incrementing
+integer, starting at 0.
+
+If `serial=off`, there are no restrictions on transactions.
+
+We can toggle the value of `serial` using the `set_option` ABCI message.
+
+When `serial=on`, some transactions are invalid. In a live blockchain,
+transactions collect in memory before they are committed into blocks. To
+avoid wasting resources on invalid transactions, ABCI provides the
+`check_tx` message, which application developers can use to accept or
+reject transactions, before they are stored in memory or gossiped to
+other peers.
+
+In this instance of the counter app, `check_tx` only allows transactions
+whose integer is greater than the last committed one.
+
+Let's kill the console and the kvstore application, and start the
+counter app:
+
+```
+abci-cli counter
+```
+
+In another window, start the `abci-cli console`:
+
+```
+> set_option serial on
+-> code: OK
+-> log: OK (SetOption doesn't return anything.)
+
+> check_tx 0x00
+-> code: OK
+
+> check_tx 0xff
+-> code: OK
+
+> deliver_tx 0x00
+-> code: OK
+
+> check_tx 0x00
+-> code: BadNonce
+-> log: Invalid nonce. Expected >= 1, got 0
+
+> deliver_tx 0x01
+-> code: OK
+
+> deliver_tx 0x04
+-> code: BadNonce
+-> log: Invalid nonce. Expected 2, got 4
+
+> info
+-> code: OK
+-> data: {"hashes":0,"txs":2}
+-> data.hex: 0x7B22686173686573223A302C22747873223A327D
+```
+
+This is a very simple application, but between `counter` and `kvstore`,
+it's easy to see how you can build out arbitrary application states on
+top of the ABCI. [Hyperledger's
+Burrow](https://github.com/hyperledger/burrow) also runs atop ABCI,
+bringing with it Ethereum-like accounts, the Ethereum Virtual Machine,
+Monax's permissioning scheme, and native contract extensions.
+
+But the ultimate flexibility comes from being able to write the
+application easily in any language.
+
+We have implemented the counter in a number of languages; see the
+[example directory](https://github.com/tendermint/tendermint/tree/develop/abci/example).
+
+To run the Node.js version, `cd` to `example/js` and run
+
+```
+node app.js
+```
+
+(You'll have to kill the other counter application process.) In another
+window, run the console and those previous ABCI commands. You should get
+the same results as for the Go version.
+
+## Bounties
+
+Want to write the counter app in your favorite language?! We'd be happy
+to add you to our [ecosystem](https://tendermint.com/ecosystem)! We're
+also offering [bounties](https://hackerone.com/tendermint/) for
+implementations in new languages!
+
+The `abci-cli` is designed strictly for testing and debugging. In a real
+deployment, the role of sending messages is taken by Tendermint, which
+connects to the app using three separate connections, each with its own
+pattern of messages.
+
+For more information, see the [application developers
+guide](./app-development.md). For examples of running an ABCI app with
+Tendermint, see the [getting started guide](./getting-started.md).
+Next is the ABCI specification.
diff --git a/docs/app-dev/abci-spec.md b/docs/app-dev/abci-spec.md
new file mode 100644
index 000000000..ef274a4e8
--- /dev/null
+++ b/docs/app-dev/abci-spec.md
@@ -0,0 +1,325 @@
+# ABCI Specification
+
+## Message Types
+
+ABCI requests/responses are defined as simple Protobuf messages in [this
+schema file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto).
+Tendermint Core sends the requests, and the ABCI application sends the
+responses. Here, we provide an overview of the message types and how
+they are used by Tendermint. Then we describe each request-response pair
+as a function with arguments and return values, and add some notes on
+usage.
+
+Some messages (`Echo, Info, InitChain, BeginBlock, EndBlock, Commit`)
+don't return errors because an error would indicate a critical failure
+in the application and there's nothing Tendermint can do. The problem
+should be addressed and both Tendermint and the application restarted.
+All other messages (`SetOption, Query, CheckTx, DeliverTx`) return an
+application-specific response `Code uint32`, where only `0` is reserved
+for `OK`.
+
+Some messages (`SetOption, Query, CheckTx, DeliverTx`) return
+non-deterministic data in the form of `Info` and `Log`. The `Log` is
+intended for the literal output from the application's logger, while the
+`Info` is any additional info that should be returned.
+
+The first time a new blockchain is started, Tendermint calls
+`InitChain`. From then on, the Block Execution Sequence that causes the
+committed state to be updated is as follows:
+
+`BeginBlock, [DeliverTx], EndBlock, Commit`
+
+where one `DeliverTx` is called for each transaction in the block.
+Cryptographic commitments to the results of DeliverTx, EndBlock, and
+Commit are included in the header of the next block.
+
+Tendermint opens three connections to the application to handle the
+different message types:
+
+- `Consensus Connection - InitChain, BeginBlock, DeliverTx, EndBlock, Commit`
+- `Mempool Connection - CheckTx`
+- `Info Connection - Info, SetOption, Query`
+
+The `Flush` message is used on every connection, and the `Echo` message
+is only used for debugging.
+
+Note that messages may be sent concurrently across all connections - a
+typical application will thus maintain a distinct state for each
+connection. They may be referred to as the `DeliverTx state`, the
+`CheckTx state`, and the `Commit state` respectively.
+
+See below for more details on the message types and how they are used.
+
+## Request/Response Messages
+
+### Echo
+
+- **Request**:
+  - `Message (string)`: A string to echo back
+- **Response**:
+  - `Message (string)`: The input string
+- **Usage**:
+  - Echo a string to test an abci client/server implementation
+
+### Flush
+
+- **Usage**:
+  - Signals that messages queued on the client should be flushed to
+    the server. It is called periodically by the client
+    implementation to ensure asynchronous requests are actually
+    sent, and is called immediately to make a synchronous request,
+    which returns when the Flush response comes back.
+
+### Info
+
+- **Request**:
+  - `Version (string)`: The Tendermint version
+- **Response**:
+  - `Data (string)`: Some arbitrary information
+  - `Version (Version)`: Version information
+  - `LastBlockHeight (int64)`: Latest block for which the app has
+    called Commit
+  - `LastBlockAppHash ([]byte)`: Latest result of Commit
+- **Usage**:
+  - Return information about the application state.
+  - Used to sync Tendermint with the application during a handshake
+    that happens on startup.
+  - Tendermint expects `LastBlockAppHash` and `LastBlockHeight` to
+    be updated during `Commit`, ensuring that `Commit` is never
+    called twice for the same block height.
+
+### SetOption
+
+- **Request**:
+  - `Key (string)`: Key to set
+  - `Value (string)`: Value to set for key
+- **Response**:
+  - `Code (uint32)`: Response code
+  - `Log (string)`: The output of the application's logger. May
+    be non-deterministic.
+  - `Info (string)`: Additional information. May
+    be non-deterministic.
+- **Usage**:
+  - Set non-consensus critical application specific options.
+  - e.g. Key="min-fee", Value="100fermion" could set the minimum fee
+    required for CheckTx (but not DeliverTx - that would be
+    consensus critical).
+
+### InitChain
+
+- **Request**:
+  - `Validators ([]Validator)`: Initial genesis validators
+  - `AppStateBytes ([]byte)`: Serialized initial application state
+- **Response**:
+  - `ConsensusParams (ConsensusParams)`: Initial
+    consensus-critical parameters.
+  - `Validators ([]Validator)`: Initial validator set.
+- **Usage**:
+  - Called once upon genesis.
+
+### Query
+
+- **Request**:
+  - `Data ([]byte)`: Raw query bytes. Can be used with or in lieu
+    of Path.
+  - `Path (string)`: Path of request, like an HTTP GET path. Can be
+    used with or in lieu of Data.
+    - Apps MUST interpret '/store' as a query by key on the
+      underlying store. The key SHOULD be specified in the Data field.
+    - Apps SHOULD allow queries over specific types like
+      '/accounts/...' or '/votes/...'
+  - `Height (int64)`: The block height for which you want the query
+    (default=0 returns data for the latest committed block). Note
+    that this is the height of the block containing the
+    application's Merkle root hash, which represents the state as it
+    was after committing the block at Height-1
+  - `Prove (bool)`: Return Merkle proof with response if possible
+- **Response**:
+  - `Code (uint32)`: Response code.
+  - `Log (string)`: The output of the application's logger. May
+    be non-deterministic.
+  - `Info (string)`: Additional information. May
+    be non-deterministic.
+  - `Index (int64)`: The index of the key in the tree.
+  - `Key ([]byte)`: The key of the matching data.
+  - `Value ([]byte)`: The value of the matching data.
+  - `Proof ([]byte)`: Proof for the data, if requested.
+  - `Height (int64)`: The block height from which data was derived.
+    Note that this is the height of the block containing the
+    application's Merkle root hash, which represents the state as it
+    was after committing the block at Height-1
+- **Usage**:
+  - Query for data from the application at current or past height.
+  - Optionally return Merkle proof.
+
+### BeginBlock
+
+- **Request**:
+  - `Hash ([]byte)`: The block's hash. This can be derived from the
+    block header.
+  - `Header (struct{})`: The block header
+  - `Validators ([]SigningValidator)`: List of validators in the current validator
+    set and whether or not they signed a vote in the LastCommit
+  - `ByzantineValidators ([]Evidence)`: List of evidence of
+    validators that acted maliciously
+- **Response**:
+  - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+- **Usage**:
+  - Signals the beginning of a new block. Called prior to
+    any DeliverTxs.
+  - The header is expected to at least contain the Height.
+  - The `Validators` and `ByzantineValidators` can be used to
+    determine rewards and punishments for the validators.
+
+### CheckTx
+
+- **Request**:
+  - `Tx ([]byte)`: The request transaction bytes
+- **Response**:
+  - `Code (uint32)`: Response code
+  - `Data ([]byte)`: Result bytes, if any.
+  - `Log (string)`: The output of the application's logger. May
+    be non-deterministic.
+  - `Info (string)`: Additional information. May
+    be non-deterministic.
+  - `GasWanted (int64)`: Amount of gas requested for the transaction.
+  - `GasUsed (int64)`: Amount of gas consumed by the transaction.
+  - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+    transactions (eg. by account).
+  - `Fee (cmn.KI64Pair)`: Fee paid for the transaction.
+- **Usage**: Validate a mempool transaction, prior to broadcasting
+  or proposing. CheckTx should perform stateful but light-weight
+  checks of the validity of the transaction (like checking signatures
+  and account balances), but need not execute in full (like running a
+  smart contract).
+
+  Tendermint runs CheckTx and DeliverTx concurrently with each other,
+  though on distinct ABCI connections - the mempool connection and the
+  consensus connection, respectively.
+
+  The application should maintain a separate state to support CheckTx.
+  This state can be reset to the latest committed state during
+  `Commit`. Before calling Commit, Tendermint will lock and flush the mempool,
+  ensuring that all existing CheckTx are responded to and no new ones can
+  begin. After `Commit`, the mempool will rerun
+  CheckTx for all remaining transactions, throwing out any that are no longer valid.
+ Then the mempool will unlock and start sending CheckTx again. + + Keys and values in Tags must be UTF-8 encoded strings (e.g. + "account.owner": "Bob", "balance": "100.0", "date": "2018-01-02") + +### DeliverTx + +- **Request**: + - `Tx ([]byte)`: The request transaction bytes. +- **Response**: + - `Code (uint32)`: Response code. + - `Data ([]byte)`: Result bytes, if any. + - `Log (string)`: The output of the application's logger. May + be non-deterministic. + - `Info (string)`: Additional information. May + be non-deterministic. + - `GasWanted (int64)`: Amount of gas requested for transaction. + - `GasUsed (int64)`: Amount of gas consumed by transaction. + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing + transactions (eg. by account). + - `Fee (cmn.KI64Pair)`: Fee paid for the transaction. +- **Usage**: + - Deliver a transaction to be executed in full by the application. + If the transaction is valid, returns CodeType.OK. + - Keys and values in Tags must be UTF-8 encoded strings (e.g. + "account.owner": "Bob", "balance": "100.0", + "time": "2018-01-02T12:30:00Z") + +### EndBlock + +- **Request**: + - `Height (int64)`: Height of the block just executed. +- **Response**: + - `ValidatorUpdates ([]Validator)`: Changes to validator set (set + voting power to 0 to remove). + - `ConsensusParamUpdates (ConsensusParams)`: Changes to + consensus-critical time, size, and other parameters. + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing +- **Usage**: + - Signals the end of a block. + - Called prior to each Commit, after all transactions. + - Validator set and consensus params are updated with the result. + - Validator pubkeys are expected to be go-wire encoded. + +### Commit + +- **Response**: + - `Data ([]byte)`: The Merkle root hash +- **Usage**: + - Persist the application state. + - Return a Merkle root hash of the application state. + - It's critical that all application instances return the + same hash. If not, they will not be able to agree on the next + block, because the hash is included in the next block! + +## Data Messages + +### Header + +- **Fields**: + - `ChainID (string)`: ID of the blockchain + - `Height (int64)`: Height of the block in the chain + - `Time (int64)`: Unix time of the block + - `NumTxs (int32)`: Number of transactions in the block + - `TotalTxs (int64)`: Total number of transactions in the blockchain until + now + - `LastBlockHash ([]byte)`: Hash of the previous (parent) block + - `ValidatorsHash ([]byte)`: Hash of the validator set for this block + - `AppHash ([]byte)`: Data returned by the last call to `Commit` - typically the + Merkle root of the application state after executing the previous block's + transactions + - `Proposer (Validator)`: Original proposer for the block +- **Usage**: + - Provided in RequestBeginBlock + - Provides important context about the current state of the blockchain - + especially height and time. + - Provides the proposer of the current block, for use in proposer-based + reward mechanisms. 
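+
+As a hedged illustration (not part of the spec - `MyApp` and its fields
+are hypothetical, and the Go field names follow the types generated from
+the protobuf schema), an application might capture this context when it
+receives the header in `RequestBeginBlock`:
+
+```
+func (app *MyApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
+	header := req.Header
+	app.lastHeight = header.Height     // height of the block being executed
+	app.lastTime = header.Time         // Unix time of the block
+	app.lastProposer = header.Proposer // for proposer-based rewards
+	return types.ResponseBeginBlock{}
+}
+```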
+
+### Validator
+
+- **Fields**:
+  - `Address ([]byte)`: Address of the validator (hash of the public key)
+  - `PubKey (PubKey)`: Public key of the validator
+  - `Power (int64)`: Voting power of the validator
+- **Usage**:
+  - Provides all identifying information about the validator
+
+### SigningValidator
+
+- **Fields**:
+  - `Validator (Validator)`: A validator
+  - `SignedLastBlock (bool)`: Indicates whether or not the validator signed
+    the last block
+- **Usage**:
+  - Indicates whether a validator signed the last block, allowing for rewards
+    based on validator availability
+
+### PubKey
+
+- **Fields**:
+  - `Type (string)`: Type of the public key. A simple string like `"ed25519"`.
+    In the future, may indicate a serialization algorithm to parse the `Data`,
+    for instance `"amino"`.
+  - `Data ([]byte)`: Public key data. For a simple public key, it's just the
+    raw bytes. If the `Type` indicates an encoding algorithm, this is the
+    encoded public key.
+- **Usage**:
+  - A generic and extensible typed public key
+
+### Evidence
+
+- **Fields**:
+  - `Type (string)`: Type of the evidence. A hierarchical path like
+    "duplicate/vote".
+  - `Validator (Validator)`: The offending validator
+  - `Height (int64)`: Height when the offense was committed
+  - `Time (int64)`: Unix time of the block at height `Height`
+  - `TotalVotingPower (int64)`: Total voting power of the validator set at
+    height `Height`
diff --git a/docs/app-dev/app-architecture.md b/docs/app-dev/app-architecture.md
new file mode 100644
index 000000000..9ce0fae9f
--- /dev/null
+++ b/docs/app-dev/app-architecture.md
@@ -0,0 +1,51 @@
+# Application Architecture Guide
+
+Here we provide a brief guide on the recommended architecture of a
+Tendermint blockchain application.
+
+The following diagram provides a superb example:
+
+
+
+The end-user application here is the Cosmos Voyager, at the bottom left.
+Voyager communicates with a REST API exposed by a local Light-Client
+Daemon. The Light-Client Daemon is an application-specific program that
+communicates with Tendermint nodes and verifies Tendermint light-client
+proofs through the Tendermint Core RPC. The Tendermint Core process
+communicates with a local ABCI application, where the user query or
+transaction is actually processed.
+
+The ABCI application must be a deterministic result of the Tendermint
+consensus - any external influence on the application state that didn't
+come through Tendermint could cause a consensus failure. Thus _nothing_
+should communicate with the application except Tendermint via ABCI.
+
+If the application is written in Go, it can be compiled into the
+Tendermint binary. Otherwise, it should use a unix socket to communicate
+with Tendermint. If it's necessary to use TCP, extra care must be taken
+to encrypt and authenticate the connection.
+
+All reads from the app happen through the Tendermint `/abci_query`
+endpoint. All writes to the app happen through the Tendermint
+`/broadcast_tx_*` endpoints.
+
+The Light-Client Daemon is what provides light clients (end users) with
+nearly all the security of a full node. It formats and broadcasts
+transactions, and verifies proofs of queries and transaction results.
+Note that it need not be a daemon - the Light-Client logic could instead
+be implemented in the same process as the end-user application.
+
+Note that for ABCI applications with weaker security requirements, the
+functionality of the Light-Client Daemon can be moved into the ABCI
+application process itself. That said, exposing the application process
+to anything besides Tendermint over ABCI requires extreme caution, as
+all transactions, and possibly all queries, should still pass through
+Tendermint.
+
+See the following for more extensive documentation:
+
+- [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
+- [Tendermint RPC Docs](https://tendermint.github.io/slate/)
+- [Tendermint in Production](https://github.com/tendermint/tendermint/pull/1618)
+- [Tendermint Basics](https://tendermint.readthedocs.io/en/master/using-tendermint.html)
+- [ABCI spec](https://github.com/tendermint/tendermint/blob/develop/abci/docs/abci-spec.md)
diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md
new file mode 100644
index 000000000..a795673f1
--- /dev/null
+++ b/docs/app-dev/app-development.md
@@ -0,0 +1,558 @@
+# Application Development Guide
+
+## ABCI Design
+
+The purpose of ABCI is to provide a clean interface between state
+transition machines on one computer and the mechanics of their
+replication across multiple computers. The former we call 'application
+logic' and the latter the 'consensus engine'. Application logic
+validates transactions and optionally executes them against some
+persistent state. A consensus engine ensures all transactions are
+replicated in the same order on every machine. We call each machine in a
+consensus engine a 'validator', and each validator runs the same
+transactions through the same application logic. In particular, we are
+interested in blockchain-style consensus engines, where transactions are
+committed in hash-linked blocks.
+
+The ABCI design has a few distinct components:
+
+- message protocol
+  - pairs of request and response messages
+  - consensus makes requests, application responds
+  - defined using protobuf
+- server/client
+  - consensus engine runs the client
+  - application runs the server
+  - two implementations:
+    - async raw bytes
+    - grpc
+- blockchain protocol
+  - abci is connection oriented
+  - Tendermint Core maintains three connections:
+    - [mempool connection](#mempool-connection): for checking if
+      transactions should be relayed before they are committed;
+      only uses `CheckTx`
+    - [consensus connection](#consensus-connection): for executing
+      transactions that have been committed. The message sequence is,
+      for every block, `BeginBlock, [DeliverTx, ...], EndBlock, Commit`
+    - [query connection](#query-connection): for querying the
+      application state; only uses Query and Info
+
+The mempool and consensus logic act as clients, and each maintains an
+open ABCI connection with the application, which hosts an ABCI server.
+
+## Message Protocol
+
+The message protocol consists of pairs of requests and responses. Some
+messages have no fields, while others may include byte-arrays, strings,
+or integers. See the `message Request` and `message Response`
+definitions in [the protobuf definition
+file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto),
+and the [protobuf
+documentation](https://developers.google.com/protocol-buffers/docs/overview)
+for more details.
+
+For each request, a server should respond with the corresponding
+response, where the order of requests is preserved in the order of
+responses.
+
+## Server
+
+To use ABCI in your programming language of choice, there must be an ABCI
+server in that language. 
Tendermint supports two kinds of implementation +of the server: + +- Asynchronous, raw socket server (Tendermint Socket Protocol, also + known as TSP or Teaspoon) +- GRPC + +Both can be tested using the `abci-cli` by setting the `--abci` flag +appropriately (ie. to `socket` or `grpc`). + +See examples, in various stages of maintenance, in +[Go](https://github.com/tendermint/tendermint/tree/develop/abci/server), +[JavaScript](https://github.com/tendermint/js-abci), +[Python](https://github.com/tendermint/tendermint/tree/develop/abci/example/python3/abci), +[C++](https://github.com/mdyring/cpp-tmsp), and +[Java](https://github.com/jTendermint/jabci). + +### GRPC + +If GRPC is available in your language, this is the easiest approach, +though it will have significant performance overhead. + +To get started with GRPC, copy in the [protobuf +file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto) +and compile it using the GRPC plugin for your language. For instance, +for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`. +See the [grpc documentation for more details](http://www.grpc.io/docs/). +`protoc` will autogenerate all the necessary code for ABCI client and +server in your language, including whatever interface your application +must satisfy to be used by the ABCI server for handling requests. + +### TSP + +If GRPC is not available in your language, or you require higher +performance, or otherwise enjoy programming, you may implement your own +ABCI server using the Tendermint Socket Protocol, known affectionately +as Teaspoon. The first step is still to auto-generate the relevant data +types and codec in your language using `protoc`. Messages coming over +the socket are proto3 encoded, but additionally length-prefixed to +facilitate use as a streaming protocol. proto3 doesn't have an +official length-prefix standard, so we use our own. The first byte in +the prefix represents the length of the Big Endian encoded length. The +remaining bytes in the prefix are the Big Endian encoded length. + +For example, if the proto3 encoded ABCI message is 0xDEADBEEF (4 +bytes), the length-prefixed message is 0x0104DEADBEEF. If the proto3 +encoded ABCI message is 65535 bytes long, the length-prefixed message +would be like 0x02FFFF.... + +Note this prefixing does not apply for grpc. + +An ABCI server must also be able to support multiple connections, as +Tendermint uses three connections. + +## Client + +There are currently two use-cases for an ABCI client. One is a testing +tool, as in the `abci-cli`, which allows ABCI requests to be sent via +command line. The other is a consensus engine, such as Tendermint Core, +which makes requests to the application every time a new transaction is +received or a block is committed. + +It is unlikely that you will need to implement a client. For details of +our client, see +[here](https://github.com/tendermint/tendermint/tree/develop/abci/client). + +Most of the examples below are from [kvstore +application](https://github.com/tendermint/tendermint/blob/develop/abci/example/kvstore/kvstore.go), +which is a part of the abci repo. [persistent_kvstore +application](https://github.com/tendermint/tendermint/blob/develop/abci/example/kvstore/persistent_kvstore.go) +is used to show `BeginBlock`, `EndBlock` and `InitChain` example +implementations. + +## Blockchain Protocol + +In ABCI, a transaction is simply an arbitrary length byte-array. 
It is the application's responsibility to define the transaction codec
+as it sees fit, and to use it for both CheckTx and DeliverTx.
+
+Note that there are two distinct means for running transactions,
+corresponding to stages of 'awareness' of the transaction in the
+network. The first stage is when a transaction is received by a
+validator from a client into the so-called mempool or transaction pool
+- this is where we use CheckTx. The second is when the transaction is
+successfully committed on more than 2/3 of validators - where we use
+DeliverTx. In the former case, it may not be necessary to run all the
+state transitions associated with the transaction, as the transaction
+may not ultimately be committed until some much later time, when the
+result of its execution will be different. For instance, an Ethereum
+ABCI app would check signatures and amounts in CheckTx, but would not
+actually execute any contract code until the DeliverTx, so as to avoid
+executing state transitions that have not been finalized.
+
+To formalize the distinction further, two explicit ABCI connections are
+made between Tendermint Core and the application: the mempool connection
+and the consensus connection. We also make a third connection, the query
+connection, to query the local state of the app.
+
+### Mempool Connection
+
+The mempool connection is used _only_ for CheckTx requests. Transactions
+are run using CheckTx in the same order they were received by the
+validator. If the CheckTx returns `OK`, the transaction is kept in
+memory and relayed to other peers in the same order it was received.
+Otherwise, it is discarded.
+
+CheckTx requests run concurrently with block processing, so they should
+run against a copy of the main application state which is reset after
+every block. This copy is necessary to track transitions made by a
+sequence of CheckTx requests before they are included in a block. When a
+block is committed, the application must reset the mempool
+state to the latest committed state. Tendermint Core will then filter
+through all transactions in the mempool, removing any that were included
+in the block, and re-run the rest using CheckTx against the post-Commit
+mempool state (this behaviour can be turned off with
+`[mempool] recheck = false`).
+
+In go:
+
+```
+func (app *KVStoreApplication) CheckTx(tx []byte) types.Result {
+	return types.OK
+}
+```
+
+In Java:
+
+```
+ResponseCheckTx requestCheckTx(RequestCheckTx req) {
+    byte[] transaction = req.getTx().toByteArray();
+
+    // validate transaction
+
+    if (notValid) {
+        return ResponseCheckTx.newBuilder().setCode(CodeType.BadNonce).setLog("invalid tx").build();
+    } else {
+        return ResponseCheckTx.newBuilder().setCode(CodeType.OK).build();
+    }
+}
+```
+
+### Replay Protection
+
+To prevent old transactions from being replayed, CheckTx must implement
+replay protection.
+
+Tendermint provides the first defence layer by keeping a lightweight
+in-memory cache of the last 100k (`[mempool] cache_size`) transactions in
+the mempool. If Tendermint has just started or the clients sent more than
+100k transactions, old transactions may be sent to the application. So
+it is important that CheckTx implement some logic to handle them.
+
+There are cases where a transaction will (or may) become valid in some
+future state, in which case you probably want to disable Tendermint's
+cache. You can do that by setting `[mempool] cache_size = 0` in the
+config.
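+
+As a rough sketch of what such replay protection can look like - the tx
+encoding, the `decodeTx` helper, and the per-sender nonce bookkeeping
+are all hypothetical; only the `ResponseCheckTx` shape comes from ABCI:
+
+```
+func (app *MyApp) CheckTx(tx []byte) types.ResponseCheckTx {
+	sender, nonce, err := decodeTx(tx) // hypothetical codec
+	if err != nil {
+		return types.ResponseCheckTx{Code: 1, Log: "cannot decode tx"}
+	}
+	if nonce <= app.checkState.lastNonce[sender] {
+		// Already seen (or stale): reject, so the tx is neither kept
+		// in the mempool nor relayed to peers.
+		return types.ResponseCheckTx{Code: 2, Log: "replayed or stale nonce"}
+	}
+	app.checkState.lastNonce[sender] = nonce
+	return types.ResponseCheckTx{Code: 0}
+}
+```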
+
+### Consensus Connection
+
+The consensus connection is used only when a new block is committed, and
+communicates all information from the block in a series of requests:
+`BeginBlock, [DeliverTx, ...], EndBlock, Commit`. That is, when a block
+is committed in the consensus, we send a list of DeliverTx requests (one
+for each transaction) sandwiched by BeginBlock and EndBlock requests,
+and followed by a Commit.
+
+### DeliverTx
+
+DeliverTx is the workhorse of the blockchain. Tendermint sends the
+DeliverTx requests asynchronously but in order, and relies on the
+underlying socket protocol (ie. TCP) to ensure they are received by the
+app in order. They have already been ordered in the global consensus by
+the Tendermint protocol.
+
+DeliverTx returns an abci.Result, which includes a Code, Data, and Log.
+The code may be non-zero (non-OK), meaning the corresponding transaction
+should have been rejected by the mempool, but may have been included in
+a block by a Byzantine proposer.
+
+The block header will be updated (TODO) to include some commitment to
+the results of DeliverTx, be it a bitarray of non-OK transactions, or a
+Merkle root of the data returned by the DeliverTx requests, or both.
+
+In go:
+
+```
+// tx is either "key=value" or just arbitrary bytes
+func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
+	parts := strings.Split(string(tx), "=")
+	if len(parts) == 2 {
+		app.state.Set([]byte(parts[0]), []byte(parts[1]))
+	} else {
+		app.state.Set(tx, tx)
+	}
+	return types.OK
+}
+```
+
+In Java:
+
+```
+/**
+ * Using Protobuf types from the protoc compiler, we always start with a byte[]
+ */
+ResponseDeliverTx deliverTx(RequestDeliverTx request) {
+    byte[] transaction = request.getTx().toByteArray();
+
+    // validate your transaction
+
+    if (notValid) {
+        return ResponseDeliverTx.newBuilder().setCode(CodeType.BadNonce).setLog("transaction was invalid").build();
+    } else {
+        return ResponseDeliverTx.newBuilder().setCode(CodeType.OK).build();
+    }
+}
+```
+
+### Commit
+
+Once all processing of the block is complete, Tendermint sends the
+Commit request and blocks waiting for a response. While the mempool may
+run concurrently with block processing (the BeginBlock, DeliverTxs, and
+EndBlock), it is locked for the Commit request so that its state can be
+safely reset during Commit. This means the app _MUST NOT_ do any
+blocking communication with the mempool (ie. broadcast_tx) during
+Commit, or there will be a deadlock. Note also that all remaining
+transactions in the mempool are replayed on the mempool connection
+(CheckTx) following a commit.
+
+The app should respond to the Commit request with a byte array, which is
+the deterministic state root of the application. It is included in the
+header of the next block. It can be used to provide easily verified
+Merkle-proofs of the state of the application.
+
+It is expected that the app will persist state to disk on Commit. The
+option to have all transactions replayed from some previous block is the
+job of the [Handshake](#handshake).
+
+In go:
+
+```
+func (app *KVStoreApplication) Commit() types.Result {
+	hash := app.state.Hash()
+	return types.NewResultOK(hash, "")
+}
+```
+
+In Java:
+
+```
+ResponseCommit requestCommit(RequestCommit requestCommit) {
+
+    // update the internal app-state
+    byte[] newAppState = calculateAppState();
+
+    // and return it to the node
+    return ResponseCommit.newBuilder().setCode(CodeType.OK).setData(ByteString.copyFrom(newAppState)).build();
+}
+```
+
+### BeginBlock
+
+The BeginBlock request can be used to run some code at the beginning of
+every block. It also allows Tendermint to send the current block hash
+and header to the application, before it sends any of the transactions.
+
+The app should remember the latest height and header (ie. from which it
+has run a successful Commit) so that it can tell Tendermint where to
+pick up from when it restarts. See information on the Handshake, below.
+
+In go:
+
+```
+// Track the block hash and header information
+func (app *PersistentKVStoreApplication) BeginBlock(params types.RequestBeginBlock) {
+	// update latest block info
+	app.blockHeader = params.Header
+
+	// reset valset changes
+	app.changes = make([]*types.Validator, 0)
+}
+```
+
+In Java:
+
+```
+/*
+ * all types come from protobuf definition
+ */
+ResponseBeginBlock requestBeginBlock(RequestBeginBlock req) {
+
+    Header header = req.getHeader();
+    byte[] prevAppHash = header.getAppHash().toByteArray();
+    long prevHeight = header.getHeight();
+    long numTxs = header.getNumTxs();
+
+    // run your pre-block logic. Maybe prepare a state snapshot, message components, etc
+
+    return ResponseBeginBlock.newBuilder().build();
+}
+```
+
+### EndBlock
+
+The EndBlock request can be used to run some code at the end of every
+block. Additionally, the response may contain a list of validators,
+which can be used to update the validator set. To add a new validator or
+update an existing one, simply include them in the list returned in the
+EndBlock response. To remove one, include it in the list with a `power`
+equal to `0`. Tendermint Core will take care of updating the validator
+set. Note the change in voting power must be strictly less than 1/3 per
+block if you want a light client to be able to prove the transition
+externally. See the [light client
+docs](https://godoc.org/github.com/tendermint/tendermint/lite#hdr-How_We_Track_Validators)
+for details on how it tracks validators.
+
+In go:
+
+```
+// Update the validator set
+func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
+	return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
+}
+```
+
+In Java:
+
+```
+/*
+ * Assume that one validator changes. The new validator has a power of 10
+ */
+ResponseEndBlock requestEndBlock(RequestEndBlock req) {
+    final long currentHeight = req.getHeight();
+    final byte[] validatorPubKey = getValPubKey();
+
+    ResponseEndBlock.Builder builder = ResponseEndBlock.newBuilder();
+    builder.addDiffs(1, Types.Validator.newBuilder().setPower(10L).setPubKey(ByteString.copyFrom(validatorPubKey)).build());
+
+    return builder.build();
+}
+```
+
+### Query Connection
+
+This connection is used to query the application without engaging
+consensus. It's exposed over the Tendermint Core RPC, so clients can
+query the app without exposing a server on the app itself, but they must
+serialize each query as a single byte array. Additionally, certain
+"standardized" queries may be used to inform local decisions, for
+instance about which peers to connect to.
+
+Tendermint Core currently uses the Query connection to filter peers upon
+connecting, according to IP address or node ID. For instance,
+returning a non-OK ABCI response to either of the following queries will
+cause Tendermint to not connect to the corresponding peer:
+
+- `p2p/filter/addr/<IP>`, where `<IP>` is an IP address.
+- `p2p/filter/id/<ID>`, where `<ID>` is the hex-encoded node ID (the hash of
+  the node's p2p pubkey).
+
+Note: these query formats are subject to change!
+
+In go:
+
+```
+func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+	if reqQuery.Prove {
+		value, proof, exists := app.state.Proof(reqQuery.Data)
+		resQuery.Index = -1 // TODO make Proof return index
+		resQuery.Key = reqQuery.Data
+		resQuery.Value = value
+		resQuery.Proof = proof
+		if exists {
+			resQuery.Log = "exists"
+		} else {
+			resQuery.Log = "does not exist"
+		}
+		return
+	} else {
+		index, value, exists := app.state.Get(reqQuery.Data)
+		resQuery.Index = int64(index)
+		resQuery.Value = value
+		if exists {
+			resQuery.Log = "exists"
+		} else {
+			resQuery.Log = "does not exist"
+		}
+		return
+	}
+}
+```
+
+In Java:
+
+```
+ResponseQuery requestQuery(RequestQuery req) {
+    final boolean isProveQuery = req.getProve();
+    final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder();
+
+    if (isProveQuery) {
+        com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray());
+        final byte[] proofAsByteArray = proofResult.getAsByteArray();
+
+        responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray));
+        responseBuilder.setKey(req.getData());
+        responseBuilder.setValue(ByteString.copyFrom(proofResult.getData()));
+        responseBuilder.setLog(proofResult.getLogValue());
+    } else {
+        byte[] queryData = req.getData().toByteArray();
+
+        final com.app.example.QueryResult result = generateQueryResult(queryData);
+
+        responseBuilder.setIndex(result.getIndex());
+        responseBuilder.setValue(ByteString.copyFrom(result.getValue()));
+        responseBuilder.setLog(result.getLogValue());
+    }
+
+    return responseBuilder.build();
+}
+```
+
+### Handshake
+
+When the app or Tendermint restarts, they need to sync to a common
+height. When an ABCI connection is first established, Tendermint will
+call `Info` on the Query connection. The response should contain the
+LastBlockHeight and LastBlockAppHash - the former is the last block for
+which the app ran Commit successfully; the latter is the response from
+that Commit.
+
+Using this information, Tendermint will determine what needs to be
+replayed, if anything, against the app, to ensure both Tendermint and
+the app are synced to the latest block height.
+
+If the app returns a LastBlockHeight of 0, Tendermint will just replay
+all blocks.
+
+In go:
+
+```
+func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
+	return types.ResponseInfo{Data: cmn.Fmt("{\"size\":%v}", app.state.Size())}
+}
+```
+
+In Java:
+
+```
+ResponseInfo requestInfo(RequestInfo req) {
+    final byte[] lastAppHash = getLastAppHash();
+    final long lastHeight = getLastHeight();
+    return ResponseInfo.newBuilder().setLastBlockAppHash(ByteString.copyFrom(lastAppHash)).setLastBlockHeight(lastHeight).build();
+}
+```
+
+### Genesis
+
+`InitChain` will be called once upon genesis. `params` includes the
+initial validator set. Later on, it may be extended to take parts of the
+consensus params.
+ +In go: + +``` +// Save the validators in the merkle tree +func (app *PersistentKVStoreApplication) InitChain(params types.RequestInitChain) { + for _, v := range params.Validators { + r := app.updateValidator(v) + if r.IsErr() { + app.logger.Error("Error updating validators", "r", r) + } + } +} +``` + +In Java: + +``` +/* + * all types come from protobuf definition + */ +ResponseInitChain requestInitChain(RequestInitChain req) { + final int validatorsCount = req.getValidatorsCount(); + final List validatorsList = req.getValidatorsList(); + + validatorsList.forEach((validator) -> { + long power = validator.getPower(); + byte[] validatorPubKey = validator.getPubKey().toByteArray(); + + // do somehing for validator setup in app + }); + + return ResponseInitChain.newBuilder().build(); +} +``` diff --git a/docs/app-dev/ecosystem.json b/docs/app-dev/ecosystem.json new file mode 100644 index 000000000..363f18902 --- /dev/null +++ b/docs/app-dev/ecosystem.json @@ -0,0 +1,213 @@ +{ + "abciApps": [ + { + "name": "Cosmos SDK", + "url": "https://github.com/cosmos/cosmos-sdk", + "language": "Go", + "author": "Cosmos", + "description": + "A prototypical account based crypto currency state machine supporting plugins" + }, + { + "name": "cb-ledger", + "url": "https://github.com/block-finance/cpp-abci", + "language": "C++", + "author": "Block Finance", + "description": + "Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow" + }, + { + "name": "Clearchain", + "url": "https://github.com/tendermint/clearchain", + "language": "Go", + "author": "FXCLR", + "description": + "Application to manage a distributed ledger for money transfers that support multi-currency accounts" + }, + { + "name": "Ethermint", + "url": "https://github.com/tendermint/ethermint", + "language": "Go", + "author": "Tendermint", + "description": "The go-ethereum state machine run as an ABCI app" + }, + { + "name": "Merkle AVL Tree", + "url": "https://github.com/tendermint/merkleeyes", + "language": "Go", + "author": "Tendermint", + "description": "Tendermint IAVL tree implemented as an ABCI app" + }, + { + "name": "Burrow", + "url": "https://github.com/hyperledger/burrow", + "language": "Go", + "author": "Monax Industries", + "description": + "Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store" + }, + { + "name": "Merkle AVL Tree", + "url": "https://github.com/jTMSP/MerkleTree", + "language": "Java", + "author": "jTMSP", + "description": "Tendermint IAVL tree implemented as an ABCI app" + }, + { + "name": "TMChat", + "url": "https://github.com/wolfposd/TMChat", + "language": "Java", + "author": "jTMSP", + "description": "P2P chat using Tendermint" + }, + { + "name": "Comit", + "url": "https://github.com/zballs/comit", + "language": "Go", + "author": "Zach Balder", + "description": "Public service reporting and tracking" + }, + { + "name": "Passchain", + "url": "https://github.com/trusch/passchain", + "language": "Go", + "author": "trusch", + "description": + "Tool to securely store and share passwords, tokens and other short secrets" + }, + { + "name": "Passwerk", + "url": "https://github.com/rigelrozanski/passwerk", + "language": "Go", + "author": "Rigel Rozanski", + "description": "Encrypted storage web-utility backed by Tendermint" + }, + { + "name": "py-tendermint", + "url": "https://github.com/davebryson/py-tendermint", + "language": "Python", + "author": "Dave Bryson", + "description": + "A Python microframework for building blockchain applications with 
Tendermint" + }, + { + "name": "Stratumn SDK", + "url": "https://github.com/stratumn/sdk", + "language": "Go", + "author": "Stratumn", + "description": "SDK for Proof-of-Process networks" + }, + { + "name": "Lotion", + "url": "https://github.com/keppel/lotion", + "language": "Javascript", + "author": "Judd Keppel", + "description": + "A Javascript microframework for building blockchain applications with Tendermint" + }, + { + "name": "Tendermint Blockchain Chat App", + "url": "https://github.com/SaifRehman/tendermint-chat-app/", + "language": "Javascript", + "author": "Saif Rehman", + "description": + "This is a minimal chat application based on Tendermint using Lotion.js in 30 lines of code!. It also includes web/mobile application built using Ionic 3." + }, + { + "name": "BigchainDB", + "url": "https://github.com/bigchaindb/bigchaindb", + "language": "Python", + "author": "BigchainDB GmbH and the BigchainDB community", + "description": "Blockchain database" + }, + { + "name": "Mint", + "url": "https://github.com/Hashnode/mint", + "language": "Go", + "author": "Hashnode", + "description": "Build blockchain-powered social apps" + } + ], + "abciServers": [ + { + "name": "abci", + "url": "https://github.com/tendermint/abci", + "language": "Go", + "author": "Tendermint" + }, + { + "name": "js-abci", + "url": "https://github.com/tendermint/js-abci", + "language": "Javascript", + "author": "Tendermint" + }, + { + "name": "cpp-tmsp", + "url": "https://github.com/mdyring/cpp-tmsp", + "language": "C++", + "author": "Martin Dyring" + }, + { + "name": "jabci", + "url": "https://github.com/jTendermint/jabci", + "language": "Java", + "author": "jTendermint" + }, + { + "name": "ocaml-tmsp", + "url": "https://github.com/zballs/ocaml-tmsp", + "language": "Ocaml", + "author": "Zach Balder" + }, + { + "name": "abci_server", + "url": "https://github.com/KrzysiekJ/abci_server", + "language": "Erlang", + "author": "Krzysztof Jurewicz" + }, + { + "name": "py-abci", + "url": "https://github.com/davebryson/py-abci", + "language": "Python", + "author": "Dave Bryson" + }, + { + "name": "Spearmint", + "url": "https://github.com/dennismckinnon/spearmint", + "language": "Javascript", + "author": "Dennis McKinnon" + } + ], + "deploymentTools": [ + { + "name": "mintnet-kubernetes", + "url": "https://github.com/tendermint/tools", + "technology": "Docker and Kubernetes", + "author": "Tendermint", + "description": + "Deploy a Tendermint test network using Google's kubernetes" + }, + { + "name": "terraforce", + "url": "https://github.com/tendermint/tools", + "technology": "Terraform", + "author": "Tendermint", + "description": + "Terraform + our custom terraforce tool; deploy a production Tendermint network with load balancing over multiple AWS availability zones" + }, + { + "name": "ansible-tendermint", + "url": "https://github.com/tendermint/tools", + "technology": "Ansible", + "author": "Tendermint", + "description": "Ansible playbooks + Tendermint" + }, + { + "name": "brooklyn-tendermint", + "url": "https://github.com/cloudsoft/brooklyn-tendermint", + "technology": "Clocker for Apache Brooklyn ", + "author": "Cloudsoft", + "description": "Deploy a tendermint test network in docker containers " + } + ] +} diff --git a/docs/app-dev/ecosystem.md b/docs/app-dev/ecosystem.md new file mode 100644 index 000000000..7960e6c0d --- /dev/null +++ b/docs/app-dev/ecosystem.md @@ -0,0 +1,21 @@ +# Ecosystem + +The growing list of applications built using various pieces of the +Tendermint stack can be found at: + +- 
https://tendermint.com/ecosystem + +We thank the community for their contributions thus far and welcome the +addition of new projects. A pull request can be submitted to [this +file](https://github.com/tendermint/tendermint/blob/master/docs/app-dev/ecosystem.json) +to include your project. + +## Other Tools + +See [deploy testnets](./deploy-testnets) for information about all +the tools built by Tendermint. We have Kubernetes, Ansible, and +Terraform integrations. + +For upgrading from older to newer versions of tendermint and to migrate +your chain data, see [tm-migrator](https://github.com/hxzqlh/tm-tools) +written by @hxzqlh. diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md new file mode 100644 index 000000000..cfc614ddc --- /dev/null +++ b/docs/app-dev/getting-started.md @@ -0,0 +1,299 @@ +# Getting Started + +## First Tendermint App + +As a general purpose blockchain engine, Tendermint is agnostic to the +application you want to run. So, to run a complete blockchain that does +something useful, you must start two programs: one is Tendermint Core, +the other is your application, which can be written in any programming +language. Recall from [the intro to +ABCI](./introduction.md#ABCI-Overview) that Tendermint Core handles all +the p2p and consensus stuff, and just forwards transactions to the +application when they need to be validated, or when they're ready to be +committed to a block. + +In this guide, we show you some examples of how to run an application +using Tendermint. + +### Install + +The first apps we will work with are written in Go. To install them, you +need to [install Go](https://golang.org/doc/install) and put +`$GOPATH/bin` in your `$PATH`; see +[here](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH) for +more info. + +Then run + +``` +go get github.com/tendermint/tendermint +cd $GOPATH/src/github.com/tendermint/tendermint +make get_tools +make get_vendor_deps +make install_abci +``` + +Now you should have the `abci-cli` installed; you'll see a couple of +commands (`counter` and `kvstore`) that are example applications written +in Go. See below for an application written in JavaScript. + +Now, let's run some apps! + +## KVStore - A First Example + +The kvstore app is a [Merkle +tree](https://en.wikipedia.org/wiki/Merkle_tree) that just stores all +transactions. If the transaction contains an `=`, e.g. `key=value`, then +the `value` is stored under the `key` in the Merkle tree. Otherwise, the +full transaction bytes are stored as the key and the value. + +Let's start a kvstore application. + +``` +abci-cli kvstore +``` + +In another terminal, we can start Tendermint. If you have never run +Tendermint before, use: + +``` +tendermint init +tendermint node +``` + +If you have used Tendermint, you may want to reset the data for a new +blockchain by running `tendermint unsafe_reset_all`. Then you can run +`tendermint node` to start Tendermint, and connect to the app. For more +details, see [the guide on using Tendermint](./using-tendermint.md). + +You should see Tendermint making blocks! We can get the status of our +Tendermint node as follows: + +``` +curl -s localhost:26657/status +``` + +The `-s` just silences `curl`. For nicer output, pipe the result into a +tool like [jq](https://stedolan.github.io/jq/) or `json_pp`. + +Now let's send some transactions to the kvstore. 
+
+```
+curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"'
+```
+
+Note the single quote (`'`) around the url, which ensures that the
+double quotes (`"`) are not escaped by bash. This command sent a
+transaction with bytes `abcd`, so `abcd` will be stored as both the key
+and the value in the Merkle tree. The response should look something
+like:
+
+```
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "check_tx": {
+      "fee": {}
+    },
+    "deliver_tx": {
+      "tags": [
+        {
+          "key": "YXBwLmNyZWF0b3I=",
+          "value": "amFl"
+        },
+        {
+          "key": "YXBwLmtleQ==",
+          "value": "YWJjZA=="
+        }
+      ],
+      "fee": {}
+    },
+    "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39",
+    "height": 14
+  }
+}
+```
+
+We can confirm that our transaction worked and the value got stored by
+querying the app:
+
+```
+curl -s 'localhost:26657/abci_query?data="abcd"'
+```
+
+The result should look like:
+
+```
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "response": {
+      "log": "exists",
+      "index": "-1",
+      "key": "YWJjZA==",
+      "value": "YWJjZA=="
+    }
+  }
+}
+```
+
+Note the `value` in the result (`YWJjZA==`); this is the base64-encoding
+of the ASCII of `abcd`. You can verify this in a python 2 shell by
+running `"YWJjZA==".decode('base64')` or in a python 3 shell by running
+`import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')`.
+Stay tuned for a future release that [makes this output more
+human-readable](https://github.com/tendermint/tendermint/issues/1794).
+
+Now let's try setting a different key and value:
+
+```
+curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"'
+```
+
+Now if we query for `name`, we should get `satoshi`, or `c2F0b3NoaQ==`
+in base64:
+
+```
+curl -s 'localhost:26657/abci_query?data="name"'
+```
+
+Try some other transactions and queries to make sure everything is
+working!
+
+## Counter - Another Example
+
+Now that we've got the hang of it, let's try another application, the
+`counter` app.
+
+The counter app doesn't use a Merkle tree; it just counts how many times
+we've sent a transaction, or committed the state.
+
+This application has two modes: `serial=off` and `serial=on`.
+
+When `serial=on`, transactions must be a big-endian encoded incrementing
+integer, starting at 0.
+
+If `serial=off`, there are no restrictions on transactions.
+
+In a live blockchain, transactions collect in memory before they are
+committed into blocks. To avoid wasting resources on invalid
+transactions, ABCI provides the `CheckTx` message, which application
+developers can use to accept or reject transactions, before they are
+stored in memory or gossiped to other peers.
+
+In this instance of the counter app, with `serial=on`, `CheckTx` only
+allows transactions whose integer is greater than the last committed
+one.
+
+Let's kill the previous instance of `tendermint` and the `kvstore`
+application, and start the counter app. We can enable `serial=on` with a
+flag:
+
+```
+abci-cli counter --serial
+```
+
+In another window, reset then start Tendermint:
+
+```
+tendermint unsafe_reset_all
+tendermint node
+```
+
+Once again, you can see the blocks streaming by. Let's send some
+transactions. Since we have set `serial=on`, the first transaction must
+be the number `0`:
+
+```
+curl localhost:26657/broadcast_tx_commit?tx=0x00
+```
+
+Note the empty (hence successful) response. The next transaction must be
+the number `1`. If, instead, we try to send a `5`, we get an error:
If instead we try to send a `5`, we get an error:

```
> curl localhost:26657/broadcast_tx_commit?tx=0x05
{
  "jsonrpc": "2.0",
  "id": "",
  "result": {
    "check_tx": {
      "fee": {}
    },
    "deliver_tx": {
      "code": 2,
      "log": "Invalid nonce. Expected 1, got 5",
      "fee": {}
    },
    "hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
    "height": 34
  }
}
```

But if we send a `1`, it works again:

```
> curl localhost:26657/broadcast_tx_commit?tx=0x01
{
  "jsonrpc": "2.0",
  "id": "",
  "result": {
    "check_tx": {
      "fee": {}
    },
    "deliver_tx": {
      "fee": {}
    },
    "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
    "height": 60
  }
}
```

For more details on the `broadcast_tx` API, see [the guide on using
Tendermint](./using-tendermint.md).

## CounterJS - Example in Another Language

We also want to run applications in another language - in this case,
we'll run a JavaScript version of the `counter`. To run it, you'll need
to [install node](https://nodejs.org/en/download/).

You'll also need to fetch the relevant repository from
[here](https://github.com/tendermint/js-abci) and install it. As Go
devs, we keep all our code under `$GOPATH`, so run:

```
go get github.com/tendermint/js-abci &> /dev/null
cd $GOPATH/src/github.com/tendermint/js-abci/example
npm install
cd ..
```

Kill the previous `counter` and `tendermint` processes. Now run the app:

```
node example/counter.js
```

In another window, reset and start `tendermint`:

```
tendermint unsafe_reset_all
tendermint node
```

Once again, you should see blocks streaming by - but now, our
application is written in JavaScript! Try sending some transactions, and
like before, the results should be the same:

```
curl localhost:26657/broadcast_tx_commit?tx=0x00 # ok
curl localhost:26657/broadcast_tx_commit?tx=0x05 # invalid nonce
curl localhost:26657/broadcast_tx_commit?tx=0x01 # ok
```

Neat, eh?
diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md
new file mode 100644
index 000000000..3bca10959
--- /dev/null
+++ b/docs/app-dev/indexing-transactions.md
@@ -0,0 +1,97 @@
# Indexing Transactions

Tendermint allows you to index transactions and later query or subscribe
to their results.

Let's take a look at the `[tx_index]` config section:

```
##### transactions indexer configuration options #####
[tx_index]

# What indexer to use for transactions
#
# Options:
#   1) "null" (default)
#   2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"

# Comma-separated list of tags to index (by default the only tag is tx hash)
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""

# When set to true, tells the indexer to index all tags. Note this may not
# be desirable (see the comment above). IndexTags has precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
index_all_tags = false
```

By default, Tendermint will index all transactions by their respective
hashes using an embedded simple indexer. Note: we are planning to add
more options in the future (e.g., a PostgreSQL indexer).

## Adding tags

In your application's `DeliverTx` method, add the `Tags` field with
pairs of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance":
"100.0", "date": "2018-01-02").
Example:

```
func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
    ...
    tags := []cmn.KVPair{
        {Key: []byte("account.name"), Value: []byte("igor")},
        {Key: []byte("account.address"), Value: []byte("0xdeadbeef")},
        {Key: []byte("tx.amount"), Value: []byte("7")},
    }
    return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
}
```

If you want Tendermint to index transactions only by the "account.name"
tag, set `tx_index.index_tags="account.name"` in the config. If you want
to index all tags, set `index_all_tags=true`.

Note there are a few predefined tags:

- `tm.event` (event type)
- `tx.hash` (transaction's hash)
- `tx.height` (height of the block the transaction was committed in)

Tendermint will throw a warning if you try to use any of the above keys.

## Querying transactions

You can query the transaction results by calling the `/tx_search` RPC
endpoint:

```
curl "localhost:26657/tx_search?query=\"account.name='igor'\"&prove=true"
```

Check out [API docs](https://tendermint.github.io/slate/?shell#txsearch)
for more information on query syntax and other options.

## Subscribing to transactions

Clients can subscribe to transactions with the given tags via Websocket
by providing a query to the `/subscribe` RPC endpoint.

```
{
  "jsonrpc": "2.0",
  "method": "subscribe",
  "id": "0",
  "params": {
    "query": "account.name='igor'"
  }
}
```

Check out [API docs](https://tendermint.github.io/slate/#subscribe) for
more information on query syntax and other options.
diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md
new file mode 100644
index 000000000..9e7c642a0
--- /dev/null
+++ b/docs/app-dev/subscribing-to-events-via-websocket.md
@@ -0,0 +1,28 @@
# Subscribing to events via Websocket

Tendermint emits different events, to which you can subscribe via
[Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful
for third-party applications (e.g. for analysis) or for inspecting state.

[List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants)

You can subscribe to any of the events above by calling the `subscribe`
RPC method via Websocket.

```
{
  "jsonrpc": "2.0",
  "method": "subscribe",
  "id": "0",
  "params": {
    "query": "tm.event='NewBlock'"
  }
}
```

Check out [API docs](https://tendermint.github.io/slate/#subscribe) for
more information on query syntax and other options.

You can also use tags, provided you included them in the DeliverTx
response, to query transaction results. See [Indexing
transactions](./indexing-transactions.md) for details.
diff --git a/docs/architecture/README.md b/docs/architecture/README.md
new file mode 100644
index 000000000..1cfc7ddce
--- /dev/null
+++ b/docs/architecture/README.md
@@ -0,0 +1,22 @@
# Architecture Decision Records (ADR)

This is a location to record all high-level architecture decisions in the Tendermint project.

You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).

An ADR should provide:

- Context on the relevant goals and the current state
- Proposed changes to achieve the goals
- Summary of pros and cons
- References
- Changelog

Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and
justification for a change in architecture, or for the architecture of something
new.
The spec is a much more compressed and streamlined summary of everything as
it stands today.

If recorded decisions turn out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match.

Note the context/background should be written in the present tense.
diff --git a/docs/architecture/adr-001-logging.md b/docs/architecture/adr-001-logging.md
new file mode 100644
index 000000000..a11a49e14
--- /dev/null
+++ b/docs/architecture/adr-001-logging.md
@@ -0,0 +1,216 @@
# ADR 1: Logging

## Context

The current logging system in Tendermint is very static and not flexible enough.

Issues: [358](https://github.com/tendermint/tendermint/issues/358), [375](https://github.com/tendermint/tendermint/issues/375).

What we want from the new system:

- per package dynamic log levels
- dynamic logger setting (logger tied to the processing struct)
- conventions
- be more visually appealing

"dynamic" here means the ability to set things at runtime.

## Decision

### 1) An interface

First, we will need an interface for all of our libraries (`tmlibs`, Tendermint, etc.). My personal preference is the go-kit `Logger` interface (see Appendix A), but that would be too big a change. Plus we will still need levels.

```go
// log.go
type Logger interface {
    Debug(msg string, keyvals ...interface{}) error
    Info(msg string, keyvals ...interface{}) error
    Error(msg string, keyvals ...interface{}) error

    With(keyvals ...interface{}) Logger
}
```

On a side note: the difference between `Info` and `Notice` is subtle. We probably
could do without `Notice`. I don't think we need `Panic` or `Fatal` as part of
the interface. These funcs could be implemented as helpers. In fact, we already
have some in `tmlibs/common`.

- `Debug` - extended output for devs
- `Info` - all that is useful for a user
- `Error` - errors

`Notice` should become `Info`, `Warn` either `Error` or `Debug` depending on the message, `Crit` -> `Error`.

This interface should go into `tmlibs/log`. All libraries which are part of the core (tendermint/tendermint) should obey it.

### 2) Logger with our current formatting

On top of this interface, we will need to implement a stdout logger, which will be used when Tendermint is configured to output logs to STDOUT.

Many people say that they like the current output, so let's stick with it.

```
NOTE[04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
```

A couple of minor changes:

```
I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
```

Notice the level is encoded using only one char, and the timestamp now includes milliseconds.

Note: there are many other formats out there like [logfmt](https://brandur.org/logfmt).

This logger could be implemented using any existing logger - [logrus](https://github.com/sirupsen/logrus), [go-kit/log](https://github.com/go-kit/kit/tree/master/log), [zap](https://github.com/uber-go/zap), log15 - as long as it:

a) supports coloring output
b) is moderately fast (buffering)
c) conforms to the new interface, or an adapter can be written for it
d) is somewhat configurable
go-kit is my favorite so far. Check out how easy it is to color errors in red: https://github.com/go-kit/kit/blob/master/log/term/example_test.go#L12. Although coloring can only be applied to the whole string :(

```
go-kit +: flexible, modular
go-kit "-": logfmt format https://brandur.org/logfmt

logrus +: popular, feature rich (hooks), API and output is more like what we want
logrus -: not so flexible
```

```go
// tm_logger.go

// NewTmLogger returns a logger that encodes keyvals to the Writer in
// tm format.
func NewTmLogger(w io.Writer) Logger {
    return &tmLogger{kitlog.NewLogfmtLogger(w)}
}

func (l *tmLogger) SetLevel(lvl string) {
    switch lvl {
    case "debug":
        l.sourceLogger = kitlevel.NewFilter(l.sourceLogger, kitlevel.AllowDebug())
    }
}

func (l *tmLogger) Info(msg string, keyvals ...interface{}) error {
    return l.sourceLogger.Log(append([]interface{}{"msg", msg}, keyvals...)...)
}

// log.go
func With(logger Logger, keyvals ...interface{}) Logger {
    return &tmLogger{kitlog.With(logger.(*tmLogger).sourceLogger, keyvals...)}
}
```

Usage:

```go
logger := log.NewTmLogger(os.Stdout)
logger.SetLevel(config.GetString("log_level"))
node.SetLogger(log.With(logger, "node", Name))
```

**Other log formatters**

In the future, we may want other formatters like JSONFormatter.

```
{ "level": "notice", "time": "2017-04-25 14:45:08.562471297 -0400 EDT", "module": "consensus", "msg": "ABCI Replay Blocks", "appHeight": 0, "storeHeight": 0, "stateHeight": 0 }
```

### 3) Dynamic logger setting

https://dave.cheney.net/2017/01/23/the-package-level-logger-anti-pattern

This is the hardest part and where the most work will be done. The logger should be tied to the processing struct, or to the context if it adds some fields to the logger.

```go
type BaseService struct {
    log     log15.Logger
    name    string
    started uint32 // atomic
    stopped uint32 // atomic
...
}
```

BaseService already contains a `log` field, so most of the structs embedding it should be fine. We should rename it to `logger`.

The only thing missing is the ability to set the logger:

```
func (bs *BaseService) SetLogger(l log.Logger) {
  bs.logger = l
}
```

### 4) Conventions

Important keyvals should go first. Example:

```
correct
I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0
```

not

```
wrong
I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1
```

To achieve this, in most cases you'll need to add the `instance` field to the logger upon creation, not when you log a particular message:

```go
colorFn := func(keyvals ...interface{}) term.FgBgColor {
    for i := 0; i < len(keyvals)-1; i += 2 {
        if keyvals[i] == "instance" && keyvals[i+1] == 1 {
            return term.FgBgColor{Fg: term.Blue}
        } else if keyvals[i] == "instance" && keyvals[i+1] == 2 {
            return term.FgBgColor{Fg: term.Red}
        }
    }
    return term.FgBgColor{}
}
logger := term.NewLogger(os.Stdout, log.NewTmLogger, colorFn)

c1 := NewConsensusReactor(...)
c1.SetLogger(log.With(logger, "instance", 1))

c2 := NewConsensusReactor(...)
c2.SetLogger(log.With(logger, "instance", 2))
```

## Status

proposed

## Consequences

### Positive

Dynamic logger, which could be turned off for some modules at runtime. Public interface for other projects using Tendermint libraries.

### Negative

We may lose the ability to color keys in key-value pairs.
go-kit allows you to easily change the foreground/background colors of the whole string, but not of its parts.

### Neutral

## Appendix A.

I really like the minimalistic approach go-kit took with its logger: https://github.com/go-kit/kit/tree/master/log

```
type Logger interface {
    Log(keyvals ...interface{}) error
}
```

See [The Hunt for a Logger Interface](https://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide). The advantage is greater composability (check out how go-kit defines colored logging or log-leveled logging on top of this interface: https://github.com/go-kit/kit/tree/master/log).
diff --git a/docs/architecture/adr-002-event-subscription.md b/docs/architecture/adr-002-event-subscription.md
new file mode 100644
index 000000000..cc207c4af
--- /dev/null
+++ b/docs/architecture/adr-002-event-subscription.md
@@ -0,0 +1,90 @@
# ADR 2: Event Subscription

## Context

In the light client (or any other client), the user may want to **subscribe to
a subset of transactions** (rather than all of them) using `/subscribe?event=X`. For
example, I want to subscribe to all transactions associated with a particular
account. The same goes for fetching. The user may want to **fetch transactions based on
some filter** (rather than fetching all the blocks). For example, I want to get
all transactions for a particular account in the last two weeks (`tx's block
time >= '2017-06-05'`).

Currently, you can't even subscribe to "all txs" in Tendermint.

The goal is a simple and easy-to-use API for doing that.

![Tx Send Flow Diagram](img/tags1.png)

## Decision

The ABCI app returns tags with a `DeliverTx` response inside the `data` field (_for
now; later we may create a separate field_). Tags are a list of key-value pairs,
protobuf encoded.

Example data:

```json
{
  "abci.account.name": "Igor",
  "abci.account.address": "0xdeadbeef",
  "tx.gas": 7
}
```

### Subscribing to transaction events

If the user wants to receive only a subset of transactions, the ABCI app must
return a list of tags with a `DeliverTx` response. These tags will be parsed and
matched with the current queries (subscribers). If a query matches the tags, the
subscriber will get the transaction event.

```
/subscribe?query="tm.event = Tx AND tx.hash = AB0023433CF0334223212243BDD AND abci.account.invoice.number = 22"
```

A new package must be developed to replace the current `events` package. It
will allow clients to subscribe to different types of events in the future:

```
/subscribe?query="abci.account.invoice.number = 22"
/subscribe?query="abci.account.invoice.owner CONTAINS Igor"
```

### Fetching transactions

This is a bit tricky because a) we want to support a number of indexers, all of
which have different APIs, and b) we don't know whether tags will be sufficient
for most apps (I guess we'll see).

```
/txs/search?query="tx.hash = AB0023433CF0334223212243BDD AND abci.account.owner CONTAINS Igor"
/txs/search?query="abci.account.owner = Igor"
```

For historic queries we will need an indexing storage (Postgres, SQLite, ...).
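To make the tag matching concrete, here is a minimal hypothetical Go
sketch of the kind of `match` function mentioned under Consequences
below. It handles only simple `key = 'value'` equality; the names are
illustrative, not an actual API:

```go
package query

import "strings"

// Match reports whether a set of DeliverTx tags satisfies a simple
// equality query such as "abci.account.name = 'Igor'".
// Illustrative sketch only: real queries would also need numeric
// comparisons, AND clauses, CONTAINS, etc.
func Match(query string, tags map[string]string) bool {
	parts := strings.SplitN(query, "=", 2)
	if len(parts) != 2 {
		return false
	}
	key := strings.TrimSpace(parts[0])
	want := strings.Trim(strings.TrimSpace(parts[1]), "'")
	got, ok := tags[key]
	return ok && got == want
}
```

Every subscriber's query would be run against the tags of every
transaction, which is why the performance of this function shows up as a
negative consequence below.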
### Issues

- https://github.com/tendermint/basecoin/issues/91
- https://github.com/tendermint/tendermint/issues/376
- https://github.com/tendermint/tendermint/issues/287
- https://github.com/tendermint/tendermint/issues/525 (related)

## Status

proposed

## Consequences

### Positive

- same format for event notifications and search APIs
- powerful enough query

### Negative

- performance of the `match` function (when we have too many queries/subscribers)
- there is an issue when there are too many txs in the DB

### Neutral
diff --git a/docs/architecture/adr-003-abci-app-rpc.md b/docs/architecture/adr-003-abci-app-rpc.md
new file mode 100644
index 000000000..2775db077
--- /dev/null
+++ b/docs/architecture/adr-003-abci-app-rpc.md
@@ -0,0 +1,34 @@
# ADR 3: Must an ABCI-app have an RPC server?

## Context

An ABCI server could expose its own RPC server and act as a proxy to Tendermint.

The idea was for the Tendermint RPC to just be a transparent proxy to the app.
Clients need to talk to Tendermint for proofs, unless we burden all app devs
with exposing Tendermint proof stuff. It also seems less complex to lock down one
server than two, though granted it makes querying a bit more kludgy since it needs
to be passed as a `Query`. Also, **having a very standard RPC interface means
the light-client can work with all apps and handle proofs**. The only
app-specific logic is decoding the binary data to a more readable form (e.g.
JSON). This is a huge advantage for code-reuse and standardization.

## Decision

We don't expose an RPC server on any of our ABCI apps.

## Status

accepted

## Consequences

### Positive

- Unified interface for all apps

### Negative

- `Query` interface

### Neutral
diff --git a/docs/architecture/adr-004-historical-validators.md b/docs/architecture/adr-004-historical-validators.md
new file mode 100644
index 000000000..be0de22c1
--- /dev/null
+++ b/docs/architecture/adr-004-historical-validators.md
@@ -0,0 +1,38 @@
# ADR 004: Historical Validators

## Context

Right now, we can query the present validator set, but there is no history.
If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client, and we agreed it requires an enhancement of the API.

## Decision

For every block, store a new structure that contains either the latest validator set,
or the height of the last block for which the validator set changed. Note this is not
the height of the block which returned the validator set change itself, but the next block,
i.e. the first block it comes into effect for.

Storing the validators will be handled by the `state` package.

At some point in the future, we may consider more efficient storage in the case where the validators
are updated frequently - for instance by only saving the diffs, rather than the whole set.

An alternative approach suggested keeping the validator set, or diffs of it, in a Merkle IAVL tree.
While it might afford cheaper proofs that a validator set has not changed, it would be more complex,
and likely less efficient.

## Status

Accepted.

## Consequences

### Positive

- Can query old validator sets, with proof.

### Negative

- Writes an extra structure to disk with every block.
### Neutral
diff --git a/docs/architecture/adr-005-consensus-params.md b/docs/architecture/adr-005-consensus-params.md
new file mode 100644
index 000000000..6656d35b2
--- /dev/null
+++ b/docs/architecture/adr-005-consensus-params.md
@@ -0,0 +1,86 @@
# ADR 005: Consensus Params

## Context

Consensus-critical parameters controlling blockchain capacity have until now been hard-coded, loaded from a local config, or neglected.
Since they may need to be different in different networks, and potentially evolve over time within
a network, we seek to initialize them in a genesis file, and expose them through the ABCI.

While we have some specific parameters now, like maximum block and transaction size, we expect to have more in the future,
such as the period over which evidence is valid, or the frequency of checkpoints.

## Decision

### ConsensusParams

No consensus-critical parameters should ever be found in the `config.toml`.

A new `ConsensusParams` is optionally included in the `genesis.json` file,
and loaded into the `State`. Any items not included are set to their default value.
A value of 0 is undefined (see ABCI, below). A value of -1 is used to indicate the parameter does not apply.
The parameters are used to determine the validity of a block (and tx) via the union of all relevant parameters.

```
type ConsensusParams struct {
    BlockSize
    TxSize
    BlockGossip
}

type BlockSize struct {
    MaxBytes int
    MaxTxs   int
    MaxGas   int
}

type TxSize struct {
    MaxBytes int
    MaxGas   int
}

type BlockGossip struct {
    BlockPartSizeBytes int
}
```

The `ConsensusParams` can evolve over time by adding new structs that cover different aspects of the consensus rules.

The `BlockPartSizeBytes` and the `BlockSize.MaxBytes` are enforced to be greater than 0.
The former because we need a part size, the latter so that we always have at least some sanity check over the size of blocks.

### ABCI

#### InitChain

InitChain currently takes the initial validator set. It should be extended to also take parts of the ConsensusParams.
There is a case to be made for it to take the entire Genesis, except there may be things in the genesis,
like the BlockPartSize, that the app shouldn't really know about.

#### EndBlock

The EndBlock response includes a `ConsensusParams`, which includes BlockSize and TxSize, but not BlockGossip.
Other param structs can be added to `ConsensusParams` in the future.
The `0` value is used to denote no change.
Any other value will update that parameter in the `State.ConsensusParams`, to be applied for the next block.
Tendermint should have hard-coded upper limits as sanity checks.

## Status

Proposed.

## Consequences

### Positive

- Alternative capacity limits and consensus parameters can be specified without re-compiling the software.
- They can also change over time under the control of the application

### Negative

- More exposed parameters means more complexity
- Different rules at different heights in the blockchain complicate fast sync

### Neutral

- The TxSize, which checks validity, may be in conflict with the config's `max_block_size_tx`, which determines proposal sizes

diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md
new file mode 100644
index 000000000..ec8a0cce7
--- /dev/null
+++ b/docs/architecture/adr-006-trust-metric.md
@@ -0,0 +1,238 @@
# ADR 006: Trust Metric Design

## Context

The proposed trust metric will allow Tendermint to maintain local trust rankings for peers it has directly interacted with, which can then be used to implement soft security controls. The calculations were obtained from the [TrustGuard](https://dl.acm.org/citation.cfm?id=1060808) project.

### Background

The Tendermint Core project developers would like to improve Tendermint security and reliability by keeping track of the level of trustworthiness peers have demonstrated within the peer-to-peer network. This way, undesirable outcomes from peers will not immediately result in them being dropped from the network (potentially causing drastic changes to take place). Instead, peers' behavior can be monitored with appropriate metrics, and they can be removed from the network once Tendermint Core is certain the peer is a threat. For example, when the PEXReactor makes a request for peers' network addresses from an already known peer, and the returned network addresses are unreachable, this untrustworthy behavior should be tracked. Returning a few bad network addresses probably shouldn't cause a peer to be dropped, while excessive amounts of this behavior do qualify the peer for being dropped.

Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapt the malicious node's behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is *X* hours, then it could wait *X* hours in-between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events.

Instead, having shorter intervals, but keeping a history of interval values, will give our metric the flexibility needed in order to keep the network stable, while also making it resilient against a strategic malicious node in the Tendermint peer-to-peer network. Also, the metric can access trust data over a rather long period of time while not greatly increasing its history size by aggregating older history values over a larger number of intervals, and at the same time, maintain great precision for the recent intervals. This approach is referred to as fading memories, and closely resembles the way human beings remember their experiences. The trade-off to using history data is that the interval values should be preserved between executions of the node.

### References

S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in *Proceedings of the 14th international conference on World Wide Web, pp. 422-431*, May 2005.
## Decision

The proposed trust metric will allow a developer to inform the trust metric store of all good and bad events relevant to a peer's behavior, and at any time, the metric can be queried for a peer's current trust ranking.

The three subsections below will cover the process being considered for calculating the trust ranking, the concept of the trust metric store, and the interface for the trust metric.

### Proposed Process

The proposed trust metric will count good and bad events relevant to the object, and calculate the percentage of counters that are good over an interval with a predefined duration. This is the procedure that will continue for the life of the trust metric. When the trust metric is queried for the current **trust value**, a resilient equation will be utilized to perform the calculation.

The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. We compute the trust value of a peer in interval *i* based on its current trust ranking, its trust rating history prior to interval *i* (over the past *maxH* number of intervals), and its trust ranking fluctuation. We will break up the equation into the three components.

```math
(1) Proportional Value = a * R[i]
```

where *R*[*i*] denotes the raw trust value at time interval *i* (with *i* == 0 being the current time) and *a* is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last *maxH* intervals to calculate the history value for time *i*:

`H[i] = ` ![formula1](img/formula1.png "Weighted Sum Formula")

The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as *Wk* = 0.8^*k*, for time interval *k*. With the history value available, we can now finish calculating the integral value:

```math
(2) Integral Value = b * H[i]
```

Where *H*[*i*] denotes the history value at time interval *i* and *b* is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows:

```math
D[i] = R[i] - H[i]

(3) Derivative Value = c(D[i]) * D[i]
```

Where the value of *c* is selected based on the *D*[*i*] value relative to zero. The default selection process makes *c* equal to 0 unless *D*[*i*] is a negative value, in which case *c* is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. With the three components brought together, our trust value equation is calculated as follows:

```math
TrustValue[i] = a * R[i] + b * H[i] + c(D[i]) * D[i]
```
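As an illustration, here is a minimal, self-contained Go sketch of this
calculation. The function name, the normalization of the history
weights, and the clamping to the documented [0, 1] range are assumptions
of this sketch, not part of the specification above:

```go
package trust

import "math"

// trustValue combines the three components described above: proportional
// (the current raw value r), integral (weighted history), and derivative
// (a penalty applied only when current behavior is worse than history).
// a and b are the proportional and integral weights.
func trustValue(r float64, history []float64, a, b float64) float64 {
	// History value H: weighted sum over past intervals using the
	// default optimistic weights Wk = 0.8^k, normalized to sum to 1
	// (normalization is an assumption of this sketch).
	var h, wsum float64
	for k, rk := range history {
		w := math.Pow(0.8, float64(k))
		h += w * rk
		wsum += w
	}
	if wsum > 0 {
		h /= wsum
	}

	d := r - h // D[i]: the fluctuation
	c := 0.0   // no derivative effect when current >= history
	if d < 0 {
		c = 1.0 // maximum penalty when behavior got worse
	}

	v := a*r + b*h + c*d
	return math.Max(0, math.Min(1, v)) // keep within the trust value range
}
```

For example, with hypothetical weights `a = 0.4` and `b = 0.6`, a peer
with a perfect history (`h = 1`) that turns completely bad in the
current interval (`r = 0`) is pulled down both by the proportional term
and by the derivative penalty.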
As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of *m*, while allowing us to represent 2^*m* - 1 history intervals, we can employ the fading memories technique, which trades space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to *maxH* (which can be 2^*m* - 1), we will map those requests down to *m* values using equation 4 below:

```math
(4) j = index, where index > 0
```

Where *j* is one of the *(0, 1, 2, ..., m - 1)* indices used to access history interval data. Now we can access the raw intervals using the following calculations:

```math
R[0] = raw data for current time interval
```

`R[j] = ` ![formula2](img/formula2.png "Fading Memories Formula")

### Trust Metric Store

Similar to the P2P subsystem AddrBook, the trust metric store will maintain information relevant to Tendermint peers. Additionally, the trust metric store will ensure that trust metrics are only active for peers that a node is currently and directly engaged with.

Reactors will provide a peer key to the trust metric store in order to retrieve the associated trust metric. The trust metric can then record new positive and negative events experienced by the reactor, as well as provide the current trust score calculated by the metric.

When the node is shutting down, the trust metric store will save history data for trust metrics associated with all known peers. This saved information allows experiences with a peer to be preserved across node executions, which can span a tracking window of days or weeks. The trust history data is loaded automatically during OnStart.

### Interface Detailed Design

Each trust metric allows for the recording of positive/negative events, querying the current trust value/score, and the stopping/pausing of tracking over time intervals. This can be seen below:

```go

// TrustMetric - keeps track of peer reliability
type TrustMetric struct {
    // Private elements.
}

// Pause tells the metric to pause recording data over time intervals.
// All method calls that indicate events will unpause the metric
func (tm *TrustMetric) Pause() {}

// Stop tells the metric to stop recording data over time intervals
func (tm *TrustMetric) Stop() {}

// BadEvents indicates that undesirable event(s) took place
func (tm *TrustMetric) BadEvents(num int) {}

// GoodEvents indicates that desirable event(s) took place
func (tm *TrustMetric) GoodEvents(num int) {}

// TrustValue gets the dependable trust value; always between 0 and 1
func (tm *TrustMetric) TrustValue() float64 {}

// TrustScore gets a score based on the trust value; always between 0 and 100
func (tm *TrustMetric) TrustScore() int {}

// NewMetric returns a trust metric with the default configuration
func NewMetric() *TrustMetric {}

//------------------------------------------------------------------------------------------------
// For example

tm := NewMetric()

tm.BadEvents(1)
score := tm.TrustScore()

tm.Stop()

```

Some of the trust metric parameters can be configured.
The weight values should probably be left alone in most cases, yet the time durations for the tracking window and individual time interval should be considered.

```go

// TrustMetricConfig - Configures the weight functions and time intervals for the metric
type TrustMetricConfig struct {
    // Determines the percentage given to current behavior
    ProportionalWeight float64

    // Determines the percentage given to prior behavior
    IntegralWeight float64

    // The window of time that the trust metric will track events across.
    // This can be set to cover many days without issue
    TrackingWindow time.Duration

    // Each interval should be short for adaptability.
    // Less than 30 seconds is too sensitive,
    // and greater than 5 minutes will make the metric numb
    IntervalLength time.Duration
}

// DefaultConfig returns a config with values that have been tested and produce desirable results
func DefaultConfig() TrustMetricConfig {}

// NewMetricWithConfig returns a trust metric with a custom configuration
func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {}

//------------------------------------------------------------------------------------------------
// For example

config := TrustMetricConfig{
    TrackingWindow: time.Minute * 60 * 24, // one day
    IntervalLength: time.Minute * 2,
}

tm := NewMetricWithConfig(config)

tm.BadEvents(10)
tm.Pause()
tm.GoodEvents(1) // becomes active again

```

A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration.

When you attempt to fetch the trust metric for a peer, and an entry does not exist in the trust metric store, a new metric is automatically created and the entry is made within the store.

In addition to the fetching method, GetPeerTrustMetric, the trust metric store provides a method to call when a peer has disconnected from the node. This is so the metric can be paused (history data will not be saved) for periods of time when the node is not having direct experiences with the peer.

```go

// TrustMetricStore - Manages all trust metrics for peers
type TrustMetricStore struct {
    cmn.BaseService

    // Private elements
}

// OnStart implements Service
func (tms *TrustMetricStore) OnStart() error {}

// OnStop implements Service
func (tms *TrustMetricStore) OnStop() {}

// NewTrustMetricStore returns a store that saves data to the DB
// and uses the config when creating new trust metrics
func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {}

// Size returns the number of entries in the trust metric store
func (tms *TrustMetricStore) Size() int {}

// GetPeerTrustMetric returns a trust metric by peer key
func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {}

// PeerDisconnected pauses the trust metric associated with the peer identified by the key
func (tms *TrustMetricStore) PeerDisconnected(key string) {}

//------------------------------------------------------------------------------------------------
// For example

db := dbm.NewDB("trusthistory", "goleveldb", dirPathStr)
tms := NewTrustMetricStore(db, DefaultConfig())

tm := tms.GetPeerTrustMetric(key)
tm.BadEvents(1)

tms.PeerDisconnected(key)

```

## Status

Approved.
## Consequences

### Positive

- The trust metric will allow Tendermint to make non-binary security and reliability decisions
- Will help Tendermint implement deterrents that provide soft security controls, yet avoid disruption on the network
- Will provide useful profiling information when analyzing performance over time related to peer interaction

### Negative

- Requires saving the trust metric history data across node executions

### Neutral

- Keep in mind that good events need to be recorded just as bad events do with this implementation
diff --git a/docs/architecture/adr-007-trust-metric-usage.md b/docs/architecture/adr-007-trust-metric-usage.md
new file mode 100644
index 000000000..4d833a69f
--- /dev/null
+++ b/docs/architecture/adr-007-trust-metric-usage.md
@@ -0,0 +1,103 @@
# ADR 007: Trust Metric Usage Guide

## Context

Tendermint is required to monitor peer quality in order to inform its peer dialing and peer exchange strategies.

When a node first connects to the network, it is important that it can quickly find good peers.
Thus, while a node has fewer connections, it should prioritize connecting to higher quality peers.
As the node becomes well connected to the rest of the network, it can dial lesser known or lesser
quality peers and help assess their quality. Similarly, when queried for peers, a node should make
sure it doesn't return low quality peers.

Peer quality can be tracked using a trust metric that flags certain behaviours as good or bad. When enough
bad behaviour accumulates, we can mark the peer as bad and disconnect.
For example, when the PEXReactor makes a request for peers' network addresses from an already known peer, and the returned network addresses are unreachable, this undesirable behavior should be tracked. Returning a few bad network addresses probably shouldn't cause a peer to be dropped, while excessive amounts of this behavior do qualify the peer for removal. The originally proposed approach and design document for the trust metric can be found in the [ADR 006](adr-006-trust-metric.md) document.

The trust metric implementation allows a developer to obtain a peer's trust metric from a trust metric store and to track good and bad events relevant to that peer's behavior. At any time, the peer's metric can be queried for the current trust value. The current trust value is calculated with a formula that utilizes current behavior, previous behavior, and the change between the two. Current behavior is calculated as the percentage of good behavior within a time interval. The time interval is short, probably set between 30 seconds and 5 minutes. On the other hand, the historic data can estimate a peer's behavior over days' worth of tracking. At the end of a time interval, the current behavior becomes part of the historic data, and a new time interval begins with the good and bad counters reset to zero.

These are some important things to keep in mind regarding how the trust metrics handle time intervals and scoring:

- Each new time interval begins with a perfect score
- Bad events quickly bring the score down and good events cause the score to slowly rise
- When the time interval is over, the percentage of good events becomes historic data.
Some useful information about the inner workings of the trust metric:

- When a trust metric is first instantiated, a timer (ticker) periodically fires in order to handle transitions between trust metric time intervals
- If a peer is disconnected from a node, the timer should be paused, since the node is no longer connected to that peer
- The ability to pause the metric is supported with the store **PeerDisconnected** method and the metric **Pause** method
- After a pause, if a good or bad event method is called on a metric, it automatically becomes unpaused and begins a new time interval.

## Decision

The trust metric capability is now available, yet it still leaves the question of how it should be applied throughout Tendermint in order to properly track the quality of peers.

### Proposed Process

Peers are managed using an address book and a trust metric:

- The address book keeps a record of peers and provides selection methods
- The trust metric tracks the quality of the peers

#### Presence in Address Book

Outbound peers are added to the address book before they are dialed,
and inbound peers are added once the peer connection is set up.
Peers are also added to the address book when they are received in response to
a pexRequestMessage.

While a node has fewer than `needAddressThreshold` addresses, it will periodically request more,
via pexRequestMessage, from randomly selected peers and from newly dialed outbound peers.

When a new address is added to an address book that has more than `0.5*needAddressThreshold` addresses,
then with some low probability, a randomly chosen low quality peer is removed.

#### Outbound Peers

Peers attempt to maintain a minimum number of outbound connections by
repeatedly querying the address book for peers to connect to.
While a node has few to no outbound connections, the address book is biased to return
higher quality peers. As the node increases the number of outbound connections,
the address book is biased to return less-vetted or lower-quality peers.

#### Inbound Peers

Peers also maintain a maximum number of total connections, MaxNumPeers.
If a peer has MaxNumPeers, new incoming connections will be accepted with low probability.
When such a new connection is accepted, the peer disconnects from a probabilistically chosen low ranking peer
so it does not exceed MaxNumPeers.

#### Peer Exchange

When a peer receives a pexRequestMessage, it returns a random sample of high quality peers from the address book. Peers with no score or a low score should not be included in a response to pexRequestMessage.

#### Peer Quality

Peer quality is tracked in the connection and across the reactors by storing the TrustMetric in the peer's
thread-safe Data store.

Peer behaviour is then defined as one of the following:

- Fatal - something outright malicious that causes us to disconnect the peer and ban it from the address book for some amount of time
- Bad - Any kind of timeout, messages that don't unmarshal, fail other validity checks, or messages we didn't ask for or aren't expecting (usually worth one bad event)
- Neutral - Unknown channels/message types/version upgrades (no good or bad events recorded)
- Correct - Normal correct behavior (worth one good event)
- Good - some random majority of peers per reactor sending us useful messages (worth more than one good event).

Note that Fatal behaviour causes us to remove the peer, and neutral behaviour does not affect the score.
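As a rough illustration, the mapping might look like the following Go
sketch. The `Behaviour` type, the `record` helper, and the exact event
weights are hypothetical; only the categories come from the list above,
and `TrustMetric` is the type sketched in ADR 006:

```go
// Behaviour is a hypothetical classification of a peer's action,
// mirroring the Fatal/Bad/Neutral/Correct/Good categories above.
type Behaviour int

const (
	Fatal Behaviour = iota
	Bad
	Neutral
	Correct
	Good
)

// record feeds a classified behaviour into the peer's trust metric
// (from ADR 006) and reports whether the peer should be banned.
func record(tm *TrustMetric, b Behaviour) (ban bool) {
	switch b {
	case Fatal:
		return true // disconnect and ban; no metric update needed
	case Bad:
		tm.BadEvents(1)
	case Correct:
		tm.GoodEvents(1)
	case Good:
		tm.GoodEvents(2) // worth more than one good event
	}
	// Neutral: no good or bad events recorded
	return false
}
```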
## Status

Proposed.

## Consequences

### Positive

- Bringing the address book and trust metric store together will cause the network to be built in a way that encourages greater security and reliability.

### Negative

- TBD

### Neutral

- Keep in mind that good events need to be recorded just as bad events do with this implementation.
diff --git a/docs/architecture/adr-008-priv-validator.md b/docs/architecture/adr-008-priv-validator.md
new file mode 100644
index 000000000..4c1d87bed
--- /dev/null
+++ b/docs/architecture/adr-008-priv-validator.md
@@ -0,0 +1,29 @@
# ADR 008: SocketPV

Tendermint nodes should support only two in-process PrivValidator
implementations:

- FilePV uses an unencrypted private key in a "priv_validator.json" file - no
  configuration required (just `tendermint init`).
- SocketPV uses a socket to send signing requests to another process - the user
  is responsible for starting that process themselves.

The SocketPV address can be provided via flags at the command line - doing so
will cause Tendermint to ignore any "priv_validator.json" file and to listen on
the given address for incoming connections from an external priv_validator
process. It will halt any operation until at least one external process
successfully connects.

The external priv_validator process will dial the address to connect to
Tendermint, and then Tendermint will send requests on the ensuing connection to
sign votes and proposals. Thus the external process initiates the connection,
but the Tendermint process makes all requests. In a later stage we're going to
support multiple validators for fault tolerance. To prevent double signing they
need to be synced, which is deferred to an external solution (see #1185).

In addition, Tendermint will provide implementations that can be run in that
external process. These include:

- FilePV will encrypt the private key, and the user must enter a password to
  decrypt the key when the process is started.
- LedgerPV uses a Ledger Nano S to handle all signing.
diff --git a/docs/architecture/adr-009-ABCI-design.md b/docs/architecture/adr-009-ABCI-design.md
new file mode 100644
index 000000000..8b85679b8
--- /dev/null
+++ b/docs/architecture/adr-009-ABCI-design.md
@@ -0,0 +1,273 @@
# ADR 009: ABCI UX Improvements

## Changelog

23-06-2018: Some minor fixes from review
07-06-2018: Some updates based on discussion with Jae
07-06-2018: Initial draft to match what was released in ABCI v0.11

## Context

The ABCI was first introduced in late 2015. Its purpose is to be:

- a generic interface between state machines and their replication engines
- agnostic to the language the state machine is written in
- agnostic to the replication engine that drives it

This means ABCI should provide an interface for both pluggable applications and
pluggable consensus engines.

To achieve this, it uses Protocol Buffers (proto3) for message types. The dominant
implementation is in Go.

After some recent discussions with the community on GitHub, the following were
identified as pain points:

- Amino encoded types
- Managing validator sets
- Imports in the protobuf file

See the [references](#references) for more.

### Imports

The native proto library in Go generates inflexible and verbose code.
Many in the Go community have adopted a fork called
[gogoproto](https://github.com/gogo/protobuf) that provides a
variety of features aimed at improving the developer experience.
While `gogoproto` is nice, it creates an additional dependency, and compiling
the protobuf types for other languages has been reported to fail when `gogoproto` is used.

### Amino

Amino is an encoding protocol designed to improve on the shortcomings of protobuf.
Its goal is to be proto4.

Many people are frustrated by the incompatibility with protobuf,
and with the requirement for Amino to be used at all within ABCI.

We intend to make Amino successful enough that we can eventually use it for ABCI
message types directly. By then it should be called proto4. In the meantime,
we want it to be easy to use.

### PubKey

PubKeys are encoded using Amino (and before that, go-wire).
Ideally, PubKeys are an interface type where we don't know all the
implementation types, so it's unfitting to use `oneof` or `enum`.

### Addresses

The address for an ED25519 pubkey is the RIPEMD160 of the Amino-encoded
pubkey. This introduces an Amino dependency into address generation,
a functionality that is widely required and should be as easy to compute
as possible.

### Validators

To change the validator set, applications can return a list of validator updates
with ResponseEndBlock. In these updates, the public key *must* be included,
because Tendermint requires the public key to verify validator signatures. This
means ABCI developers have to work with PubKeys. That said, it would also be
convenient to work with address information, and for it to be simple to do so.

### AbsentValidators

Tendermint also provides a list of validators in BeginBlock who did not sign the
last block. This allows applications to reflect availability behaviour in the
application, for instance by punishing validators for not having votes included
in commits.

### InitChain

Tendermint passes in a list of validators here, and nothing else. It would
benefit the application to be able to control the initial validator set. For
instance, the genesis file could include application-based information about the
initial validator set that the application could process to determine the
initial validator set. Additionally, InitChain would benefit from getting all
the genesis information.

### Header

ABCI provides the Header in RequestBeginBlock so the application can have
important information about the latest state of the blockchain.

## Decision

### Imports

Move away from gogoproto. In the short term, we will just maintain a second
protobuf file without the gogoproto annotations. In the medium term, we will
make copies of all the structs in Golang and shuttle back and forth. In the long
term, we will use Amino.

### Amino

To simplify ABCI application development in the short term,
Amino will be completely removed from the ABCI:

- It will not be required for PubKey encoding
- It will not be required for computing PubKey addresses

That said, we are working to make Amino a huge success, and to become proto4.
To facilitate adoption and cross-language compatibility in the near-term, Amino
v1 will:

- be fully compatible with the subset of proto3 that excludes `oneof`
- use the Amino prefix system to provide interface types, as opposed to `oneof`
  style union types.

That said, an Amino v2 will be worked on to improve the performance of the
format and its usability in cryptographic applications.

### PubKey

Encoding schemes infect software. As a generic middleware, ABCI aims to have
some cross-scheme compatibility.
For this it has no choice but to include opaque
bytes from time to time. While we will not enforce Amino encoding for these
bytes yet, we need to provide a type system. The simplest way to do this is to
use a type string.

PubKey will now look like:

```
message PubKey {
    string type
    bytes data
}
```

where `type` can be:

- "ed25519", with `data = <raw 32-byte ed25519 pubkey>`
- "secp256k1", with `data = <33-byte OpenSSL compressed pubkey>`

As we want to retain flexibility here, and since ideally, PubKey would be an
interface type, we do not use `enum` or `oneof`.

### Addresses

To simplify and improve computing addresses, we change them to the first 20 bytes of the SHA256
of the raw 32-byte public key.

We continue to use the Bitcoin address scheme for secp256k1 keys.

### Validators

Add a `bytes address` field:

```
message Validator {
    bytes address
    PubKey pub_key
    int64 power
}
```

### RequestBeginBlock and AbsentValidators

To simplify this, RequestBeginBlock will include the complete validator set,
including the address and voting power of each validator, along
with a boolean for whether or not they voted:

```
message RequestBeginBlock {
    bytes hash
    Header header
    LastCommitInfo last_commit_info
    repeated Evidence byzantine_validators
}

message LastCommitInfo {
    int32 CommitRound
    repeated SigningValidator validators
}

message SigningValidator {
    Validator validator
    bool signed_last_block
}
```

Note that in Validators in RequestBeginBlock, we DO NOT include public keys. Public keys are
larger than addresses and in the future, with quantum computers, will be much
larger. The overhead of passing them, especially during fast-sync, is
significant.

Additionally, addresses are changing to be simpler to compute, further removing
the need to include pubkeys here.

In short, ABCI developers must be aware of both addresses and public keys.

### ResponseEndBlock

Since ResponseEndBlock includes Validator, it must now include their address.

### InitChain

Change RequestInitChain to give the app all the information from the genesis file:

```
message RequestInitChain {
    int64 time
    string chain_id
    ConsensusParams consensus_params
    repeated Validator validators
    bytes app_state_bytes
}
```

Change ResponseInitChain to allow the app to specify the initial validator set
and consensus parameters.

```
message ResponseInitChain {
    ConsensusParams consensus_params
    repeated Validator validators
}
```

### Header

Now that Tendermint Amino will be compatible with proto3, the Header in ABCI
should exactly match the Tendermint header - they will then be encoded
identically in ABCI and in Tendermint Core.

## Status

Accepted.
## Consequences

### Positive

- Easier for developers to build on the ABCI
- ABCI and Tendermint headers are identically serialized

### Negative

- Maintenance overhead of an alternative type encoding scheme
- Performance overhead of passing all validator info every block (at least it's
  only addresses, and not also pubkeys)
- Maintenance overhead of duplicate types

### Neutral

- ABCI developers must know about validator addresses

## References

- [ABCI v0.10.3 Specification (before this
  proposal)](https://github.com/tendermint/abci/blob/v0.10.3/specification.rst)
- [ABCI v0.11.0 Specification (implementing first draft of this
  proposal)](https://github.com/tendermint/abci/blob/v0.11.0/specification.md)
- [Ed25519 addresses](https://github.com/tendermint/go-crypto/issues/103)
- [InitChain contains the
  Genesis](https://github.com/tendermint/abci/issues/216)
- [PubKeys](https://github.com/tendermint/tendermint/issues/1524)
- [Notes on
  Header](https://github.com/tendermint/tendermint/issues/1605)
- [Gogoproto issues](https://github.com/tendermint/abci/issues/256)
- [Absent Validators](https://github.com/tendermint/abci/issues/231)
diff --git a/docs/architecture/adr-010-crypto-changes.md b/docs/architecture/adr-010-crypto-changes.md
new file mode 100644
index 000000000..cfe618421
--- /dev/null
+++ b/docs/architecture/adr-010-crypto-changes.md
@@ -0,0 +1,78 @@
# ADR 010: Crypto Changes

## Context

Tendermint is a cryptographic protocol that uses and composes a variety of cryptographic primitives.

After nearly 4 years of development, Tendermint has recently undergone multiple security reviews to search for vulnerabilities and to assess the use and composition of cryptographic primitives.

### Hash Functions

Tendermint uses RIPEMD160 universally as a hash function, most notably in its Merkle tree implementation.

RIPEMD160 was chosen because it provides the shortest fingerprint that is long enough to be considered secure (i.e. a birthday bound of 80 bits).
It was also developed in the open academic community, unlike NSA-designed algorithms like SHA256.

That said, the cryptographic community appears to unanimously agree on the security of SHA256. It has become a universal standard, especially now that SHA1 is broken, being required in TLS connections and having optimized support in hardware.

### Merkle Trees

Tendermint uses a simple Merkle tree to compute digests of large structures like transaction batches
and even blockchain headers. The Merkle tree length-prefixes byte arrays before concatenating and hashing them.
It uses RIPEMD160.

### Addresses

ED25519 addresses are computed using the RIPEMD160 of the Amino encoding of the public key.
RIPEMD160 is generally considered an outdated hash function, and is much slower
than more modern functions like SHA256 or Blake2.

### Authenticated Encryption

Tendermint P2P connections use authenticated encryption to provide privacy and authentication in the communications.
This is done using the simple Station-to-Station protocol with the NaCL Ed25519 library.
While there have been no vulnerabilities found in the implementation, there are some concerns:

- NaCL uses Salsa20, a not widely used and relatively outdated stream cipher that has been superseded by ChaCha20
- Connections use RIPEMD160 to compute a value that is used for the encryption nonce, with subtle requirements on how it's used

## Decision

### Hash Functions

Use the first 20 bytes of the SHA256 hash instead of RIPEMD160 for everything.

### Merkle Trees

TODO

### Addresses

Compute ED25519 addresses as the first 20 bytes of the SHA256 of the raw 32-byte public key.

### Authenticated Encryption

Make the following changes:

- Use xChaCha20 instead of xSalsa20 - https://github.com/tendermint/tendermint/issues/1124
- Use an HKDF instead of RIPEMD160 to compute nonces - https://github.com/tendermint/tendermint/issues/1165

## Status

## Consequences

### Positive

- More modern and standard cryptographic functions with wider adoption and hardware acceleration

### Negative

- The exact authenticated encryption construction isn't already provided in a well-used library

### Neutral

## References

diff --git a/docs/architecture/adr-011-monitoring.md b/docs/architecture/adr-011-monitoring.md
new file mode 100644
index 000000000..ca16a9a1c
--- /dev/null
+++ b/docs/architecture/adr-011-monitoring.md
@@ -0,0 +1,116 @@
# ADR 011: Monitoring

## Changelog

08-06-2018: Initial draft
11-06-2018: Reorg after @xla comments
13-06-2018: Clarification about usage of labels

## Context

In order to bring more visibility into Tendermint, we would like it to report
metrics and, maybe later, traces of transactions and RPC queries. See
https://github.com/tendermint/tendermint/issues/986.

A few solutions were considered:

1. [Prometheus](https://prometheus.io)
   a) Prometheus API
   b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus
   c) [telegraf](https://github.com/influxdata/telegraf)
   d) a new service, which will listen to events emitted by pubsub and report metrics
2. [OpenCensus](https://opencensus.io/go/index.html)

### 1. Prometheus

Prometheus seems to be the most popular product out there for monitoring. It has
a Go client library, powerful queries, and alerts.

**a) Prometheus API**

We can commit to using Prometheus in Tendermint, but I think Tendermint users
should be free to choose whatever monitoring tool they feel will better suit
their needs (if they don't have an existing one already). So we should try to
abstract the interface enough that people can switch between Prometheus and other
similar tools.

**b) go-kit metrics package as an interface**

The metrics package provides a set of uniform interfaces for service
instrumentation and offers adapters to popular metrics packages:

https://godoc.org/github.com/go-kit/kit/metrics#pkg-subdirectories

Compared to the Prometheus API, we lose customisability and control, but gain
the freedom to choose any instrument from the above list, given that we extract
metrics creation into a separate function (see "providers" in node/node.go).

**c) telegraf**

Unlike the options already discussed, telegraf does not require modifying Tendermint
source code. You create something called an input plugin, which polls
Tendermint RPC every second and calculates the metrics itself.

While this may sound good, some metrics we want to report are not exposed via
RPC or pubsub, and therefore can't be accessed externally.
+
+**d) service, listening to pubsub**
+
+Same issue as with telegraf above: the metrics we need are not all exposed via
+RPC or pubsub.
+
+### 2. OpenCensus
+
+OpenCensus provides both metrics and tracing, which may be important in the
+future. Its API looks different from go-kit and Prometheus, but it looks like
+it covers everything we need.
+
+Unfortunately, the OpenCensus Go client does not define any
+interfaces, so if we want to abstract away metrics we
+will need to write interfaces ourselves.
+
+### List of metrics
+
+|   | Name                                 | Type   | Description                                                                  |
+| - | ------------------------------------ | ------ | ---------------------------------------------------------------------------- |
+| A | consensus_height                     | Gauge  |                                                                              |
+| A | consensus_validators                 | Gauge  | Number of validators who signed                                              |
+| A | consensus_validators_power           | Gauge  | Total voting power of all validators                                         |
+| A | consensus_missing_validators         | Gauge  | Number of validators who did not sign                                        |
+| A | consensus_missing_validators_power   | Gauge  | Total voting power of the missing validators                                 |
+| A | consensus_byzantine_validators       | Gauge  | Number of validators who tried to double sign                                |
+| A | consensus_byzantine_validators_power | Gauge  | Total voting power of the byzantine validators                               |
+| A | consensus_block_interval             | Timing | Time between this and the last block (Block.Header.Time)                     |
+|   | consensus_block_time                 | Timing | Time to create a block (from creating a proposal to commit)                  |
+|   | consensus_time_between_blocks        | Timing | Time between committing the last block and receiving or creating a proposal  |
+| A | consensus_rounds                     | Gauge  | Number of rounds                                                             |
+|   | consensus_prevotes                   | Gauge  |                                                                              |
+|   | consensus_precommits                 | Gauge  |                                                                              |
+|   | consensus_prevotes_total_power       | Gauge  |                                                                              |
+|   | consensus_precommits_total_power     | Gauge  |                                                                              |
+| A | consensus_num_txs                    | Gauge  |                                                                              |
+| A | mempool_size                         | Gauge  |                                                                              |
+| A | consensus_total_txs                  | Gauge  |                                                                              |
+| A | consensus_block_size                 | Gauge  | In bytes                                                                     |
+| A | p2p_peers                            | Gauge  | Number of peers the node is connected to                                     |
+
+`A` - will be implemented first.
+
+**Proposed solution**
+
+## Status
+
+Proposed.
+
+## Consequences
+
+### Positive
+
+Better visibility, support for a variety of monitoring backends
+
+### Negative
+
+One more library to audit, and metrics-reporting code mixed into the business
+domain.
+ +### Neutral + +- diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md new file mode 100644 index 000000000..2303490ad --- /dev/null +++ b/docs/architecture/adr-template.md @@ -0,0 +1,16 @@ +# ADR 000: Template for an ADR + +## Context + +## Decision + +## Status + + +## Consequences + +### Positive + +### Negative + +### Neutral diff --git a/docs/architecture/img/formula1.png b/docs/architecture/img/formula1.png new file mode 100644 index 000000000..447ee30f5 Binary files /dev/null and b/docs/architecture/img/formula1.png differ diff --git a/docs/architecture/img/formula2.png b/docs/architecture/img/formula2.png new file mode 100644 index 000000000..081a15769 Binary files /dev/null and b/docs/architecture/img/formula2.png differ diff --git a/docs/architecture/img/tags1.png b/docs/architecture/img/tags1.png new file mode 100644 index 000000000..a6bc64e81 Binary files /dev/null and b/docs/architecture/img/tags1.png differ diff --git a/docs/assets/a_plus_t.png b/docs/assets/a_plus_t.png new file mode 100644 index 000000000..8f5bc5e95 Binary files /dev/null and b/docs/assets/a_plus_t.png differ diff --git a/docs/assets/abci.png b/docs/assets/abci.png new file mode 100644 index 000000000..73111cafd Binary files /dev/null and b/docs/assets/abci.png differ diff --git a/docs/assets/consensus_logic.png b/docs/assets/consensus_logic.png new file mode 100644 index 000000000..22b70b265 Binary files /dev/null and b/docs/assets/consensus_logic.png differ diff --git a/docs/assets/tm-application-example.png b/docs/assets/tm-application-example.png new file mode 100644 index 000000000..47d4e928c Binary files /dev/null and b/docs/assets/tm-application-example.png differ diff --git a/docs/assets/tm-transaction-flow.png b/docs/assets/tm-transaction-flow.png new file mode 100644 index 000000000..ea4908003 Binary files /dev/null and b/docs/assets/tm-transaction-flow.png differ diff --git a/docs/assets/tmint-logo-blue.png b/docs/assets/tmint-logo-blue.png new file mode 100644 index 000000000..cc4c8fb82 Binary files /dev/null and b/docs/assets/tmint-logo-blue.png differ diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..0cfc05cdf --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +# +# Tendermint documentation build configuration file, created by +# sphinx-quickstart on Mon Aug 7 04:55:09 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) +import urllib + +import sphinx_rtd_theme + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. 
+# You can specify multiple suffixes as a list of strings:
+#
+
+from recommonmark.parser import CommonMarkParser
+
+source_parsers = {
+    '.md': CommonMarkParser,
+}
+
+source_suffix = ['.rst', '.md']
+#source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Tendermint'
+copyright = u'2018, The Authors'
+author = u'Tendermint'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u''
+# The full version, including alpha/beta/rc tags.
+release = u''
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# These patterns also affect html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+# html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# This is required for the alabaster theme
+# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
+html_sidebars = {
+    '**': [
+        'about.html',
+        'navigation.html',
+        'relations.html',  # needs 'show_related': True theme option to display
+        'searchbox.html',
+        'donate.html',
+    ]
+}
+
+
+# -- Options for HTMLHelp output ------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Tendermintdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #
+    # 'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    #
+    # 'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    #
+    # 'preamble': '',
+
+    # Latex figure (float) alignment
+    #
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'Tendermint.tex', u'Tendermint Documentation',
+     u'The Authors', 'manual'),
+]
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'Tendermint', u'Tendermint Documentation',
+     [author], 1)
+]
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'Tendermint', u'Tendermint Documentation',
+     author, 'Tendermint', 'Byzantine Fault Tolerant Consensus.',
+     'Database'),
+]
+
+# ---------------- customizations ----------------------
+
+# for Docker README, below
+from shutil import copyfile
+
+# tm-bench and tm-monitor
+tools_repo = "https://raw.githubusercontent.com/tendermint/tools/"
+tools_branch = "master"
+
+tools_dir = "./tools"
+
+
+if not os.path.isdir(tools_dir):
+    os.mkdir(tools_dir)
+
+copyfile('../DOCKER/README.md', tools_dir+'/docker.md')
+
+# NB: urllib.urlretrieve is Python 2; under Python 3 this would be
+# urllib.request.urlretrieve.
+urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.md', filename=tools_dir+'/benchmarking.md')
+urllib.urlretrieve(tools_repo+tools_branch+'/tm-monitor/README.md', filename=tools_dir+'/monitoring.md')
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 000000000..bafbec354
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,15 @@
+.. Tendermint documentation master file, created by
+   sphinx-quickstart on Mon Aug 7 04:55:09 2017.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to Tendermint!
+======================
+
+This location for our documentation has been deprecated, please see:
+
+- https://tendermint.com/docs/
+
+The last version built by Read The Docs will still be available at:
+
+- https://tendermint.readthedocs.io/projects/tools/en/v0.21.0/
diff --git a/docs/interviews/tendermint-bft.md b/docs/interviews/tendermint-bft.md
new file mode 100644
index 000000000..8b3ad5743
--- /dev/null
+++ b/docs/interviews/tendermint-bft.md
@@ -0,0 +1,250 @@
+# Interview Transcript with Tendermint core researcher, Zarko Milosevic, by Chjango
+
+**ZM**: Regarding leader election, it's round robin, but a weighted one. You
+take into account the amount of bonded tokens. Depending on how much voting
+power they have, they would be elected more frequently. So we do rotate, but
+the guys who have more voting power are elected more frequently. If we have 4
+validators, and 1 of them has 2 times more voting power, they get elected as
+leader 2 times more often.
+
+**CC**: 2x more absolute voting power or probabilistic voting power?
+
+**ZM**: It's actually very deterministic. It's not probabilistic at all. See
+[Tendermint proposal election specification][1]. In Tendermint, there is no
+pseudorandom leader election. It's a deterministic protocol. So leader election
+is a built-in function in the code, so you know exactly—depending on the voting
+power in the validator set, you'd know who exactly would be the leader in round
+x, x + 1, and so on. There is nothing random there; we are not trying to hide
+who would be the leader. It's really well known.
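+
+_Editor's note: below is a minimal Go sketch of the kind of deterministic,
+weighted round-robin selection being described, following the accumulator idea
+in the proposer selection spec linked above. The names and types are
+illustrative, not Tendermint's actual code._
+
+```go
+package main
+
+import "fmt"
+
+// Validator is an illustrative stand-in for a validator set entry.
+type Validator struct {
+    Name        string
+    VotingPower int64
+    accum       int64 // rotation priority; grows with voting power each round
+}
+
+// proposer increases every accumulator by its validator's voting power, picks
+// the highest accumulator, and charges the winner the total voting power, so
+// election frequency is proportional to voting power.
+func proposer(vals []*Validator) *Validator {
+    var total int64
+    var best *Validator
+    for _, v := range vals {
+        v.accum += v.VotingPower
+        total += v.VotingPower
+        if best == nil || v.accum > best.accum {
+            best = v
+        }
+    }
+    best.accum -= total
+    return best
+}
+
+func main() {
+    vals := []*Validator{
+        {Name: "A", VotingPower: 2}, // 2x the power => elected ~2x as often
+        {Name: "B", VotingPower: 1},
+        {Name: "C", VotingPower: 1},
+    }
+    for round := 0; round < 8; round++ {
+        fmt.Printf("round %d: proposer %s\n", round, proposer(vals).Name)
+    }
+}
+```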
+It's just that there is a function, it's a mathematical function, and it's just
+basically—it's kind of an implementation detail—it starts from the voting
+power, and when you are elected, you get decreased by some number, and in each
+round you keep increasing depending on your voting power, so that you are
+elected after k rounds again. But knowing the validator set and the voting
+power, it's a very simple function; you can calculate it yourself to know
+exactly who would be next. For each round, this function will return you the
+leader for that round. In every round, we do this computation. It's all part of
+the same flow. It enforces the properties, which are: proportionally to your
+voting power, you will be elected, and we keep changing the leaders. So it
+can't happen that one guy is elected more than other guys, if they have the
+same voting power. So one time it will be guy B, and next time it will be guy
+B1. So it's not random.
+
+**CC**: Assuming the validator set remains unchanged for a month, then if you
+run this function, are you able to know exactly who is going to go for that
+entire month?
+
+**ZM**: Yes.
+
+**CC**: What're the attack scenarios for this?
+
+**ZM**: This is something which is easily attacked by people who argue that
+Tendermint is not decentralized enough. They say that by knowing the leader,
+you can DDoS the leader. And by DDoSing the leader, you are able to stop the
+progress. Because it's true. If you were able to DDoS the leader, the leader
+would not be able to propose and then effectively would not be making
+progress. How we are addressing this is the Sentry Architecture. So the
+validator—or at least a proper validator—will never be publicly available. You
+don't know the IP address of the validator. You are never able to open a
+connection to the validator. So the validator spawns sentry nodes, which are
+in the same administration domain, and the only connections from the validator
+are to its sentry nodes. And the IP address of the validator is not shared in
+the p2p network. It's completely private. This is our answer to the DDoS
+attack. By playing it clever with this sentry node architecture, and spawning
+additional sentry nodes in case, for example, your sentry nodes are being
+DDoS'd (because your sentry nodes are public), you will still be able to
+connect to sentry nodes. This is where we expect the validator to be clever
+enough so that, in case they are DDoS'd at the sentry level, they spawn a
+different sentry node and then you communicate through that one. We are in a
+sense pushing the responsibility onto the validator.
+
+**CC**: So if I understand this correctly, the public identity of the validator
+doesn't even matter because that entity can obfuscate where their real full
+nodes reside via a proxy through this sentry architecture.
+
+**ZM**: Exactly. So you do know the address or identity of the validator,
+but you don't know its network address; you're not able to attack it
+because you don't know where they are. They are completely obfuscated by the
+sentry nodes. Now, if you really want to figure it out… The structure of the
+Tendermint protocol is not fully decentralized, in the sense that the flow of
+information goes from the round proposer, or the round coordinator, to other
+nodes, and then after they receive this it's basically like [inaudible: "O to
+1"]. So by tracking where this information is coming from, you might be able
+to identify which sentry nodes are behind it.
+
+So if you are doing some network analysis, you might be able to deduce
+something. If things were completely static, with the validator never changing
+its sentry nodes or the IP addresses of the sentry nodes, it could be possible
+to deduce something. This is where the economic game comes into play. We are
+doing an economics game there. We say that it's the validator's business. If
+they are not able to hide themselves well enough, they'll be DDoS'd and they
+will be kicked out of the active validator set. So it's in their interest.
+
+[Proposer Selection Procedure in Tendermint][1]. This is how it should work no
+matter the implementation.
+
+**CC**: Going back to the proposer, let's say the validator does get DDoS'd,
+then the proposer goes down. What happens?
+
+**ZM**: How the proposal mechanism works—there's nothing special there—it goes
+through a sequence of rounds. Normal execution of Tendermint is that for each
+height, we are going through a sequence of rounds, starting from round 0, and
+then we are incrementing through the rounds. The nodes are moving through the
+rounds as part of normal procedure until they decide to commit. In case you
+have one proposer—the proposer of a single round—being DDoS'd, we will probably
+not decide in that round, because he will not be able to send his proposal. So
+we will go to the next round, and hopefully the next proposer will be able to
+communicate with the validators and then we'll decide in the next round.
+
+**CC**: Are there timeouts between one round and another, if a round gets
+skipped?
+
+**ZM**: There are timeouts. It's a bit more complex. I think we have 5 timeouts.
+We may be able to simplify this a bit. What is important to understand is: The
+only condition which needs to be satisfied so we can go to the next round is
+that your validator is able to communicate with more than 2/3rds of the voting
+power. To be able to move to the next round, you need to receive pre-commit
+messages from more than 2/3rds of the voting power.
+
+We have two kinds of messages: 1) Proposal: Where the current round proposer is
+suggesting how the next block should look. This is the first one. Every round
+starts with the proposer sending a proposal. And then there are two more rounds
+of voting, where the validators are trying to agree whether they will commit
+the proposal or not. The first of these vote messages is called `pre-vote` and
+the second one is `pre-commit`. Now, to be able to move between steps, between
+a `pre-vote` and `pre-commit` step, you need to receive a sufficient number of
+messages, where each message sent by a validator carries a weight: a voting
+power equal to the voting power of the validator who sent it. Before you
+receive messages worth more than 2/3 of the voting power, you are not able to
+move to the higher round. Only when you receive more than 2/3 of the messages
+do you actually start the timeout. The timeout happens only after you receive
+enough messages. And it exists because of the asynchrony of message
+communication, so this timeout gives the guys more time to receive messages
+which may be delayed.
+
+**CC**: In this way that you just described, via the whole network gossiping
+before we commit a block, that is what makes Tendermint BFT deterministic in a
+partially synchronous setting, vs Bitcoin, which has synchrony assumptions
+whereby blocks are first mined and then gossiped to the network.
+
+**ZM**: It's true that in Bitcoin, this is where the synchrony assumption comes
+into play, because if they're not able to communicate timely, they are not able
+to converge to a single longest chain. Why are they not able to decrease the
+timeout in Bitcoin? Because if they did, there would be so many forks that
+they wouldn't be able to converge to a single chain. By increasing this
+complexity and the block time, they're able to have not so many forks. This is
+effectively the timing assumption—the block duration in a sense, because it's
+enough time that the decided block is propagated through the network before
+someone else starts deciding on the same block and creating forks. It's very
+different from the consensus algorithms in the distributed computing setup
+where Tendermint fits. In Tendermint, when we talk about timing dependencies,
+they are really part of this 3-communication-step protocol I just explained. We
+have the following assumption: If the good guys are not able to communicate
+timely and reliably, without message loss within a round, then Tendermint will
+not make progress—it will not be making blocks. So if you are in a completely
+asynchronous network where messages get lost or delayed unpredictably,
+Tendermint will not make progress; it will not create forks, but it will not
+decide, it will not tell you what the next block is. Termination is a liveness
+property of consensus, a guarantee to decide, and for it we do need timing
+assumptions. Within a round, correct validators must be able to communicate
+the consensus messages to each other, not the transactions, but the consensus
+messages. They need to communicate in a timely and reliable fashion. But this
+doesn't need to hold forever. When we say it's a partially synchronous system,
+we assume that the system will go through periods of asynchrony, where we
+don't have this guarantee; messages will be delayed or some will be lost, and
+we will not make progress for some period of time, or we're not guaranteed to
+make progress. And there are periods of synchrony, where these guarantees
+hold. And if we think about the internet, the internet is best described using
+such a model. Sometimes when we send a message from SF to Belgrade, it takes
+100 ms, sometimes it takes 300 ms, sometimes it takes 1 s. But in most cases,
+it takes 100 ms or less than this.
+
+There is one thing which would be really nice if you understand it. In a global
+wide area network, we can't make assumptions about the communication unless we
+are very conservative about them. If you want to be very fast, then you can't
+make an assumption and say we'll for sure be communicating with a 1 ms
+communication delay. Because of the complexity and various congestion issues on
+the network, it might happen that during a short period of time, this doesn't
+hold. If this doesn't hold and you depend on it for the correctness of your
+protocol, you will have a fork. So partially synchronous protocols, most of
+them, like Tendermint, don't depend on the timing assumptions of the internet
+for correctness. This is where we state: safety always. So we never make a fork
+no matter how bad our estimates about the internet communication delays are.
+We'll never make a fork, but we do make some assumptions, and these assumptions
+are built into the timeouts in our protocol, which are actually adaptive.
+So we are adapting to the current conditions, and this is where we're saying...
+We do assume some properties, or some communication delays, to eventually hold
+on the network. During those periods, we guarantee that we will be deciding and
+committing blocks. And we will be doing this very fast. We will basically be
+running at the speed of the current network.
+
+**CC**: We make liveness assumptions based on the integrity of the validator
+businesses, assuming they're up and running fine.
+
+**ZM**: This is where we are saying, the protocol will be live if we have at
+most 1/3, or a bit less than 1/3, of faulty validators. Which means that all
+the other guys should be online and available. This is also for liveness. This
+is related to the condition that we are not able to make progress in rounds if
+we don't receive enough messages. If half of our voting power, or half of our
+validators, are down, we don't have enough messages, so the protocol is
+completely blocked. It doesn't make progress in a round, which means blocks are
+not able to be signed. So it's completely critical for Tendermint that we make
+progress in rounds. It's like breathing. Tendermint is breathing. If there is
+no progress, it's dead; it's blocked, we're not able to breathe, and that's why
+we're not able to make progress.
+
+**CC**: How does Tendermint compare to other consensus algos?
+
+**ZM**: Tendermint is a very interesting protocol. From an academic point of
+view, I'm convinced that there is value there. Hopefully, we prove it by
+publishing it at some good conference. What is novel is, if we first compare
+Tendermint to the existing BFT problem, it's a continuation of academic
+research on BFT consensus. What is novel in Tendermint is that it somehow
+merges the consensus protocol with gossip. This is a completely novel idea.
+Originally, in BFT, people were assuming a single administration domain, a
+small number of nodes, a local area network, 4-7 nodes max. If you look at the
+research papers, 99% of them have this kind of setup. Wide area was studied,
+but there is significantly less work on wide area networks. No one studied how
+to scale those protocols to hundreds or thousands of nodes before blockchain.
+It was always a single administration domain. So in Tendermint now, you are
+able to reach consensus among different administration domains, of which there
+are potentially hundreds in a wide area network. The system model is
+potentially harder because we have more nodes and a wide area network. The
+second thing is that, normally, BFT protocols are designed in a way that has
+two phases, or two parts. The first is called the normal case, and it is
+usually quite simple. But in spite of some failures which are part of the
+normal execution of the protocol, like for example the leader crashing or
+being DDoS'd, they need to go through a quite complex protocol, which is
+called view change or leader election or whatever. These two parts of the same
+protocol have quite different complexity. And most people only understand the
+normal case. In Tendermint, there is no such difference. We have only one
+protocol, there are not two protocols. It's always the same steps, and they
+are much closer to the normal case than to this complex view change protocol.
+
+_This is a bit technical, but at a high level the things to remember are:
+the system model it addresses is harder than the others', and the algorithmic
+complexity of Tendermint is simpler._ The initial goal of Jae and Bucky,
+inspired by Raft, was to make it simple enough that normal engineers could
+understand it.
+
+**CC**: Can you expand on the termination requirement?
+
+_Important point about Liveness in Tendermint_
+
+**ZM**: In Tendermint, we are saying, for termination, we are making the
+assumption that the system is partially synchronous. And in a partially
+synchronous system model, we are able to mathematically prove that the protocol
+will make decisions; it will decide.
+
+**CC**: What is a persistent peer?
+
+**ZM**: It's a list of peer identities to which you will try to establish
+connections; in case a connection is broken, Tendermint will automatically try
+to reestablish it. These are important peers; you will really try persistently
+to establish connections to them. For other peers, you just drop them and try
+to connect to someone else from your address book. The address book is a list
+of peers which you have discovered to exist, because we are talking about a
+very dynamic network—so the nodes are coming and going away—and the gossiping
+protocol is discovering new nodes and gossiping them around. So every node will
+keep a list of the new nodes it discovers, and when you need to establish a
+connection to a peer, you'll look in the address book and get some addresses
+from there. There's a categorization/ranking of nodes there.
+
+[1]: https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/consensus/proposer-selection.md
diff --git a/docs/introduction/install.md b/docs/introduction/install.md
new file mode 100644
index 000000000..d02691102
--- /dev/null
+++ b/docs/introduction/install.md
@@ -0,0 +1,76 @@
+# Install Tendermint
+
+The fastest and easiest way to install the `tendermint` binary
+is to run [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_ubuntu.sh) on
+a fresh Ubuntu instance,
+or [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_bsd.sh)
+on a fresh FreeBSD instance. Read the comments / instructions carefully (e.g., reset your terminal after running the script,
+and make sure you're okay with the network connections being made).
+
+## From Binary
+
+To download pre-built binaries, see the [releases page](https://github.com/tendermint/tendermint/releases).
+
+## From Source
+
+You'll need `go` [installed](https://golang.org/doc/install) and the required
+[environment variables set](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH).
+
+### Get Source Code
+
+```
+mkdir -p $GOPATH/src/github.com/tendermint
+cd $GOPATH/src/github.com/tendermint
+git clone https://github.com/tendermint/tendermint.git
+cd tendermint
+```
+
+### Get Tools & Dependencies
+
+```
+make get_tools
+make get_vendor_deps
+```
+
+### Compile
+
+```
+make install
+```
+
+to put the binary in `$GOPATH/bin`, or use:
+
+```
+make build
+```
+
+to put the binary in `./build`.
+
+The latest version of `tendermint` is now installed.
+
+## Reinstall
+
+If you already have Tendermint installed and you make updates, simply
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+make install
+```
+
+To upgrade, run
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+git pull origin master
+make get_vendor_deps
+make install
+```
+
+## Run
+
+To start a one-node blockchain with a simple in-process application:
+
+```
+tendermint init
+tendermint node --proxy_app=kvstore
+```
diff --git a/docs/introduction/introduction.md b/docs/introduction/introduction.md
new file mode 100644
index 000000000..d43fa9b2d
--- /dev/null
+++ b/docs/introduction/introduction.md
@@ -0,0 +1,332 @@
+# What is Tendermint?
+
+Tendermint is software for securely and consistently replicating an
+application on many machines. By securely, we mean that Tendermint works
+even if up to 1/3 of machines fail in arbitrary ways. By consistently,
+we mean that every non-faulty machine sees the same transaction log and
+computes the same state. Secure and consistent replication is a
+fundamental problem in distributed systems; it plays a critical role in
+the fault tolerance of a broad range of applications, from currencies,
+to elections, to infrastructure orchestration, and beyond.
+
+The ability to tolerate machines failing in arbitrary ways, including
+becoming malicious, is known as Byzantine fault tolerance (BFT). The
+theory of BFT is decades old, but software implementations have only
+become popular recently, due largely to the success of "blockchain
+technology" like Bitcoin and Ethereum. Blockchain technology is just a
+reformalization of BFT in a more modern setting, with emphasis on
+peer-to-peer networking and cryptographic authentication. The name
+derives from the way transactions are batched in blocks, where each
+block contains a cryptographic hash of the previous one, forming a
+chain. In practice, the blockchain data structure actually optimizes BFT
+design.
+
+Tendermint consists of two chief technical components: a blockchain
+consensus engine and a generic application interface. The consensus
+engine, called Tendermint Core, ensures that the same transactions are
+recorded on every machine in the same order. The application interface,
+called the Application BlockChain Interface (ABCI), enables the
+transactions to be processed in any programming language. Unlike other
+blockchain and consensus solutions, which come pre-packaged with built-in
+state machines (like a fancy key-value store, or a quirky scripting
+language), developers can use Tendermint for BFT state machine
+replication of applications written in whatever programming language and
+development environment is right for them.
+
+Tendermint is designed to be easy-to-use, simple-to-understand, highly
+performant, and useful for a wide variety of distributed applications.
+
+## Tendermint vs. X
+
+Tendermint is broadly similar to two classes of software. The first
+class consists of distributed key-value stores, like Zookeeper, etcd,
+and consul, which use non-BFT consensus. The second class is known as
+"blockchain technology", and consists of both cryptocurrencies like
+Bitcoin and Ethereum, and alternative distributed ledger designs like
+Hyperledger's Burrow.
+
+### Zookeeper, etcd, consul
+
+Zookeeper, etcd, and consul are all implementations of a key-value store
+atop a classical, non-BFT consensus algorithm.
+Zookeeper uses a version of Paxos called Zookeeper Atomic Broadcast, while
+etcd and consul use the Raft consensus algorithm, which is much younger and
+simpler. A typical cluster contains 3-5 machines, and can tolerate crash
+failures in up to 1/2 of the machines, but even a single Byzantine fault can
+destroy the system.
+
+Each offering provides a slightly different implementation of a
+featureful key-value store, but all are generally focused around
+providing basic services to distributed systems, such as dynamic
+configuration, service discovery, locking, leader-election, and so on.
+
+Tendermint is in essence similar software, but with two key differences:
+
+- It is Byzantine Fault Tolerant, meaning it can only tolerate up to 1/3
+  of failures, but those failures can include arbitrary behaviour -
+  including hacking and malicious attacks.
+- It does not specify a particular application, like a fancy key-value
+  store. Instead, it focuses on arbitrary state machine replication, so
+  developers can build the application logic that's right for them, from
+  key-value store to cryptocurrency to e-voting platform and beyond.
+
+The layout of this Tendermint website content is also ripped directly
+and without shame from [consul.io](https://www.consul.io/) and the other
+[Hashicorp sites](https://www.hashicorp.com/#tools).
+
+### Bitcoin, Ethereum, etc.
+
+Tendermint emerged in the tradition of cryptocurrencies like Bitcoin,
+Ethereum, etc. with the goal of providing a more efficient and secure
+consensus algorithm than Bitcoin's Proof of Work. In the early days,
+Tendermint had a simple currency built in, and to participate in
+consensus, users had to "bond" units of the currency into a security
+deposit which could be revoked if they misbehaved - this is what made
+Tendermint a Proof-of-Stake algorithm.
+
+Since then, Tendermint has evolved to be a general purpose blockchain
+consensus engine that can host arbitrary application states. That means
+it can be used as a plug-and-play replacement for the consensus engines
+of other blockchain software. So one can take the current Ethereum code
+base, whether in Rust, or Go, or Haskell, and run it as an ABCI
+application using Tendermint consensus. Indeed, [we did that with
+Ethereum](https://github.com/tendermint/ethermint). And we plan to do
+the same for Bitcoin, ZCash, and various other deterministic
+applications as well.
+
+Another example of a cryptocurrency application built on Tendermint is
+[the Cosmos network](http://cosmos.network).
+
+### Other Blockchain Projects
+
+[Fabric](https://github.com/hyperledger/fabric) takes a similar approach
+to Tendermint, but is more opinionated about how the state is managed,
+and requires that all application behaviour runs in potentially many
+docker containers, modules it calls "chaincode". It uses an
+implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf)
+from a team at IBM that is [augmented to handle potentially
+non-deterministic
+chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf). It is
+possible to implement this docker-based behaviour as an ABCI app in
+Tendermint, though extending Tendermint to handle non-determinism
+remains for future work.
+
+[Burrow](https://github.com/hyperledger/burrow) is an implementation of
+the Ethereum Virtual Machine and Ethereum transaction mechanics, with
+additional features for a name-registry, permissions, and native
+contracts, and an alternative blockchain API.
+It uses Tendermint as its consensus engine, and provides a particular
+application state.
+
+## ABCI Overview
+
+The [Application BlockChain Interface
+(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci)
+allows for Byzantine Fault Tolerant replication of applications
+written in any programming language.
+
+### Motivation
+
+Thus far, all blockchain "stacks" (such as
+[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic
+design. That is, each blockchain stack is a single program that handles
+all the concerns of a decentralized ledger; this includes P2P
+connectivity, the "mempool" broadcasting of transactions, consensus on
+the most recent block, account balances, Turing-complete contracts,
+user-level permissions, etc.
+
+Using a monolithic architecture is typically bad practice in computer
+science. It makes it difficult to reuse components of the code, and
+attempts to do so result in complex maintenance procedures for forks of
+the codebase. This is especially true when the codebase is not modular
+in design and suffers from "spaghetti code".
+
+Another problem with monolithic design is that it limits you to the
+language of the blockchain stack (or vice versa). In the case of
+Ethereum, which supports a Turing-complete bytecode virtual machine, it
+limits you to languages that compile down to that bytecode; today, those
+are Serpent and Solidity.
+
+In contrast, our approach is to decouple the consensus engine and P2P
+layers from the details of the application state of the particular
+blockchain application. We do this by abstracting away the details of
+the application to an interface, which is implemented as a socket
+protocol.
+
+Thus we have an interface, the Application BlockChain Interface (ABCI),
+and its primary implementation, the Tendermint Socket Protocol (TSP, or
+Teaspoon).
+
+### Intro to ABCI
+
+[Tendermint Core](https://github.com/tendermint/tendermint) (the
+"consensus engine") communicates with the application via a socket
+protocol that satisfies the ABCI.
+
+To draw an analogy, let's talk about a well-known cryptocurrency,
+Bitcoin. Bitcoin is a cryptocurrency blockchain where each node
+maintains a fully audited Unspent Transaction Output (UTXO) database. If
+one wanted to create a Bitcoin-like system on top of ABCI, Tendermint
+Core would be responsible for
+
+- Sharing blocks and transactions between nodes
+- Establishing a canonical/immutable order of transactions
+  (the blockchain)
+
+The application will be responsible for
+
+- Maintaining the UTXO database
+- Validating cryptographic signatures of transactions
+- Preventing transactions from spending non-existent outputs
+- Allowing clients to query the UTXO database.
+
+Tendermint is able to decompose the blockchain design by offering a very
+simple API (ie. the ABCI) between the application process and consensus
+process.
+
+The ABCI consists of 3 primary message types that get delivered from the
+core to the application. The application replies with corresponding
+response messages.
+
+The messages are specified here: [ABCI Message
+Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types).
+
+The **DeliverTx** message is the workhorse of the application. Each
+transaction in the blockchain is delivered with this message. The
+application needs to validate each transaction received with the
+**DeliverTx** message against the current state, application protocol,
+and the cryptographic credentials of the transaction.
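+
+To make the message flow concrete, here is a minimal sketch of an ABCI
+application, written against the ABCI Go types of this era
+(`github.com/tendermint/tendermint/abci/types`); exact signatures have shifted
+across ABCI versions, so treat this as an illustration rather than a reference
+implementation. CheckTx and Commit are described in more detail just below.
+
+```go
+package counter
+
+import (
+    "encoding/binary"
+
+    "github.com/tendermint/tendermint/abci/types"
+)
+
+// CounterApp is an illustrative application: each valid transaction is an
+// 8-byte big-endian integer that must equal the current count.
+type CounterApp struct {
+    types.BaseApplication // provides no-op defaults for the other ABCI methods
+    count uint64
+}
+
+// CheckTx guards the mempool: only superficially valid transactions are
+// relayed to peers.
+func (app *CounterApp) CheckTx(tx []byte) types.ResponseCheckTx {
+    if len(tx) != 8 {
+        return types.ResponseCheckTx{Code: 1, Log: "tx must be 8 bytes"}
+    }
+    return types.ResponseCheckTx{Code: 0} // code 0 means OK by convention
+}
+
+// DeliverTx validates a transaction against the current state and, if valid,
+// applies the state transition.
+func (app *CounterApp) DeliverTx(tx []byte) types.ResponseDeliverTx {
+    if len(tx) != 8 || binary.BigEndian.Uint64(tx) != app.count {
+        return types.ResponseDeliverTx{Code: 1, Log: "unexpected count"}
+    }
+    app.count++
+    return types.ResponseDeliverTx{Code: 0}
+}
+
+// Commit returns a deterministic commitment to the application state, which
+// Tendermint places in the next block header.
+func (app *CounterApp) Commit() types.ResponseCommit {
+    data := make([]byte, 8)
+    binary.BigEndian.PutUint64(data, app.count)
+    return types.ResponseCommit{Data: data}
+}
+```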
+A validated transaction then needs to update the application state — by
+binding a value into a key-value store, or by updating the UTXO database, for
+instance.
+
+The **CheckTx** message is similar to **DeliverTx**, but it's only for
+validating transactions. Tendermint Core's mempool first checks the
+validity of a transaction with **CheckTx**, and only relays valid
+transactions to its peers. For instance, an application may check an
+incrementing sequence number in the transaction and return an error upon
+**CheckTx** if the sequence number is old. Alternatively, they might use
+a capabilities based system that requires capabilities to be renewed
+with every transaction.
+
+The **Commit** message is used to compute a cryptographic commitment to
+the current application state, to be placed into the next block header.
+This has some handy properties. Inconsistencies in updating that state
+will now appear as blockchain forks, which catches a whole class of
+programming errors. This also simplifies the development of secure
+lightweight clients, as Merkle-hash proofs can be verified by checking
+against the block hash, and the block hash is signed by a quorum.
+
+There can be multiple ABCI socket connections to an application.
+Tendermint Core creates three ABCI connections to the application; one
+for the validation of transactions when broadcasting in the mempool, one
+for the consensus engine to run block proposals, and one more for
+querying the application state.
+
+It's probably evident that application designers need to very carefully
+design their message handlers to create a blockchain that does anything
+useful, but this architecture provides a place to start. The diagram
+below illustrates the flow of messages via ABCI.
+
+![](assets/abci.png)
+
+## A Note on Determinism
+
+The logic for blockchain transaction processing must be deterministic.
+If the application logic weren't deterministic, consensus would not be
+reached among the Tendermint Core replica nodes.
+
+Solidity on Ethereum is a great language of choice for blockchain
+applications because, among other reasons, it is a completely
+deterministic programming language. However, it's also possible to
+create deterministic applications using existing popular languages like
+Java, C++, Python, or Go. Game programmers and blockchain developers are
+already familiar with creating deterministic programs by avoiding
+sources of non-determinism such as:
+
+- random number generators (without deterministic seeding)
+- race conditions on threads (or avoiding threads altogether)
+- the system clock
+- uninitialized memory (in unsafe programming languages like C
+  or C++)
+- [floating point
+  arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/)
+- language features that are random (e.g. map iteration in Go)
+
+While programmers can avoid non-determinism by being careful, it is also
+possible to create a special linter or static analyzer for each language
+to check for determinism. In the future we may work with partners to
+create such tools.
+
+## Consensus Overview
+
+Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus
+protocol. The protocol follows a simple state machine that looks like
+this:
+
+![](assets/consensus_logic.png)
+
+Participants in the protocol are called **validators**; they take turns
+proposing blocks of transactions and voting on them. Blocks are
+committed in a chain, with one block at each **height**.
A block may +fail to be committed, in which case the protocol moves to the next +**round**, and a new validator gets to propose a block for that height. +Two stages of voting are required to successfully commit a block; we +call them **pre-vote** and **pre-commit**. A block is committed when +more than 2/3 of validators pre-commit for the same block in the same +round. + +There is a picture of a couple doing the polka because validators are +doing something like a polka dance. When more than two-thirds of the +validators pre-vote for the same block, we call that a **polka**. Every +pre-commit must be justified by a polka in the same round. + +Validators may fail to commit a block for a number of reasons; the +current proposer may be offline, or the network may be slow. Tendermint +allows them to establish that a validator should be skipped. Validators +wait a small amount of time to receive a complete proposal block from +the proposer before voting to move to the next round. This reliance on a +timeout is what makes Tendermint a weakly synchronous protocol, rather +than an asynchronous one. However, the rest of the protocol is +asynchronous, and validators only make progress after hearing from more +than two-thirds of the validator set. A simplifying element of +Tendermint is that it uses the same mechanism to commit a block as it +does to skip to the next round. + +Assuming less than one-third of the validators are Byzantine, Tendermint +guarantees that safety will never be violated - that is, validators will +never commit conflicting blocks at the same height. To do this it +introduces a few **locking** rules which modulate which paths can be +followed in the flow diagram. Once a validator precommits a block, it is +locked on that block. Then, + +1. it must prevote for the block it is locked on +2. it can only unlock, and precommit for a new block, if there is a + polka for that block in a later round + +## Stake + +In many systems, not all validators will have the same "weight" in the +consensus protocol. Thus, we are not so much interested in one-third or +two-thirds of the validators, but in those proportions of the total +voting power, which may not be uniformly distributed across individual +validators. + +Since Tendermint can replicate arbitrary applications, it is possible to +define a currency, and denominate the voting power in that currency. +When voting power is denominated in a native currency, the system is +often referred to as Proof-of-Stake. Validators can be forced, by logic +in the application, to "bond" their currency holdings in a security +deposit that can be destroyed if they're found to misbehave in the +consensus protocol. This adds an economic element to the security of the +protocol, allowing one to quantify the cost of violating the assumption +that less than one-third of voting power is Byzantine. + +The [Cosmos Network](http://cosmos.network) is designed to use this +Proof-of-Stake mechanism across an array of cryptocurrencies implemented +as ABCI applications. + +The following diagram is Tendermint in a (technical) nutshell. [See here +for high resolution +version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). + +![](assets/tm-transaction-flow.png) diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md new file mode 100644 index 000000000..8e4908784 --- /dev/null +++ b/docs/introduction/quick-start.md @@ -0,0 +1,149 @@ +# Tendermint + +## Overview + +This is a quick start guide. 
If you have a vague idea about how Tendermint
+works and want to get started right away, continue.
+
+## Install
+
+### Quick Install
+
+Installation on a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/fFfOR), like so:
+
+```
+curl -L https://git.io/fFfOR | bash
+source ~/.profile
+```
+
+WARNING: do not run the above on your local machine.
+
+The script is also used to facilitate cluster deployment below.
+
+### Manual Install
+
+Requires:
+
+- `go` minimum version 1.10
+- `$GOPATH` environment variable must be set
+- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
+
+To install Tendermint, run:
+
+```
+go get github.com/tendermint/tendermint
+cd $GOPATH/src/github.com/tendermint/tendermint
+make get_tools && make get_vendor_deps
+make install
+```
+
+Note that `go get` may return an error, but it can be ignored.
+
+Confirm installation:
+
+```
+$ tendermint version
+0.23.0-dev
+```
+
+## Initialization
+
+Running:
+
+```
+tendermint init
+```
+
+will create the required files for a single, local node.
+
+These files are found in `$HOME/.tendermint`:
+
+```
+$ ls $HOME/.tendermint
+
+config.toml data genesis.json priv_validator.json
+```
+
+For a single, local node, no further configuration is required.
+Configuring a cluster is covered further below.
+
+## Local Node
+
+Start tendermint with a simple in-process application:
+
+```
+tendermint node --proxy_app=kvstore
+```
+
+and blocks will start to stream in:
+
+```
+I[01-06|01:45:15.592] Executed block module=state height=1 validTxs=0 invalidTxs=0
+I[01-06|01:45:15.624] Committed state module=state height=1 txs=0 appHash=
+```
+
+Check the status with:
+
+```
+curl -s localhost:26657/status
+```
+
+### Sending Transactions
+
+With the kvstore app running, we can send transactions:
+
+```
+curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"'
+```
+
+and check that it worked with:
+
+```
+curl -s 'localhost:26657/abci_query?data="abcd"'
+```
+
+We can send transactions with a key and value too:
+
+```
+curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"'
+```
+
+and query the key:
+
+```
+curl -s 'localhost:26657/abci_query?data="name"'
+```
+
+where the value is returned in hex.
+
+## Cluster of Nodes
+
+First create four Ubuntu cloud machines. The following was tested on Digital
+Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP
+addresses below as IP1, IP2, IP3, IP4.
+
+Then, `ssh` into each machine, and execute [this script](https://git.io/fFfOR):
+
+```
+curl -L https://git.io/fFfOR | bash
+source ~/.profile
+```
+
+This will install `go` and other dependencies, get the Tendermint source code, then compile the `tendermint` binary.
+
+Next, use the `tendermint testnet` command to create four directories of config files (found in `./mytestnet`) and copy each directory to the relevant machine in the cloud, so that each machine has a `$HOME/mytestnet/node[0-3]` directory.
+Then from each machine, run:
+
+```
+tendermint node --home ./mytestnet/node0 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
+tendermint node --home ./mytestnet/node1 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
+tendermint node --home ./mytestnet/node2 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
+tendermint node --home ./mytestnet/node3 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
+```
+
+Note that after the third node is started, blocks will start to stream in
+because >2/3 of validators (defined in the `genesis.json`) have come online.
+Seeds can also be specified in the `config.toml`. See [this
+PR](https://github.com/tendermint/tendermint/pull/792) for more information
+about configuration options.
+
+Transactions can then be sent as covered in the single, local node example above.
diff --git a/docs/networks/deploy-testnets.md b/docs/networks/deploy-testnets.md
new file mode 100644
index 000000000..88e5c6f72
--- /dev/null
+++ b/docs/networks/deploy-testnets.md
@@ -0,0 +1,74 @@
+# Deploy a Testnet
+
+Now that we've seen how ABCI works, and even played with a few
+applications on a single validator node, it's time to deploy a test
+network to four validator nodes.
+
+## Manual Deployments
+
+It's relatively easy to set up a Tendermint cluster manually. The only
+requirements for a particular Tendermint node are a private key for the
+validator, stored as `priv_validator.json`, a node key, stored as
+`node_key.json`, and a list of the public keys of all validators, stored
+as `genesis.json`. These files should be stored in
+`~/.tendermint/config`, or wherever the `$TMHOME` variable might be set
+to.
+
+Here are the steps to setting up a testnet manually:
+
+1. Provision nodes on your cloud provider of choice
+2. Install Tendermint and the application of interest on all nodes
+3. Generate a private key and a node key for each validator using
+   `tendermint init`
+4. Compile a list of public keys for each validator into a
+   `genesis.json` file and replace the existing file with it.
+5. Run
+   `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >` on each node, where `< peer addresses >` is a comma-separated
+   list of the ID@IP:PORT combination for each node. The default port for
+   Tendermint is `26656`. The ID of a node can be obtained by running the
+   `tendermint show_node_id` command. Thus, if the IP addresses of your nodes
+   were `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command
+   would look like:
+
+```
+tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656,429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656,f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656
+```
+
+After a few seconds, all the nodes should connect to each other and
+start making blocks! For more information, see the Tendermint Networks
+section of [the guide to using Tendermint](./using-tendermint.md).
+
+But wait! Steps 3, 4 and 5 are quite manual. Instead, use the `tendermint
+testnet` command. By default, running `tendermint testnet` will create all the
+required files, but it won't populate the list of persistent peers.
+It will, however, do so if you provide the `--populate-persistent-peers` flag
+and the optional `--starting-ip-address` flag. Run `tendermint testnet --help`
+for more details on the available flags.
+
+```
+tendermint testnet --populate-persistent-peers --starting-ip-address 192.168.0.1
+```
+
+This command will generate four folders, prefixed with "node", and put them
+into the "./mytestnet" directory by default.
+
+As you might imagine, this command is useful for manual or automated
+deployments.
+
+## Automated Deployments
+
+This is the easiest and fastest way to get a testnet up - in less than 5
+minutes.
+
+### Local
+
+With `docker` and `docker-compose` installed, run the command:
+
+```
+make localnet-start
+```
+
+from the root of the tendermint repository. This will spin up a 4-node
+local testnet. Review the target in the Makefile to debug any problems.
+
+### Cloud
+
+See the [next section](./terraform-and-ansible.html) for details.
diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md
new file mode 100644
index 000000000..5a4b9c53b
--- /dev/null
+++ b/docs/networks/terraform-and-ansible.md
@@ -0,0 +1,169 @@
+# Terraform & Ansible
+
+Automated deployments are done using
+[Terraform](https://www.terraform.io/) to create servers on Digital
+Ocean, then [Ansible](http://www.ansible.com/) to create and manage
+testnets on those servers.
+
+## Install
+
+NOTE: see the [integration bash
+script](https://github.com/tendermint/tendermint/blob/develop/networks/remote/integration.sh)
+that can be run on a fresh DO droplet and will automatically spin up a
+4-node testnet. The script more or less does everything described below.
+
+- Install [Terraform](https://www.terraform.io/downloads.html) and
+  [Ansible](http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)
+  on a Linux machine.
+- Create a [DigitalOcean API
+  token](https://cloud.digitalocean.com/settings/api/tokens) with read
+  and write capability.
+- Install the python dopy package (`pip install dopy`)
+- Create SSH keys (`ssh-keygen`)
+- Set environment variables:
+
+```
+export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
+export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub"
+```
+
+These will be used by both `terraform` and `ansible`.
+
+### Terraform
+
+This step will create four Digital Ocean droplets. First, go to the
+correct directory:
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform
+```
+
+then:
+
+```
+terraform init
+terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE"
+```
+
+and you will get a list of IP addresses that belong to your droplets.
+
+With the droplets created and running, let's set up Ansible.
+
+### Ansible
+
+The playbooks in [the ansible
+directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible)
+run ansible roles to configure the sentry node architecture. You must
+switch to this directory to run ansible
+(`cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible`).
+
+There are several roles that are self-explanatory.
+
+First, we configure our droplets by specifying the paths for tendermint
+(`BINARY`) and the node files (`CONFIGDIR`). The latter expects any
+number of directories named `node0, node1, ...` and so on (equal to the
+number of droplets created). For this example, we use pre-created files
+from [this
+directory](https://github.com/tendermint/tendermint/tree/master/docs/examples).
+To create your own files, use either the `tendermint testnet` command or
+review [manual deployments](./deploy-testnets.md).
+
+Here's the command to run:
+
+```
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
+```
+
+Voila! All your droplets now have the `tendermint` binary and the required
+configuration files to run a testnet.
+
+Next, we run the install role:
+
+```
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
+```
+
+which, as you'll see below, executes
+`tendermint node --proxy_app=kvstore` on all droplets. Although we'll
+soon be modifying this role and running it again, this first execution
+allows us to get each `node_info.id` that corresponds to each
+`node_info.listen_addr`. (This part will be automated in the future.) In
+your browser (or using `curl`), for every droplet, go to
+`IP:26657/status` and note the two `node_info` fields just mentioned.
+Notice that blocks aren't being created (`latest_block_height` should be
+zero and not increasing).
+
+Next, open `roles/install/templates/systemd.service.j2` and look for the
+`ExecStart` line, which should look something like:
+
+```
+ExecStart=/usr/bin/tendermint node --proxy_app=kvstore
+```
+
+and add the `--p2p.persistent_peers` flag with the relevant information
+for each node. The resulting file should look something like:
+
+```
+[Unit]
+Description={{service}}
+Requires=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+User={{service}}
+Group={{service}}
+PermissionsStartOnly=true
+ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=167b80242c300bf0ccfb3ced3dec60dc2a81776e@165.227.41.206:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@165.227.43.146:26656,303a1a4312c30525c99ba66522dd81cca56a361a@159.89.115.32:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@159.89.119.125:26656
+ExecReload=/bin/kill -HUP $MAINPID
+KillSignal=SIGTERM
+
+[Install]
+WantedBy=multi-user.target
+```
+
+Then, stop the nodes:
+
+```
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet stop.yml
+```
+
+Finally, we run the install role again:
+
+```
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
+```
+
+to re-run `tendermint node` with the new flag on all droplets. The
+`latest_block_hash` should now be changing and `latest_block_height`
+increasing. Your testnet is now up and running :)
+
+Peek at the logs with the status role:
+
+```
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml
+```
+
+### Logging
+
+The crudest way to examine logs is the status role described above. You
+can also ship logs to Logz.io, an Elastic stack (Elasticsearch, Logstash,
+and Kibana) service provider, and set up your nodes to log there
+automatically. Create an account and get your API key from the notes on
+[this page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then:
+
+```
+yum install systemd-devel || echo "This will only work on RHEL-based systems."
+apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
+
+go get github.com/mheese/journalbeat
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
+```
+
+### Cleanup
+
+To remove your droplets, run:
+
+```
+terraform destroy -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE"
+```
diff --git a/docs/package.json b/docs/package.json
new file mode 100644
index 000000000..c76bb37c4
--- /dev/null
+++ b/docs/package.json
@@ -0,0 +1,37 @@
+{
+  "dependencies": {
+    "prettier": "^1.13.7",
+    "remark-cli": "^5.0.0",
+    "remark-lint-no-dead-urls": "^0.3.0",
+    "textlint": "^10.2.1"
+  },
+  "name": "tendermint",
+  "description": "Tendermint Core Documentation",
+  "version": "0.0.1",
+  "main": "README.md",
+  "devDependencies": {},
+  "scripts": {
+    "lint:json": "prettier \"**/*.json\" --write",
+    "lint:md": "prettier \"**/*.md\" --write && remark . && textlint \"md/**\"",
+    "lint": "yarn lint:json && yarn lint:md"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/tendermint/tendermint.git"
+  },
+  "keywords": [
+    "tendermint",
+    "blockchain"
+  ],
+  "author": "Tendermint",
+  "license": "ISC",
+  "bugs": {
+    "url": "https://github.com/tendermint/tendermint/issues"
+  },
+  "homepage": "https://tendermint.com/docs/",
+  "remarkConfig": {
+    "plugins": [
+      "remark-lint-no-dead-urls"
+    ]
+  }
+}
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 000000000..85e42ba83
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,4 @@
+sphinx
+sphinx-autobuild
+recommonmark
+sphinx_rtd_theme
diff --git a/docs/research/determinism.md b/docs/research/determinism.md
new file mode 100644
index 000000000..2e23476c6
--- /dev/null
+++ b/docs/research/determinism.md
@@ -0,0 +1,3 @@
+# On Determinism
+
+Arguably, the most difficult part of blockchain programming is determinism - that is, ensuring that sources of nondeterminism do not creep into the design of such systems.
diff --git a/docs/research/transactional-semantics.md b/docs/research/transactional-semantics.md
new file mode 100644
index 000000000..bab1864e8
--- /dev/null
+++ b/docs/research/transactional-semantics.md
@@ -0,0 +1,25 @@
+# Transactional Semantics
+
+In [Using Tendermint](./using-tendermint.md#broadcast-api) we
+discussed the different API endpoints for sending transactions and the
+differences between them.
+
+What we have not yet covered is transactional semantics.
+
+When you send a transaction using one of the available methods, it first
+goes to the mempool. Currently, the mempool does not provide strong
+guarantees like "if the transaction were accepted, it would eventually be
+included in a block (given that CheckTx passes)."
+
+For instance, a tx could enter the mempool, but the node could crash
+before the tx is sent to peers.
+
+We are planning to provide such guarantees by using a WAL and replaying
+transactions (see
+[this issue](https://github.com/tendermint/tendermint/issues/248)), but
+it's non-trivial to do this efficiently.
+
+The temporary solution is for clients to monitor the node and resubmit
+transaction(s) and/or send them to more nodes at once, so that the
+probability of all of them crashing at the same time and losing the
+message decreases substantially.
diff --git a/docs/spec/README.md b/docs/spec/README.md
new file mode 100644
index 000000000..ab689d9d6
--- /dev/null
+++ b/docs/spec/README.md
@@ -0,0 +1,80 @@
+# Tendermint Specification
+
+This is a markdown specification of the Tendermint blockchain.
+It defines the base data structures, how they are validated,
+and how they are communicated over the network.
+
+If you find discrepancies between the spec and the code that
+do not have an associated issue or pull request on GitHub,
+please submit them to our [bug bounty](https://tendermint.com/security)!
+
+## Contents
+
+- [Overview](#overview)
+
+### Data Structures
+
+- [Encoding and Digests](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md)
+- [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md)
+- [State](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md)
+
+### Consensus Protocol
+
+- [Consensus Algorithm](/docs/spec/consensus/consensus.md)
+- [Time](/docs/spec/consensus/bft-time.md)
+- [Light-Client](/docs/spec/consensus/light-client.md)
+
+### P2P and Network Protocols
+
+- [The Base P2P Layer](https://github.com/tendermint/tendermint/tree/master/docs/spec/p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections
+- [Peer Exchange (PEX)](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/pex): gossip known peer addresses so peers can find each other
+- [Block Sync](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/block_sync): gossip blocks so peers can catch up quickly
+- [Consensus](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus): gossip votes and block parts so new blocks can be committed
+- [Mempool](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/mempool): gossip transactions so they get included in blocks
+- Evidence: TODO
+
+### Software
+
+- [ABCI](/docs/spec/software/abci.md): Details about interactions between the
+  application and consensus engine over ABCI
+- [Write-Ahead Log](/docs/spec/software/wal.md): Details about how the consensus
+  engine preserves data and recovers from crash failures
+
+## Overview
+
+Tendermint provides Byzantine Fault Tolerant State Machine Replication using
+hash-linked batches of transactions. Such transaction batches are called
+"blocks". Hence, Tendermint defines a "blockchain".
+
+Each block in Tendermint has a unique index - its Height.
+Heights in the blockchain are monotonic.
+Each block is committed by a known set of weighted Validators.
+Membership and weighting within this validator set may change over time.
+Tendermint guarantees the safety and liveness of the blockchain
+so long as less than 1/3 of the total weight of the Validator set
+is malicious or faulty.
+
+A commit in Tendermint is a set of signed messages from more than 2/3 of
+the total weight of the current Validator set. Validators take turns
+proposing blocks and voting on them. Once enough votes are received, the
+block is considered committed. These votes are included in the *next* block
+as proof that the previous block was committed - they cannot be included in
+the current block, as that block has already been created.
+
+Once a block is committed, it can be executed against an application.
+The application returns results for each of the transactions in the block.
+The application can also return changes to be made to the validator set,
+as well as a cryptographic digest of its latest state.
+
+Tendermint is designed to enable efficient verification and authentication
+of the latest state of the blockchain. To achieve this, it embeds
+cryptographic commitments to certain information in the block "header".
+This information includes the contents of the block (eg. the transactions),
+the validator set committing the block, as well as the various results
+returned by the application.
+Note, however, that block execution only occurs *after* a block is committed.
+Thus, application results can only be included in the *next* block.
+
+Also note that information like the transaction results and the validator
+set are never directly included in the block - only their cryptographic
+digests (Merkle roots) are.
+Hence, verification of a block requires a separate data structure to store
+this information. We call this the `State`. Block verification also
+requires access to the previous block.
diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md
new file mode 100644
index 000000000..eb34f4c87
--- /dev/null
+++ b/docs/spec/blockchain/blockchain.md
@@ -0,0 +1,431 @@
+# Tendermint Blockchain
+
+Here we describe the data structures in the Tendermint blockchain and the
+rules for validating them.
+
+## Data Structures
+
+The Tendermint blockchain consists of a short list of basic data types:
+
+- `Block`
+- `Header`
+- `Vote`
+- `BlockID`
+- `Signature`
+- `Evidence`
+
+## Block
+
+A block consists of a header, a list of transactions, a list of votes (the
+commit), and a list of evidence of malfeasance (ie. signing conflicting
+votes).
+
+```go
+type Block struct {
+    Header     Header
+    Txs        [][]byte
+    LastCommit []Vote
+    Evidence   []Evidence
+}
+```
+
+## Header
+
+A block header contains metadata about the block and about the consensus,
+as well as commitments to the data in the current block, the previous
+block, and the results returned by the application:
+
+```go
+type Header struct {
+    // block metadata
+    Version string // Version string
+    ChainID string // ID of the chain
+    Height  int64  // Current block height
+    Time    int64  // UNIX time, in milliseconds
+
+    // current block
+    NumTxs         int64  // Number of txs in this block
+    TxHash         []byte // SimpleMerkle of the block.Txs
+    LastCommitHash []byte // SimpleMerkle of the block.LastCommit
+
+    // previous block
+    TotalTxs    int64   // prevBlock.TotalTxs + block.NumTxs
+    LastBlockID BlockID // BlockID of prevBlock
+
+    // application
+    ResultsHash         []byte // SimpleMerkle of []abci.Result from prevBlock
+    AppHash             []byte // Arbitrary state digest
+    ValidatorsHash      []byte // SimpleMerkle of the ValidatorSet
+    ConsensusParamsHash []byte // SimpleMerkle of the ConsensusParams
+
+    // consensus
+    Proposer     []byte // Address of the block proposer
+    EvidenceHash []byte // SimpleMerkle of []Evidence
+}
+```
+
+Further details on each of these fields are given below.
+
+## BlockID
+
+The `BlockID` contains two distinct Merkle roots of the block.
+The first, used as the block's main hash, is the Merkle root
+of all the fields in the header. The second, used for secure gossiping of
+the block during consensus, is the Merkle root of the complete serialized
+block cut into parts. The `BlockID` includes these two hashes, as well as
+the number of parts.
+
+```go
+type BlockID struct {
+    Hash  []byte
+    Parts PartsHeader
+}
+
+type PartsHeader struct {
+    Hash  []byte
+    Total int32
+}
+```
+
+## Vote
+
+A vote is a signed message from a validator for a particular block.
+The vote includes information about the validator signing it.
+
+```go
+type Vote struct {
+    Timestamp int64
+    Address   []byte
+    Index     int
+    Height    int64
+    Round     int
+    Type      int8
+    BlockID   BlockID
+    Signature Signature
+}
+```
+
+There are two types of votes:
+a *prevote* has `vote.Type == 1` and
+a *precommit* has `vote.Type == 2`.
+
+## Signature
+
+Tendermint allows for multiple signature schemes to be used by prepending a
+single type-byte to the signature bytes. Different signatures may also come
+with fixed or variable lengths.
+Currently, Tendermint supports Ed25519 and Secp256k1.
+
+### ED25519
+
+An ED25519 signature has `Type == 0x1`. It looks like:
+
+```go
+// Implements Signature
+type Ed25519Signature struct {
+    Type      int8 = 0x1
+    Signature [64]byte
+}
+```
+
+where `Signature` is the 64-byte signature.
+
+### Secp256k1
+
+A `Secp256k1` signature has `Type == 0x2`. It looks like:
+
+```go
+// Implements Signature
+type Secp256k1Signature struct {
+    Type      int8 = 0x2
+    Signature []byte
+}
+```
+
+where `Signature` is the DER encoded signature, ie:
+
+```hex
+0x30 <length of whole message> 0x02 <length of R> <R> 0x02 <length of S> <S>
+```
+
+## Evidence
+
+TODO
+
+## Validation
+
+Here we describe the validation rules for every element in a block.
+Blocks which do not satisfy these rules are considered invalid.
+
+We abuse notation by using something that looks like Go, supplemented with
+English. A statement such as `x == y` is an assertion - if it fails, the
+item is invalid.
+
+We refer to certain globally available objects:
+`block` is the block under consideration,
+`prevBlock` is the `block` at the previous height,
+and `state` keeps track of the validator set, the consensus parameters
+and other results from the application.
+Elements of an object are accessed as expected,
+ie. `block.Header`. See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`.
+
+### Header
+
+A Header is valid if its corresponding fields are valid.
+
+### Version
+
+Arbitrary string.
+
+### ChainID
+
+Arbitrary constant string.
+
+### Height
+
+```go
+block.Header.Height > 0
+block.Header.Height == prevBlock.Header.Height + 1
+```
+
+The height is an incrementing integer. The first block has
+`block.Header.Height == 1`.
+
+### Time
+
+The median of the timestamps of the valid votes in the block.LastCommit.
+Corresponds to the number of milliseconds since January 1, 1970.
+
+Note: the timestamp of a vote must be greater by at least one millisecond
+than that of the block being voted on.
+
+### NumTxs
+
+```go
+block.Header.NumTxs == len(block.Txs)
+```
+
+Number of transactions included in the block.
+
+### TxHash
+
+```go
+block.Header.TxHash == SimpleMerkleRoot(block.Txs)
+```
+
+Simple Merkle root of the transactions in the block.
+
+### LastCommitHash
+
+```go
+block.Header.LastCommitHash == SimpleMerkleRoot(block.LastCommit)
+```
+
+Simple Merkle root of the votes included in the block.
+These are the votes that committed the previous block.
+
+The first block has `block.Header.LastCommitHash == []byte{}`
+
+### TotalTxs
+
+```go
+block.Header.TotalTxs == prevBlock.Header.TotalTxs + block.Header.NumTxs
+```
+
+The cumulative sum of all transactions included in this blockchain.
+
+The first block has `block.Header.TotalTxs = block.Header.NumTxs`.
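+
+The count-related checks above translate almost directly into code. Below
+is a minimal sketch of the Height, NumTxs, and TotalTxs rules; it assumes
+the `Block` and `Header` definitions from earlier in this document plus the
+standard `fmt` package, and the function name and error messages are
+illustrative, not part of the spec:
+
+```go
+// validateCounts sketches the Height, NumTxs, and TotalTxs rules.
+func validateCounts(block, prevBlock Block) error {
+    if block.Header.Height != prevBlock.Header.Height+1 {
+        return fmt.Errorf("expected height %d, got %d",
+            prevBlock.Header.Height+1, block.Header.Height)
+    }
+    if block.Header.NumTxs != int64(len(block.Txs)) {
+        return fmt.Errorf("header claims %d txs, block contains %d",
+            block.Header.NumTxs, len(block.Txs))
+    }
+    if block.Header.TotalTxs != prevBlock.Header.TotalTxs+block.Header.NumTxs {
+        return fmt.Errorf("invalid cumulative TotalTxs")
+    }
+    return nil
+}
+```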
+
+### LastBlockID
+
+LastBlockID is the previous block's BlockID:
+
+```go
+prevBlockParts := MakeParts(prevBlock, state.LastConsensusParams.BlockGossip.BlockPartSize)
+block.Header.LastBlockID == BlockID {
+    Hash: SimpleMerkleRoot(prevBlock.Header),
+    PartsHeader{
+        Hash: SimpleMerkleRoot(prevBlockParts),
+        Total: len(prevBlockParts),
+    },
+}
+```
+
+Note: it depends on the ConsensusParams,
+which are held in the `state` and may be updated by the application.
+
+The first block has `block.Header.LastBlockID == BlockID{}`.
+
+### ResultsHash
+
+```go
+block.ResultsHash == SimpleMerkleRoot(state.LastResults)
+```
+
+Simple Merkle root of the results of the transactions in the previous
+block.
+
+The first block has `block.Header.ResultsHash == []byte{}`.
+
+### AppHash
+
+```go
+block.AppHash == state.AppHash
+```
+
+Arbitrary byte array returned by the application after executing and
+committing the previous block.
+
+The first block has `block.Header.AppHash == []byte{}`.
+
+### ValidatorsHash
+
+```go
+block.ValidatorsHash == SimpleMerkleRoot(state.Validators)
+```
+
+Simple Merkle root of the current validator set that is committing the
+block. This can be used to validate the `LastCommit` included in the next
+block. May be updated by the application.
+
+### ConsensusParamsHash
+
+```go
+block.ConsensusParamsHash == SimpleMerkleRoot(state.ConsensusParams)
+```
+
+Simple Merkle root of the consensus parameters.
+May be updated by the application.
+
+### Proposer
+
+```go
+block.Header.Proposer in state.Validators
+```
+
+Original proposer of the block. Must be a current validator.
+
+NOTE: we also need to track the round.
+
+### EvidenceHash
+
+```go
+block.EvidenceHash == SimpleMerkleRoot(block.Evidence)
+```
+
+Simple Merkle root of the evidence of Byzantine behaviour included in this
+block.
+
+## Txs
+
+Arbitrary length array of arbitrary length byte-arrays.
+
+## LastCommit
+
+The first height is an exception - it requires the LastCommit to be empty:
+
+```go
+if block.Header.Height == 1 {
+    len(block.LastCommit) == 0
+}
+```
+
+Otherwise, we require:
+
+```go
+len(block.LastCommit) == len(state.LastValidators)
+talliedVotingPower := 0
+for i, vote := range block.LastCommit {
+    if vote == nil {
+        continue
+    }
+    vote.Type == 2
+    vote.Height == block.LastCommit.Height()
+    vote.Round == block.LastCommit.Round()
+    vote.BlockID == block.LastBlockID
+
+    val := state.LastValidators[i]
+    vote.Verify(block.ChainID, val.PubKey) == true
+
+    talliedVotingPower += val.VotingPower
+}
+
+talliedVotingPower > (2/3) * TotalVotingPower(state.LastValidators)
+```
+
+Includes one (possibly nil) vote for every current validator.
+Non-nil votes must be Precommits.
+All votes must be for the same height and round.
+All votes must be for the previous block.
+All votes must have a valid signature from the corresponding validator.
+The sum total of the voting power of the validators that voted
+must be greater than 2/3 of the total voting power of the complete
+validator set.
+
+### Vote
+
+A vote is a signed message broadcast in the consensus for a particular
+block at a particular height and round.
+When stored in the blockchain or propagated over the network, votes are
+encoded in TMBIN. For signing, votes are encoded in JSON, and the ChainID
+is included, in the form of the `CanonicalSignBytes`.
+
+We define a method `Verify` that returns `true` if the signature verifies
+against the pubkey for the CanonicalSignBytes using the given ChainID:
+
+```go
+func (v Vote) Verify(chainID string, pubKey PubKey) bool {
+    return pubKey.Verify(v.Signature, CanonicalSignBytes(chainID, v))
+}
+```
+
+where `pubKey.Verify` performs the appropriate digital signature
+verification of the `pubKey` against the given signature and message bytes.
+
+## Evidence
+
+There is currently only one kind of evidence:
+
+```
+// amino: "tendermint/DuplicateVoteEvidence"
+type DuplicateVoteEvidence struct {
+    PubKey crypto.PubKey
+    VoteA  *Vote
+    VoteB  *Vote
+}
+```
+
+DuplicateVoteEvidence `ev` is valid if
+
+- `ev.VoteA` and `ev.VoteB` can be verified with `ev.PubKey`
+- `ev.VoteA` and `ev.VoteB` have the same `Height, Round, Address, Index, Type`
+- `ev.VoteA.BlockID != ev.VoteB.BlockID`
+- `(block.Height - ev.VoteA.Height) < MAX_EVIDENCE_AGE`
+
+# Execution
+
+Once a block is validated, it can be executed against the state.
+
+The state follows this recursive equation:
+
+```go
+state(1) = InitialState
+state(h+1) <- Execute(state(h), ABCIApp, block(h))
+```
+
+where `InitialState` includes the initial consensus parameters and
+validator set, and `ABCIApp` is an ABCI application that can return results
+and changes to the validator set (TODO). Execute is defined as:
+
+```go
+Execute(state State, app ABCIApp, block Block) State {
+    // TODO: just spell out ApplyBlock here
+    // and remove the ABCIResponses struct.
+    abciResponses := app.ApplyBlock(block)
+
+    return State{
+        LastResults:     abciResponses.DeliverTxResults,
+        AppHash:         abciResponses.AppHash,
+        Validators:      UpdateValidators(state.Validators, abciResponses.ValidatorChanges),
+        LastValidators:  state.Validators,
+        ConsensusParams: UpdateConsensusParams(state.ConsensusParams, abciResponses.ConsensusParamChanges),
+    }
+}
+
+type ABCIResponses struct {
+    DeliverTxResults      []Result
+    ValidatorChanges      []Validator
+    ConsensusParamChanges ConsensusParams
+    AppHash               []byte
+}
+```
diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md
new file mode 100644
index 000000000..16902d099
--- /dev/null
+++ b/docs/spec/blockchain/encoding.md
@@ -0,0 +1,279 @@
+# Tendermint Encoding
+
+## Amino
+
+Tendermint uses the proto3 derivative [Amino](https://github.com/tendermint/go-amino) for all data structures.
+Think of Amino as an object-oriented proto3 with native JSON support.
+The goal of the Amino encoding protocol is to bring parity between
+application logic objects and persistence objects.
+
+Please see the [Amino
+specification](https://github.com/tendermint/go-amino#amino-encoding-for-go)
+for more details.
+
+Notably, every object that satisfies an interface (eg. a particular kind of
+p2p message, or a particular kind of pubkey) is registered with a global
+name, the hash of which is included in the object's encoding as the
+so-called "prefix bytes".
+
+We define the `func AminoEncode(obj interface{}) []byte` function to take
+an arbitrary object and return the Amino encoded bytes.
+
+## Byte Arrays
+
+The encoding of a byte array is simply the raw bytes prefixed with the
+length of the array as a `UVarint` (what proto calls a `Varint`).
+
+For details on varints, see the [protobuf
+spec](https://developers.google.com/protocol-buffers/docs/encoding#varints).
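+
+Go's standard library implements this varint encoding directly, so the
+length prefixes in the examples below can be sanity-checked with a few
+lines (a sketch, not part of the spec):
+
+```go
+package main
+
+import (
+    "encoding/binary"
+    "fmt"
+)
+
+func main() {
+    buf := make([]byte, binary.MaxVarintLen64)
+    n := binary.PutUvarint(buf, 2)  // length prefix for a 2-byte array
+    fmt.Printf("%X\n", buf[:n])     // prints 02
+    n = binary.PutUvarint(buf, 300) // length prefix for a 300-byte array
+    fmt.Printf("%X\n", buf[:n])     // prints AC02
+}
+```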
+
+For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`,
+while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]`
+would be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding
+of 300.
+
+## Public Key Cryptography
+
+Tendermint uses Amino to distinguish between different types of private
+keys, public keys, and signatures. Additionally, for each public key,
+Tendermint defines an Address function that can be used as a more compact
+identifier in place of the public key. Here we list the concrete types,
+their names, and prefix bytes for public keys and signatures, as well as
+the address schemes for each PubKey. Note that for brevity we don't include
+details of the private keys beyond their type and name, as they can be
+derived the same way as the others using Amino.
+
+All registered objects are encoded by Amino using a 4-byte PrefixBytes that
+uniquely identifies the object and includes information about its
+underlying type. For details on how PrefixBytes are computed, see the
+[Amino
+spec](https://github.com/tendermint/go-amino#computing-the-prefix-and-disambiguation-bytes).
+
+In what follows, we provide the type names and prefix bytes directly.
+Notice that when encoding byte-arrays, the length of the byte-array is
+appended to the PrefixBytes. Thus the encoding of a byte array becomes
+`<Prefix> <Length> <Bytes>`. In other words, to encode any type listed
+below you do not need to be familiar with Amino encoding.
+You can simply use the table below and concatenate
+`Prefix || Length (of raw bytes) || raw bytes`, where `||` denotes byte
+concatenation.
+
+| Type | Name | Prefix | Length | Notes |
+| ---- | ---- | ------ | ------ | ----- |
+| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
+| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
+| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
+| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
+| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | |
+| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | |
+
+### Examples
+
+1. For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey
+`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
+would be encoded as
+`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
+
+2. For example, the variable size Secp256k1 signature (in this particular
+example 70 or 0x46 bytes)
+`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
+would be encoded as
+`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
+
+### Addresses
+
+Addresses for each public key type are computed as follows:
+
+#### Ed25519
+
+First 20 bytes of the SHA256 hash of the raw 32-byte public key:
+
+```
+address = SHA256(pubkey)[:20]
+```
+
+NOTE: before v0.22.0, this was the RIPEMD160 of the Amino encoded public
+key.
+
+#### Secp256k1
+
+RIPEMD160 hash of the SHA256 hash of the OpenSSL compressed public key:
+
+```
+address = RIPEMD160(SHA256(pubkey))
+```
+
+This is the same as Bitcoin.
+
+## Other Common Types
+
+### BitArray
+
+The BitArray is used in block headers and some consensus messages to signal
+whether or not something was done by each validator.
BitArray is represented
+with a struct containing the number of bits (`Bits`) and the bit-array
+itself encoded in base64 (`Elems`).
+
+```go
+type BitArray struct {
+    Bits  int
+    Elems []uint64
+}
+```
+
+This type is easily encoded directly by Amino.
+
+Note that BitArray receives a special JSON encoding in the form of `x` and
+`_` representing `1` and `0`. Ie. the BitArray `10110` would be JSON
+encoded as `"x_xx_"`
+
+### Part
+
+Part is used to break up blocks into pieces that can be gossiped in
+parallel and securely verified using a Merkle tree of the parts.
+
+Part contains the index of the part in the larger set (`Index`), the actual
+underlying data of the part (`Bytes`), and a simple Merkle proof that the
+part is contained in the larger set (`Proof`).
+
+```go
+type Part struct {
+    Index int
+    Bytes []byte
+    Proof []byte
+}
+```
+
+### MakeParts
+
+Encode an object using Amino and slice it into parts.
+
+```go
+func MakeParts(obj interface{}, partSize int) []Part
+```
+
+## Merkle Trees
+
+Simple Merkle trees are used in numerous places in Tendermint to compute a
+cryptographic digest of a data structure.
+
+Tendermint always uses the `TMHASH` hash function, which is the first
+20 bytes of the SHA256:
+
+```
+func TMHASH(bz []byte) []byte {
+    shasum := SHA256(bz)
+    return shasum[:20]
+}
+```
+
+### Simple Merkle Root
+
+The function `SimpleMerkleRoot` is a simple recursive function defined as
+follows:
+
+```go
+func SimpleMerkleRoot(hashes [][]byte) []byte {
+    switch len(hashes) {
+    case 0:
+        return nil
+    case 1:
+        return hashes[0]
+    default:
+        left := SimpleMerkleRoot(hashes[:(len(hashes)+1)/2])
+        right := SimpleMerkleRoot(hashes[(len(hashes)+1)/2:])
+        return SimpleConcatHash(left, right)
+    }
+}
+
+func SimpleConcatHash(left, right []byte) []byte {
+    left = encodeByteSlice(left)
+    right = encodeByteSlice(right)
+    return TMHASH(append(left, right...))
+}
+```
+
+Note that the leaves are Amino encoded as byte-arrays (ie. simple Uvarint
+length prefix) before being concatenated together and hashed.
+
+Note: we will abuse notation and invoke `SimpleMerkleRoot` with arguments
+of type `struct` or type `[]struct`.
+For `struct` arguments, we compute a `[][]byte` containing the hash of each
+field in the struct sorted by the hash of the field name.
+For `[]struct` arguments, we compute a `[][]byte` by hashing the individual
+`struct` elements.
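+
+Putting the pieces above together, here is a directly runnable Go version
+of `TMHASH` and `SimpleMerkleRoot` (a sketch; the example leaves are
+arbitrary, and `encodeByteSlice` applies the Uvarint length prefix
+described earlier):
+
+```go
+package main
+
+import (
+    "crypto/sha256"
+    "encoding/binary"
+    "fmt"
+)
+
+// tmHash returns the first 20 bytes of the SHA256 of bz.
+func tmHash(bz []byte) []byte {
+    sum := sha256.Sum256(bz)
+    return sum[:20]
+}
+
+// encodeByteSlice prefixes bz with its length as a Uvarint.
+func encodeByteSlice(bz []byte) []byte {
+    buf := make([]byte, binary.MaxVarintLen64)
+    n := binary.PutUvarint(buf, uint64(len(bz)))
+    return append(buf[:n], bz...)
+}
+
+func simpleMerkleRoot(hashes [][]byte) []byte {
+    switch len(hashes) {
+    case 0:
+        return nil
+    case 1:
+        return hashes[0]
+    default:
+        mid := (len(hashes) + 1) / 2
+        left := encodeByteSlice(simpleMerkleRoot(hashes[:mid]))
+        right := encodeByteSlice(simpleMerkleRoot(hashes[mid:]))
+        return tmHash(append(left, right...))
+    }
+}
+
+func main() {
+    leaves := [][]byte{tmHash([]byte("tx1")), tmHash([]byte("tx2")), tmHash([]byte("tx3"))}
+    fmt.Printf("%X\n", simpleMerkleRoot(leaves))
+}
+```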
+
+### Simple Merkle Proof
+
+Proof that a leaf is in a Merkle tree consists of a simple structure:
+
+```
+type SimpleProof struct {
+    Aunts [][]byte
+}
+```
+
+It is verified using the following:
+
+```
+func (proof SimpleProof) Verify(index, total int, leafHash, rootHash []byte) bool {
+    computedHash := computeHashFromAunts(index, total, leafHash, proof.Aunts)
+    return computedHash == rootHash
+}
+
+func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byte) []byte {
+    assert(index < total && index >= 0 && total > 0)
+
+    if total == 1 {
+        assert(len(innerHashes) == 0)
+        return leafHash
+    }
+
+    assert(len(innerHashes) > 0)
+
+    numLeft := (total + 1) / 2
+    if index < numLeft {
+        leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
+        assert(leftHash != nil)
+        return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
+    }
+    rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
+    assert(rightHash != nil)
+    return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
+}
+```
+
+## JSON
+
+### Amino
+
+TODO: improve this
+
+Amino also supports JSON encoding - registered types are simply encoded as:
+
+```
+{
+  "type": "<amino type name>",
+  "value": <JSON>
+}
+```
+
+For instance, an ED25519 PubKey would look like:
+
+```
+{
+  "type": "tendermint/PubKeyEd25519",
+  "value": "uZ4h63OFWuQ36ZZ4Bd6NF+/w9fWUwrOncrQsackrsTk="
+}
+```
+
+Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the
+`"type"` is the registered Amino name for Ed25519 pubkeys.
+
+### Signed Messages
+
+Signed messages (eg. votes, proposals) in the consensus are encoded using
+Amino-JSON, rather than in the standard binary format.
+
+When signing, the elements of a message are sorted by key and the sorted
+message is embedded in an outer JSON that includes a `chain_id` field.
+We call this encoding the CanonicalSignBytes. For instance,
+CanonicalSignBytes for a vote would look like:
+
+```json
+{"chain_id":"my-chain-id","vote":{"block_id":{"hash":"DEADBEEF","parts":{"hash":"BEEFDEAD","total":3}},"height":3,"round":2,"timestamp":1234567890,"type":2}}
+```
+
+Note how the fields within each level are sorted.
diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md
new file mode 100644
index 000000000..3b374f70a
--- /dev/null
+++ b/docs/spec/blockchain/state.md
@@ -0,0 +1,80 @@
+# Tendermint State
+
+## State
+
+The state contains information whose cryptographic digest is included in
+block headers, and thus is necessary for validating new blocks. For
+instance, the set of validators and the results of transactions are never
+included in blocks, but their Merkle roots are - the state keeps track of
+them.
+
+Note that the `State` object itself is an implementation detail, since it
+is never included in a block or gossiped over the network, and we never
+compute its hash. However, the types it contains are part of the
+specification, since their Merkle roots are included in blocks.
+
+For details on an implementation of `State` with persistence, see TODO
+
+```go
+type State struct {
+    LastResults []Result
+    AppHash     []byte
+
+    Validators     []Validator
+    LastValidators []Validator
+
+    ConsensusParams ConsensusParams
+}
+```
+
+### Result
+
+```go
+type Result struct {
+    Code uint32
+    Data []byte
+    Tags []KVPair
+}
+
+type KVPair struct {
+    Key   []byte
+    Value []byte
+}
+```
+
+`Result` is the result of executing a transaction against the application.
+It returns a result code, an arbitrary byte array (ie. a return value),
+and a list of key-value pairs ordered by key. The key-value pairs, or tags,
+can be used to index transactions according to their "effects", which are
+represented in the tags.
+
+### Validator
+
+A validator is an active participant in the consensus with a public key
+and a voting power. Validators also contain an address which is derived
+from the PubKey:
+
+```go
+type Validator struct {
+    Address     []byte
+    PubKey      PubKey
+    VotingPower int64
+}
+```
+
+The `state.Validators` and `state.LastValidators` must always be sorted by
+validator address, so that there is a canonical order for computing the
+SimpleMerkleRoot.
+
+We also define a `TotalVotingPower` function, to return the total voting
+power:
+
+```go
+func TotalVotingPower(vals []Validator) int64 {
+    sum := int64(0)
+    for _, v := range vals {
+        sum += v.VotingPower
+    }
+    return sum
+}
+```
+
+### ConsensusParams
+
+TODO
diff --git a/docs/spec/consensus/abci.md b/docs/spec/consensus/abci.md
new file mode 100644
index 000000000..82b88161e
--- /dev/null
+++ b/docs/spec/consensus/abci.md
@@ -0,0 +1 @@
+[Moved](/docs/spec/software/abci.md)
diff --git a/docs/spec/consensus/bft-time.md b/docs/spec/consensus/bft-time.md
new file mode 100644
index 000000000..a005e9040
--- /dev/null
+++ b/docs/spec/consensus/bft-time.md
@@ -0,0 +1,56 @@
+# BFT time in Tendermint
+
+Tendermint provides a deterministic, Byzantine fault-tolerant source of
+time. Time in Tendermint is defined with the Time field of the block
+header.
+
+It satisfies the following properties:
+
+- Time Monotonicity: Time is monotonically increasing, i.e., given
+a header H1 for height h1 and a header H2 for height `h2 = h1 + 1`,
+`H1.Time < H2.Time`.
+- Time Validity: Given a set of Commit votes that forms the
+`block.LastCommit` field, a range of valid values for the Time field of
+the block header is defined only by Precommit messages (from the
+LastCommit field) sent by correct processes, i.e., a faulty process cannot
+arbitrarily increase the Time value.
+
+In the context of Tendermint, time is of type int64 and denotes UNIX time
+in milliseconds, i.e., it corresponds to the number of milliseconds since
+January 1, 1970. Before defining the rules that need to be enforced by the
+Tendermint consensus protocol so that the properties above hold, we
+introduce the following definition:
+
+- the median of a set of `Vote` messages is the median of the `Vote.Time`
+fields of the corresponding `Vote` messages, where the value of
+`Vote.Time` is counted a number of times proportional to the voting power
+of the process. As voting power in Tendermint is not uniform (one process,
+one vote), a vote message is actually an aggregator of identical votes
+whose number is equal to the voting power of the process that cast the
+corresponding vote message.
+
+Let's consider the following example:
+- we have four processes p1, p2, p3 and p4, with the following voting
+power distribution (p1, 23), (p2, 27), (p3, 10) and (p4, 10). The total
+voting power is 70 (`N = 3f+1`, where `N` is the total voting power, and
+`f` is the maximum voting power of the faulty processes), so we assume
+that the faulty processes have at most 23 of voting power.
+Furthermore, we have the following vote messages in some LastCommit field
+(we ignore all fields except the Time field):
+  - (p1, 100), (p2, 98), (p3, 1000), (p4, 500). We assume that p3 and p4
+are faulty processes. Let's assume that the `block.LastCommit` message
+contains votes of processes p2, p3 and p4.
The median is then chosen the
+following way: the value 98 is counted 27 times, the value 1000 is counted
+10 times, and the value 500 is also counted 10 times.
+So the median value will be the value 98. No matter what set of messages
+with at least `2f+1` voting power we choose, the median value will always
+be between the values sent by correct processes.
+
+We ensure the Time Monotonicity and Time Validity properties by the
+following rules:
+
+- let rs denote the `RoundState` (consensus internal state) of some
+process. Then
+`rs.ProposalBlock.Header.Time == median(rs.LastCommit) &&
+rs.Proposal.Timestamp == rs.ProposalBlock.Header.Time`.
+
+- Furthermore, when creating the `vote` message, the following rules for
+determining the `vote.Time` field should hold:
+
+  - if `rs.Proposal` is defined then
+    `vote.Time = max(rs.Proposal.Timestamp + 1, time.Now())`, where
+    `time.Now()` denotes local Unix time in milliseconds.
+
+  - if `rs.Proposal` is not defined and `rs.Votes` contains +2/3 of the
+    corresponding vote messages (votes for the current height and round,
+    and with the corresponding type (`Prevote` or `Precommit`)), then
+
+    `vote.Time = max(median(getVotes(rs.Votes, vote.Height, vote.Round, vote.Type)), time.Now())`,
+
+    where the `getVotes` function returns the votes for a particular
+    `Height`, `Round` and `Type`. The second rule is relevant for the case
+    when a process jumps to a higher round upon receiving +2/3 votes for a
+    higher round, but the corresponding `Proposal` message for the higher
+    round hasn't been received yet.
+
diff --git a/docs/spec/consensus/consensus.md b/docs/spec/consensus/consensus.md
new file mode 100644
index 000000000..1bf075773
--- /dev/null
+++ b/docs/spec/consensus/consensus.md
@@ -0,0 +1,9 @@
+We are working to finalize an updated Tendermint specification with formal
+proofs of safety and liveness.
+
+In the meantime, see the [description in the
+docs](http://tendermint.readthedocs.io/en/master/specification/byzantine-consensus-algorithm.html).
+
+There are also relevant but somewhat outdated descriptions in Jae Kwon's
+[original whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
+and Ethan Buchman's [master's
+thesis](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769).
diff --git a/docs/spec/consensus/light-client.md b/docs/spec/consensus/light-client.md
new file mode 100644
index 000000000..0ed9d36d4
--- /dev/null
+++ b/docs/spec/consensus/light-client.md
@@ -0,0 +1,114 @@
+# Light client
+
+A light client is a process that connects to Tendermint full node(s) and
+then tries to verify Merkle proofs about the blockchain application. In
+this document we describe mechanisms that ensure that the Tendermint light
+client has the same level of security as full node processes (without
+being itself a full node).
+
+To be able to validate a Merkle proof, a light client needs to validate
+the blockchain header that contains the root app hash. Validating a
+blockchain header in Tendermint consists of verifying that the header is
+committed (signed) by >2/3 of the voting power of the corresponding
+validator set. As the validator set is dynamic (it changes over time), one
+of the core functionalities of the light client is updating the current
+validator set, which is then used to verify the blockchain header, and
+further the corresponding Merkle proofs.
+
+For the purpose of this light client specification, we assume that the
+Tendermint full node exposes the following functions over Tendermint RPC:
+
+```golang
+Header(height int64) (SignedHeader, error) // returns signed header for the given height
+Validators(height int64) (ResultValidators, error) // returns validator set for the given height
+LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number
+
+type SignedHeader struct {
+    Header       Header
+    Commit       Commit
+    ValSetNumber int64
+}
+
+type ResultValidators struct {
+    BlockHeight int64
+    Validators  []Validator
+    // time the current validator set is initialised, i.e., time of the last
+    // validator change before header BlockHeight
+    ValSetTime int64
+}
+```
+
+We assume that Tendermint keeps track of validator set changes and that
+each time the validator set is changed, it is assigned the next sequence
+number. We call this number the validator set sequence number. Tendermint
+also remembers the Time from the header when the next validator set is
+initialised (starts to be in power), and we refer to this time as the
+validator set init time.
+Furthermore, we assume that each validator set change is signed
+(committed) by the current validator set. More precisely, given a block
+`H` that contains transactions that are modifying the current validator
+set, the Merkle root hash of the next validator set (modified based on
+transactions from block H) will be in block `H+1` (and signed by the
+current validator set), and then starting from block `H+2`, it will be
+signed by the next validator set.
+
+Note that the real Tendermint RPC API is slightly different (for example,
+response messages contain more data and function names are slightly
+different); we shortened (and modified) it for the purpose of this
+document to make the spec clearer and simpler. Furthermore, note that in
+the case of the third function, the returned header has `ValSetNumber`
+equal to `valSetNumber+1`.
+
+Locally, the light client manages the following state:
+
+```golang
+valSet []Validator // current validator set (last known and verified validator set)
+valSetNumber int64 // sequence number of the current validator set
+valSetHash []byte  // hash of the current validator set
+valSetTime int64   // time when the current validator set is initialised
+```
+
+The light client is initialised with a trusted validator set, for example
+based on a known validator set hash, validator set sequence number, and
+validator set init time.
+The core of the light client logic is captured by the VerifyAndUpdate
+function, which is used to 1) verify whether the given header is valid,
+and 2) update the validator set (when the given header is valid and more
+recent than the headers seen so far).
+
+```golang
+VerifyAndUpdate(signedHeader SignedHeader):
+    assertThat signedHeader.valSetNumber >= valSetNumber
+    if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then
+        setValidatorSet(signedHeader)
+        return true
+    else
+        updateValidatorSet(signedHeader.ValSetNumber)
+        return VerifyAndUpdate(signedHeader)
+
+isValid(signedHeader SignedHeader):
+    valSetOfTheHeader = Validators(signedHeader.Header.Height)
+    assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValidatorsHash
+    assertThat signedHeader is passing basic validation
+    if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true
+    else
+        return false
+
+setValidatorSet(signedHeader SignedHeader):
+    nextValSet = Validators(signedHeader.Header.Height)
+    assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash
+    valSet = nextValSet.Validators
+    valSetHash = signedHeader.Header.ValidatorsHash
+    valSetNumber = signedHeader.ValSetNumber
+    valSetTime = nextValSet.ValSetTime
+
+votingPower(commit Commit):
+    votingPower = 0
+    for each precommit in commit.Precommits do:
+        if precommit.ValidatorAddress is in valSet and signature of the precommit verifies then
+            votingPower += valSet[precommit.ValidatorAddress].VotingPower
+    return votingPower
+
+votingPower(validatorSet []Validator):
+    votingPower = 0
+    for each validator in validatorSet do:
+        votingPower += validator.VotingPower
+    return votingPower
+
+updateValidatorSet(valSetNumberOfTheHeader):
+    while valSetNumber != valSetNumberOfTheHeader do
+        signedHeader = LastHeader(valSetNumber)
+        if isValid(signedHeader) then
+            setValidatorSet(signedHeader)
+        else return error
+    return
+```
+
+Note that in the logic above we assume that the light client will always
+go upward with respect to header verifications, i.e., that it will always
+be used to verify more recent headers. In case a light client needs to be
+used to verify older headers (to go backward), the same mechanisms and
+similar logic can be used. In case a call to the full node or subsequent
+checks fail, a light client needs to implement some recovery strategy, for
+example connecting to another full node.
diff --git a/docs/spec/consensus/wal.md b/docs/spec/consensus/wal.md
new file mode 100644
index 000000000..589680f99
--- /dev/null
+++ b/docs/spec/consensus/wal.md
@@ -0,0 +1 @@
+[Moved](/docs/spec/software/wal.md)
diff --git a/docs/spec/p2p/config.md b/docs/spec/p2p/config.md
new file mode 100644
index 000000000..b31a36736
--- /dev/null
+++ b/docs/spec/p2p/config.md
@@ -0,0 +1,38 @@
+# P2P Config
+
+Here we describe configuration options around the Peer Exchange.
+These can be set using flags or via the `$TMHOME/config/config.toml` file.
+
+## Seed Mode
+
+`--p2p.seed_mode`
+
+The node operates in seed mode. In seed mode, a node continuously crawls
+the network for peers, and upon incoming connection shares some peers and
+disconnects.
+
+## Seeds
+
+`--p2p.seeds "1.2.3.4:26656,2.3.4.5:4444"`
+
+Dials these seeds when we need more peers. They should return a list of
+peers and then disconnect. If we already have enough peers in the address
+book, we may never need to dial them.
+
+## Persistent Peers
+
+`--p2p.persistent_peers "1.2.3.4:26656,2.3.4.5:26656"`
+
+Dial these peers and auto-redial them if the connection fails.
+These are intended to be trusted persistent peers that can help
+anchor us in the p2p network. The auto-redial uses exponential
+backoff and will give up after a day of trying to connect.
+
+**Note:** If `seeds` and `persistent_peers` intersect,
+the user will be warned that seeds may auto-close connections
+and that the node may not be able to keep the connection persistent.
+
+## Private Persistent Peers
+
+`--p2p.private_persistent_peers "1.2.3.4:26656,2.3.4.5:26656"`
+
+These are persistent peers that we do not add to the address book or
+gossip to other peers. They stay private to us.
diff --git a/docs/spec/p2p/connection.md b/docs/spec/p2p/connection.md
new file mode 100644
index 000000000..9b5e49675
--- /dev/null
+++ b/docs/spec/p2p/connection.md
@@ -0,0 +1,110 @@
+# P2P Multiplex Connection
+
+## MConnection
+
+`MConnection` is a multiplex connection that supports multiple independent
+streams with distinct quality of service guarantees atop a single TCP
+connection. Each stream is known as a `Channel` and each `Channel` has a
+globally unique *byte id*. Each `Channel` also has a relative priority
+that determines the quality of service of the `Channel` compared to other
+`Channel`s. The *byte id* and the relative priorities of each `Channel`
+are configured upon initialization of the connection.
+
+The `MConnection` supports three packet types:
+
+- Ping
+- Pong
+- Msg
+
+### Ping and Pong
+
+The ping and pong messages consist of writing a single byte to the
+connection; 0x1 and 0x2, respectively.
+
+When we haven't received any messages on an `MConnection` in time
+`pingTimeout`, we send a ping message.
+When a ping is received on the `MConnection`, a pong is sent in response
+only if there are no other messages to send and the peer has not sent us
+too many pings (TODO).
+
+If a pong or message is not received in sufficient time after a ping, the
+peer is disconnected.
+
+### Msg
+
+Messages in channels are chopped into smaller `msgPacket`s for
+multiplexing.
+
+```
+type msgPacket struct {
+    ChannelID byte
+    EOF       byte // 1 means message ends here.
+    Bytes     []byte
+}
+```
+
+The `msgPacket` is serialized using
+[go-wire](https://github.com/tendermint/go-wire) and prefixed with 0x3.
+The received `Bytes` of a sequential set of packets are appended together
+until a packet with `EOF=1` is received, at which point the complete
+serialized message is returned for processing by the `onReceive` function
+of the corresponding channel.
+
+### Multiplexing
+
+Messages are sent from a single `sendRoutine`, which loops over a select
+statement and results in the sending of a ping, a pong, or a batch of data
+messages. The batch of data messages may include messages from multiple
+channels. Message bytes are queued for sending in their respective
+channel, with each channel holding one unsent message at a time. Messages
+are chosen for a batch one at a time from the channel with the lowest
+ratio of recently sent bytes to channel priority.
+
+## Sending Messages
+
+There are two methods for sending messages:
+
+```go
+func (m MConnection) Send(chID byte, msg interface{}) bool {}
+func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
+```
+
+`Send(chID, msg)` is a blocking call that waits until `msg` is
+successfully queued for the channel with the given id byte `chID`. The
+message `msg` is serialized using the `tendermint/wire` submodule's
+`WriteBinary()` reflection routine.
+
+`TrySend(chID, msg)` is a nonblocking call that queues the message `msg`
+in the channel with the given id byte `chID` if the queue is not full;
+otherwise it returns false immediately.
+
+`Send()` and `TrySend()` are also exposed for each `Peer`.
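+
+As an illustration, a non-blocking send with a blocking fallback might
+look like the following sketch (the channel ID `0x20`, the `mconn`
+variable, and `msg` are hypothetical, not part of the spec):
+
+```go
+// Try to queue the message without blocking; fall back to a blocking
+// send if the channel's queue is currently full.
+if ok := mconn.TrySend(0x20, msg); !ok {
+    mconn.Send(0x20, msg)
+}
+```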
+
+## Peer
+
+Each peer has one `MConnection` instance, and includes other information
+such as whether the connection was outbound, whether the connection should
+be recreated if it closes, various identity information about the node,
+and other higher level thread-safe data used by the reactors.
+
+## Switch/Reactor
+
+The `Switch` handles peer connections and exposes an API to receive
+incoming messages on `Reactors`. Each `Reactor` is responsible for
+handling incoming messages of one or more `Channels`. So while sending
+outgoing messages is typically performed on the peer, incoming messages
+are received on the reactor.
+
+```go
+// Declare a MyReactor reactor that handles messages on MyChannelID.
+type MyReactor struct{}
+
+func (reactor MyReactor) GetChannels() []*ChannelDescriptor {
+    return []*ChannelDescriptor{&ChannelDescriptor{ID: MyChannelID, Priority: 1}}
+}
+
+func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
+    r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error)
+    msgString := ReadString(r, n, err)
+    fmt.Println(msgString)
+}
+
+// Other Reactor methods omitted for brevity
+...
+
+sw := NewSwitch([]Reactor{MyReactor{}})
+
+...
+
+// Send a random message to all outbound connections
+for _, peer := range sw.Peers().List() {
+    if peer.IsOutbound() {
+        peer.Send(MyChannelID, "Here's a random message")
+    }
+}
+```
diff --git a/docs/spec/p2p/node.md b/docs/spec/p2p/node.md
new file mode 100644
index 000000000..366b27dd2
--- /dev/null
+++ b/docs/spec/p2p/node.md
@@ -0,0 +1,65 @@
+# Tendermint Peer Discovery
+
+A Tendermint P2P network has different kinds of nodes with different
+requirements for connectivity to one another. This document describes what
+kinds of nodes Tendermint should enable and how they should work.
+
+## Seeds
+
+Seeds are the first point of contact for a new node.
+They return a list of known active peers and then disconnect.
+
+Seeds should operate full nodes with the PEX reactor in a "crawler" mode
+that continuously explores to validate the availability of peers.
+
+Seeds should only respond with some top percentile of the best peers they
+know about. See [the peer-exchange
+docs](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md)
+for details on peer quality.
+
+## New Full Node
+
+A new node needs a few things to connect to the network:
+
+- a list of seeds, which can be provided to Tendermint via config file or
+  flags, or hardcoded into the software by in-process apps
+- a `ChainID`, also called `Network` at the p2p layer
+- a recent block height, H, and hash, HASH, for the blockchain.
+
+The values `H` and `HASH` must be received and corroborated by means
+external to Tendermint, and specific to the user - ie. via the user's
+trusted social consensus. This requirement to validate `H` and `HASH`
+out-of-band and via social consensus is the essential difference in
+security models between Proof-of-Work and Proof-of-Stake blockchains.
+
+With the above, the node then queries some seeds for peers for its chain,
+dials those peers, and runs the Tendermint protocols with those it
+successfully connects to.
+
+When the node catches up to height H, it ensures the block hash matches
+HASH. If not, Tendermint will exit, and the user must try again - either
+they are connected to bad peers or their social consensus is invalid.
+
+## Restarted Full Node
+
+A node checks its address book on startup and attempts to connect to
+peers from there.
+If it can't connect to any peers after some time, it falls back to the
+seeds to find more.
+
+Restarted full nodes can run the `blockchain` or `consensus` reactor
+protocols to sync up to the latest state of the blockchain from wherever
+they were last. In a Proof-of-Stake context, if they are sufficiently far
+behind (greater than the length of the unbonding period), they will need
+to validate a recent `H` and `HASH` out-of-band again so they know they
+have synced the correct chain.
+
+## Validator Node
+
+A validator node is a node that interfaces with a validator signing key.
+These nodes require the highest security, and should not accept incoming
+connections. They should maintain outgoing connections to a controlled set
+of "Sentry Nodes" that serve as their proxy shield to the rest of the
+network.
+
+Validators that know and trust each other can accept incoming connections
+from one another and maintain direct private connectivity via VPN.
+
+## Sentry Node
+
+Sentry nodes are guardians of a validator node and provide it access to
+the rest of the network. They should be well connected to other full nodes
+on the network. Sentry nodes may be dynamic, but should maintain
+persistent connections to some evolving random subset of each other. They
+should always expect to have direct incoming connections from the
+validator node and its backup(s). They do not report the validator node's
+address in the PEX and they may be more strict about the quality of peers
+they keep.
+
+Sentry nodes belonging to validators that trust each other may wish to
+maintain persistent connections via VPN with one another, but only report
+each other sparingly in the PEX.
diff --git a/docs/spec/p2p/peer.md b/docs/spec/p2p/peer.md
new file mode 100644
index 000000000..69c5bbacf
--- /dev/null
+++ b/docs/spec/p2p/peer.md
@@ -0,0 +1,112 @@
+# Tendermint Peers
+
+This document explains how Tendermint Peers are identified and how they
+connect to one another.
+
+For details on peer discovery, see the [peer exchange (PEX) reactor
+doc](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md).
+
+## Peer Identity
+
+Tendermint peers are expected to maintain long-term persistent identities
+in the form of a public key. Each peer has an ID defined as
+`peer.ID == peer.PubKey.Address()`, where `Address` uses the scheme
+defined in the `crypto` package.
+
+A single peer ID can have multiple IP addresses associated with it, but a
+node will only ever connect to one at a time.
+
+When attempting to connect to a peer, we use the PeerURL:
+`<ID>@<IP>:<PORT>`. We will attempt to connect to the peer at IP:PORT,
+and verify, via authenticated encryption, that it is in possession of the
+private key corresponding to `<ID>`. This prevents man-in-the-middle
+attacks on the peer layer.
+
+## Connections
+
+All p2p connections use TCP.
+Upon establishing a successful TCP connection with a peer,
+two handshakes are performed: one for authenticated encryption, and one
+for Tendermint versioning. Both handshakes have configurable timeouts
+(they should complete quickly).
+
+### Authenticated Encryption Handshake
+
+Tendermint implements the Station-to-Station protocol
+using ED25519 keys for Diffie-Hellman key-exchange and NaCl SecretBox for
+encryption.
+It goes as follows (see the sketch at the end of this document for the
+nonce derivation):
+
+- generate an ephemeral ED25519 keypair
+- send the ephemeral public key to the peer
+- wait to receive the peer's ephemeral public key
+- compute the Diffie-Hellman shared secret using the peer's ephemeral
+  public key and our ephemeral private key
+- generate two nonces to use for encryption (sending and receiving) as
+  follows:
+  - sort the ephemeral public keys in ascending order and concatenate them
+  - RIPEMD160 the result
+  - append 4 empty bytes (extending the hash to 24 bytes)
+  - the result is nonce1
+  - flip the last bit of nonce1 to get nonce2
+  - if we had the smaller ephemeral pubkey, use nonce1 for receiving,
+    nonce2 for sending; else the opposite
+- all communications from now on are encrypted using the shared secret
+  and the nonces, where each nonce increments by 2 every time it is used
+- we now have an encrypted channel, but still need to authenticate
+- generate a common challenge to sign:
+  - SHA256 of the sorted (lowest first) and concatenated ephemeral pub keys
+- sign the common challenge with our persistent private key
+- send the go-wire encoded persistent pubkey and signature to the peer
+- wait to receive the persistent public key and signature from the peer
+- verify the signature on the challenge using the peer's persistent
+  public key
+
+If this is an outgoing connection (we dialed the peer) and we used a peer
+ID, then finally verify that the peer's persistent public key corresponds
+to the peer ID we dialed, ie. `peer.PubKey.Address() == <ID>`.
+
+The connection has now been authenticated. All traffic is encrypted.
+
+Note: only the dialer can authenticate the identity of the peer,
+but this is what we care about, since when we join the network we wish to
+ensure we have reached the intended peer (and are not being MITMd).
+
+### Peer Filter
+
+Before continuing, we check if the new peer has the same ID as ourselves
+or an existing peer. If so, we disconnect.
+
+We also check the peer's address and public key against
+an optional whitelist which can be managed through the ABCI app -
+if the whitelist is enabled and the peer does not qualify, the connection
+is terminated.
+
+### Tendermint Version Handshake
+
+The Tendermint Version Handshake allows the peers to exchange their
+NodeInfo:
+
+```golang
+type NodeInfo struct {
+    ID         p2p.ID
+    ListenAddr string
+
+    Network  string
+    Version  string
+    Channels []int8
+
+    Moniker string
+    Other   []string
+}
+```
+
+The connection is disconnected if:
+
+- `peer.NodeInfo.ID` is not equal to `peerConn.ID`
+- `peer.NodeInfo.Version` is not formatted as `X.X.X`, where the X's are
+  integers known as Major, Minor, and Revision
+- `peer.NodeInfo.Version` Major is not the same as ours
+- `peer.NodeInfo.Network` is not the same as ours
+- `peer.Channels` does not intersect with our known Channels
+- `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be
+  resolved
+
+At this point, if we have not disconnected, the peer is valid.
+It is added to the switch and hence all reactors via the `AddPeer` method.
+Note that each reactor may handle multiple channels.
+
+## Connection Activity
+
+Once a peer is added, incoming messages for a given reactor are handled
+through that reactor's `Receive` method, and output messages are sent
+directly by the Reactors on each peer. A typical reactor maintains
+per-peer go-routine(s) that handle this.
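+
+To make the nonce derivation in the authenticated encryption handshake
+above concrete, here is a minimal Go sketch. It assumes the
+`golang.org/x/crypto/ripemd160` package; the function name and the
+convention of flipping the lowest bit of the final byte are illustrative,
+and the canonical implementation may differ in such details:
+
+```go
+package main
+
+import (
+    "bytes"
+
+    "golang.org/x/crypto/ripemd160"
+)
+
+// deriveNonces derives the two 24-byte SecretBox nonces from the sorted
+// ephemeral public keys, following the steps listed above.
+func deriveNonces(ourEphPub, theirEphPub []byte) (recvNonce, sendNonce [24]byte) {
+    lo, hi := ourEphPub, theirEphPub
+    if bytes.Compare(lo, hi) > 0 {
+        lo, hi = hi, lo
+    }
+    h := ripemd160.New()
+    h.Write(lo)
+    h.Write(hi)
+    var nonce1 [24]byte
+    copy(nonce1[:], h.Sum(nil)) // 20-byte hash + 4 zero bytes = 24 bytes
+    nonce2 := nonce1
+    nonce2[len(nonce2)-1] ^= 0x01 // flip the last bit of nonce1
+    if bytes.Equal(lo, ourEphPub) {
+        return nonce1, nonce2 // we had the smaller key
+    }
+    return nonce2, nonce1
+}
+```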
diff --git a/docs/spec/reactors/block_sync/img/bc-reactor.png b/docs/spec/reactors/block_sync/img/bc-reactor.png
new file mode 100644
index 000000000..f7fe0f819
Binary files /dev/null and b/docs/spec/reactors/block_sync/img/bc-reactor.png differ
diff --git a/docs/spec/reactors/block_sync/impl.md b/docs/spec/reactors/block_sync/impl.md
new file mode 100644
index 000000000..a96f83b32
--- /dev/null
+++ b/docs/spec/reactors/block_sync/impl.md
@@ -0,0 +1,46 @@
+## Blockchain Reactor
+
+* coordinates the pool for syncing
+* coordinates the store for persistence
+* coordinates the playing of blocks towards the app using a sm.BlockExecutor
+* handles switching between fastsync and consensus
+* it is a p2p.BaseReactor
+* starts the pool.Start() and its poolRoutine()
+* registers all the concrete types and interfaces for serialisation
+
+### poolRoutine
+
+* listens to these channels:
+  * pool requests blocks from a specific peer by posting to requestsCh; the block reactor then sends
+    a `bcBlockRequestMessage` for a specific height
+  * pool signals timeout of a specific peer by posting to timeoutsCh
+  * switchToConsensusTicker to periodically try and switch to consensus
+  * trySyncTicker to periodically check if we have fallen behind and then catch-up sync
+    * if there aren't any new blocks available on the pool it skips syncing
+* tries to sync the app by taking downloaded blocks from the pool, giving them to the app, and storing
+  them on disk
+* implements Receive, which is called by the switch/peer
+  * calls AddBlock on the pool when it receives a new block from a peer
+
+## Block Pool
+
+* responsible for downloading blocks from peers
+* makeRequestersRoutine()
+  * removes timed-out peers
+  * starts new requesters by calling makeNextRequester()
+* requestRoutine():
+  * picks a peer and sends the request, then blocks until:
+    * pool is stopped by listening to pool.Quit
+    * requester is stopped by listening to Quit
+    * request is redone
+    * we receive a block
+  * gotBlockCh is strange
+
+## Block Store
+
+* persists blocks to disk
+
+# TODO
+
+* How does the switch from bcR to conR happen? Does conR persist blocks to disk too?
+* What is the interaction between the consensus and blockchain reactors?
diff --git a/docs/spec/reactors/block_sync/reactor.md b/docs/spec/reactors/block_sync/reactor.md
new file mode 100644
index 000000000..97104eeeb
--- /dev/null
+++ b/docs/spec/reactors/block_sync/reactor.md
@@ -0,0 +1,307 @@
+# Blockchain Reactor
+
+The Blockchain Reactor's high-level responsibility is to enable peers who are
+far behind the current state of the consensus to quickly catch up by downloading
+many blocks in parallel, verifying their commits, and executing them against the
+ABCI application.
+
+Tendermint full nodes run the Blockchain Reactor as a service to provide blocks
+to new nodes. New nodes run the Blockchain Reactor in "fast_sync" mode,
+where they actively make requests for more blocks until they sync up.
+Once caught up, "fast_sync" mode is disabled and the node switches to
+using (and turns on) the Consensus Reactor.
+
+## Message Types
+
+```go
+const (
+  msgTypeBlockRequest    = byte(0x10)
+  msgTypeBlockResponse   = byte(0x11)
+  msgTypeNoBlockResponse = byte(0x12)
+  msgTypeStatusResponse  = byte(0x20)
+  msgTypeStatusRequest   = byte(0x21)
+)
+```
+
+```go
+type bcBlockRequestMessage struct {
+  Height int64
+}
+
+type bcNoBlockResponseMessage struct {
+  Height int64
+}
+
+type bcBlockResponseMessage struct {
+  Block Block
+}
+
+type bcStatusRequestMessage struct {
+  Height int64
+}
+
+type bcStatusResponseMessage struct {
+  Height int64
+}
+```
+
+## Architecture and algorithm
+
+The Blockchain reactor is organised as a set of concurrent tasks:
+- the Receive routine of the Blockchain Reactor
+- a task for creating Requesters
+- a set of Requester tasks, and
+- a Controller task.
+
+![Blockchain Reactor Architecture Diagram](img/bc-reactor.png)
+
+### Data structures
+
+These are the core data structures necessary to provide the Blockchain Reactor logic.
+
+The Requester data structure is used to track the assignment of a request for a `block` at position `height` to a
+peer with ID `peerID`.
+
+```go
+type Requester struct {
+  mtx         Mutex
+  block       Block
+  height      int64
+  peerID      p2p.ID
+  redoChannel chan struct{}
+}
+```
+
+Pool is the core data structure that stores the last executed block (`height`), the assignment of requests to peers
+(`requesters`), the current height and number of pending requests for each peer (`peers`), the maximum peer height, etc.
+
+```go
+type Pool struct {
+  mtx             Mutex
+  requesters      map[int64]*Requester
+  height          int64
+  peers           map[p2p.ID]*Peer
+  maxPeerHeight   int64
+  numPending      int32
+  store           BlockStore
+  requestsChannel chan<- BlockRequest
+  errorsChannel   chan<- peerError
+}
+```
+
+The Peer data structure stores for each peer its current `height`, the number of pending requests sent to
+the peer (`numPending`), etc.
+
+```go
+type Peer struct {
+  id         p2p.ID
+  height     int64
+  numPending int32
+  timeout    *time.Timer
+  didTimeout bool
+}
+```
+
+BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to
+a peer (`PeerID`).
+
+```go
+type BlockRequest struct {
+  Height int64
+  PeerID p2p.ID
+}
+```
+
+### Receive routine of Blockchain Reactor
+
+It is executed upon message reception on the BlockchainChannel inside the p2p receive routine. There is a separate p2p
+receive routine (and therefore a receive routine of the Blockchain Reactor) executed for each peer. Note that
+"try to send" does not block (it returns immediately) if the outgoing buffer is full.
+
+```go
+handleMsg(pool, m):
+  upon receiving bcBlockRequestMessage m from peer p:
+    block = load block for height m.Height from pool.store
+    if block != nil then
+      try to send BlockResponseMessage(block) to p
+    else
+      try to send bcNoBlockResponseMessage(m.Height) to p
+
+  upon receiving bcBlockResponseMessage m from peer p:
+    pool.mtx.Lock()
+    requester = pool.requesters[m.Height]
+    if requester == nil then
+      error("peer sent us a block we didn't expect")
+      continue
+
+    if requester.block == nil and requester.peerID == p then
+      requester.block = m.Block
+      pool.numPending -= 1  // atomic decrement
+      peer = pool.peers[p]
+      if peer != nil then
+        peer.numPending--
+        if peer.numPending == 0 then
+          peer.timeout.Stop()
+          // NOTE: we don't send a Quit signal to the corresponding requester task!
+        else
+          trigger peer timeout to expire after peerTimeout
+    pool.mtx.Unlock()
+
+  upon receiving bcStatusRequestMessage m from peer p:
+    try to send bcStatusResponseMessage(pool.store.Height) to p
+
+  upon receiving bcStatusResponseMessage m from peer p:
+    pool.mtx.Lock()
+    peer = pool.peers[p]
+    if peer != nil then
+      peer.height = m.Height
+    else
+      peer = create new Peer data structure with id = p and height = m.Height
+      pool.peers[p] = peer
+
+    if m.Height > pool.maxPeerHeight then
+      pool.maxPeerHeight = m.Height
+    pool.mtx.Unlock()
+
+onTimeout(p):
+  send error message to pool error channel
+  peer = pool.peers[p]
+  peer.didTimeout = true
+```
+
+### Requester tasks
+
+A Requester task is responsible for fetching a single block at position `height`.
+
+```go
+fetchBlock(height, pool):
+  while true do
+    peerID = nil
+    block = nil
+    peer = pickAvailablePeer(height)
+    peerID = peer.id
+
+    enqueue BlockRequest(height, peerID) to pool.requestsChannel
+    redo = false
+    while !redo do
+      select {
+        upon receiving Quit message do
+          return
+        upon receiving message on redoChannel do
+          pool.mtx.Lock()
+          pool.numPending++
+          redo = true
+          pool.mtx.Unlock()
+      }
+
+pickAvailablePeer(height):
+  selectedPeer = nil
+  while selectedPeer == nil do
+    pool.mtx.Lock()
+    for each peer in pool.peers do
+      if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
+        peer.numPending++
+        selectedPeer = peer
+        break
+    pool.mtx.Unlock()
+
+    if selectedPeer == nil then
+      sleep requestIntervalMS
+
+  return selectedPeer
+```
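+
+The quit/redo pattern in the requester loop above is plain Go channel
+machinery. Here is a minimal, self-contained sketch of just that pattern
+(names are ours; the real requester also tracks the block and peer):
+
+```go
+package main
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+// requester mirrors the quit/redo select loop from the pseudocode above:
+// it issues a request, then waits until it is told to quit or to redo.
+type requester struct {
+	height int64
+	quit   chan struct{}
+	redo   chan struct{}
+}
+
+func (r *requester) run(wg *sync.WaitGroup) {
+	defer wg.Done()
+	for {
+		fmt.Printf("requesting block %d\n", r.height) // stand-in for the real request
+		select {
+		case <-r.quit:
+			return
+		case <-r.redo:
+			// a peer failed us; loop around and pick a new peer
+		}
+	}
+}
+
+func main() {
+	var wg sync.WaitGroup
+	r := &requester{height: 42, quit: make(chan struct{}), redo: make(chan struct{})}
+	wg.Add(1)
+	go r.run(&wg)
+
+	r.redo <- struct{}{} // force one retry
+	time.Sleep(10 * time.Millisecond)
+	close(r.quit)
+	wg.Wait()
+}
+```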
+### Task for creating Requesters
+
+This task is responsible for continuously creating and starting Requester tasks.
+
+```go
+createRequesters(pool):
+  while true do
+    if !pool.isRunning then break
+    if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
+      pool.mtx.Lock()
+      nextHeight = pool.height + size(pool.requesters)
+      requester = create new requester for height nextHeight
+      pool.requesters[nextHeight] = requester
+      pool.numPending += 1  // atomic increment
+      start requester task
+      pool.mtx.Unlock()
+    else
+      sleep requestIntervalMS
+      pool.mtx.Lock()
+      for each peer in pool.peers do
+        if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
+          send error on pool error channel
+          peer.didTimeout = true
+        if peer.didTimeout then
+          for each requester in pool.requesters do
+            if requester.getPeerID() == peer.id then
+              enqueue msg on requester's redoChannel
+          delete(pool.peers, peer.id)
+      pool.mtx.Unlock()
+```
+
+### Main blockchain reactor controller task
+
+```go
+main(pool):
+  create trySyncTicker with interval trySyncIntervalMS
+  create statusUpdateTicker with interval statusUpdateIntervalSeconds
+  create switchToConsensusTicker with interval switchToConsensusIntervalSeconds
+
+  while true do
+    select {
+      upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
+        try to send bcBlockRequestMessage(Height) to Peer
+
+      upon receiving error(peer) on errorsChannel:
+        stop peer for error
+
+      upon receiving message on statusUpdateTickerChannel:
+        broadcast bcStatusRequestMessage(bcR.store.Height)  // message sent in a separate routine
+
+      upon receiving message on switchToConsensusTickerChannel:
+        pool.mtx.Lock()
+        receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
+        ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
+        haveSomePeers = size of pool.peers > 0
+        pool.mtx.Unlock()
+        if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
+          switch to consensus mode
+
+      upon receiving message on trySyncTickerChannel:
+        for i = 0; i < 10; i++ do
+          pool.mtx.Lock()
+          firstBlock = pool.requesters[pool.height].block
+          secondBlock = pool.requesters[pool.height+1].block
+          if firstBlock == nil or secondBlock == nil then continue
+          pool.mtx.Unlock()
+          verify firstBlock using LastCommit from secondBlock
+          if verification failed
+            pool.mtx.Lock()
+            peerID = pool.requesters[pool.height].peerID
+            redoRequestsForPeer(peerID)
+            delete(pool.peers, peerID)
+            stop peer peerID for error
+            pool.mtx.Unlock()
+          else
+            delete(pool.requesters, pool.height)
+            save firstBlock to store
+            pool.height++
+            execute firstBlock
+    }
+
+redoRequestsForPeer(pool, peerID):
+  for each requester in pool.requesters do
+    if requester.getPeerID() == peerID then
+      enqueue msg on requester's redoChannel
+```
+
+## Channels
+
+Defines `maxMsgSize` for the maximum size of incoming messages, and
+`SendQueueCapacity` and `RecvBufferCapacity` for the maximum sending and
+receiving buffers, respectively. These are meant to prevent amplification
+attacks by setting an upper limit on how much data we can receive from and
+send to a peer.
+
+Sending incorrectly encoded data will result in stopping the peer.
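+
+The "verify firstBlock using LastCommit from secondBlock" step in the
+controller amounts to checking that the +2/3 precommits carried in the next
+block actually sign the first block. A sketch using the `types` package
+follows; the exact method names and the part-size constant are assumptions
+that may differ between versions, so treat this as illustrative only.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/tendermint/tendermint/types"
+)
+
+// blockPartSize is an assumed block-part size; the real value comes from
+// the consensus params.
+const blockPartSize = 65536
+
+// verifyNext checks that secondBlock.LastCommit contains +2/3 precommits
+// for firstBlock, against the current validator set.
+func verifyNext(vals *types.ValidatorSet, chainID string, firstBlock, secondBlock *types.Block) error {
+	firstID := types.BlockID{
+		Hash:        firstBlock.Hash(),
+		PartsHeader: firstBlock.MakePartSet(blockPartSize).Header(),
+	}
+	return vals.VerifyCommit(chainID, firstID, firstBlock.Height, secondBlock.LastCommit)
+}
+
+func main() { fmt.Println("see verifyNext above") }
+```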
diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md
new file mode 100644
index 000000000..0f03b44b7
--- /dev/null
+++ b/docs/spec/reactors/consensus/consensus-reactor.md
@@ -0,0 +1,352 @@
+# Consensus Reactor
+
+The Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
+manages the state of the internal Tendermint consensus state machine.
+When the Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
+(which is used extensively in the gossip routines) and starts the following three routines for the peer p:
+Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, the Consensus Reactor is responsible
+for decoding messages received from a peer and for adequately processing the message depending on its type and content.
+The processing normally consists of updating the known peer state and, for some messages
+(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
+for further processing. In the following text we specify the core functionality of those separate units of execution
+that are part of the Consensus Reactor.
+
+## ConsensusState service
+
+Consensus State handles the execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
+and upon reaching agreement, commits blocks to the chain and executes them against the application.
+The internal state machine receives input from peers, the internal validator, and from a timer.
+
+Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
+Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed
+by the Receive Routine.
+
+### Receive Routine of the ConsensusState service
+
+The Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
+It is the only routine that updates RoundState, which contains the internal consensus state.
+Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
+It receives messages from peers, internal validators and from the Timeout Ticker,
+and invokes the corresponding handlers, potentially updating the RoundState.
+The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
+discussed in a separate document. For understanding this document
+it is sufficient to know that the Receive Routine manages and updates the RoundState data structure that is
+then extensively used by the gossip routines to determine what information should be sent to peer processes.
+
+## Round State
+
+RoundState defines the internal consensus state. It contains the height, round, round step, the current validator set,
+a proposal and proposal block for the current round, the locked round and block (if some block is locked), the set of
+received votes, and the last commit and last validators set.
+
+```golang
+type RoundState struct {
+  Height             int64
+  Round              int
+  Step               RoundStepType
+  Validators         ValidatorSet
+  Proposal           Proposal
+  ProposalBlock      Block
+  ProposalBlockParts PartSet
+  LockedRound        int
+  LockedBlock        Block
+  LockedBlockParts   PartSet
+  Votes              HeightVoteSet
+  LastCommit         VoteSet
+  LastValidators     ValidatorSet
+}
+```
+
+Internally, consensus will run as a state machine with the following states:
+
+- RoundStepNewHeight
+- RoundStepNewRound
+- RoundStepPropose
+- RoundStepProposeWait
+- RoundStepPrevote
+- RoundStepPrevoteWait
+- RoundStepPrecommit
+- RoundStepPrecommitWait
+- RoundStepCommit
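+
+The ordering of these steps matters for the gossip checks further below
+(e.g. `prs.Step <= RoundStepPrevote`). A sketch of how the steps can be
+modeled as an ordered Go enum; the values here are illustrative, not the
+canonical ones from the consensus package:
+
+```go
+package main
+
+import "fmt"
+
+// RoundStepType is an ordered enumeration of the consensus steps, so
+// comparisons like step <= RoundStepPrevote are meaningful.
+type RoundStepType uint8
+
+const (
+	RoundStepNewHeight RoundStepType = iota + 1
+	RoundStepNewRound
+	RoundStepPropose
+	RoundStepProposeWait
+	RoundStepPrevote
+	RoundStepPrevoteWait
+	RoundStepPrecommit
+	RoundStepPrecommitWait
+	RoundStepCommit
+)
+
+func main() {
+	step := RoundStepPropose
+	fmt.Println(step <= RoundStepPrevote) // true: propose comes before prevote
+}
+```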
+
+## Peer Round State
+
+Peer round state contains the known state of a peer. It is updated by the Receive routine of the
+Consensus Reactor and by the gossip routines upon sending a message to the peer.
+
+```golang
+type PeerRoundState struct {
+  Height                   int64          // Height peer is at
+  Round                    int            // Round peer is at, -1 if unknown.
+  Step                     RoundStepType  // Step peer is at
+  Proposal                 bool           // True if peer has proposal for this round
+  ProposalBlockPartsHeader PartSetHeader
+  ProposalBlockParts       BitArray
+  ProposalPOLRound         int            // Proposal's POL round. -1 if none.
+  ProposalPOL              BitArray       // nil until ProposalPOLMessage received.
+  Prevotes                 BitArray       // All votes peer has for this round
+  Precommits               BitArray       // All precommits peer has for this round
+  LastCommitRound          int            // Round of commit for last height. -1 if none.
+  LastCommit               BitArray       // All commit precommits of commit for last height.
+  CatchupCommitRound       int            // Round that we have commit for. Not necessarily unique. -1 if none.
+  CatchupCommit            BitArray       // All commit precommits peer has for this height & CatchupCommitRound
+}
+```
+
+## Receive method of Consensus reactor
+
+The entry point of the Consensus reactor is its receive method. When a message is received from a peer p,
+normally the peer round state is updated correspondingly, and some messages
+are passed on for further processing, for example to the ConsensusState service. We now specify the processing of messages
+in the receive method of the Consensus reactor for each message type. In the following message handlers, `rs` and `prs` denote
+`RoundState` and `PeerRoundState`, respectively.
+
+### NewRoundStepMessage handler
+
+```
+handleMessage(msg):
+  if msg is from smaller height/round/step then return
+  // Just remember these values.
+  prsHeight = prs.Height
+  prsRound = prs.Round
+  prsCatchupCommitRound = prs.CatchupCommitRound
+  prsCatchupCommit = prs.CatchupCommit
+
+  Update prs with values from msg
+  if prs.Height or prs.Round has been updated then
+    reset Proposal related fields of the peer state
+  if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
+    prs.Precommits = prsCatchupCommit
+  if prs.Height has been updated then
+    if prsHeight+1 == msg.Height and prsRound == msg.LastCommitRound then
+      prs.LastCommitRound = msg.LastCommitRound
+      prs.LastCommit = prs.Precommits
+    else
+      prs.LastCommitRound = msg.LastCommitRound
+      prs.LastCommit = nil
+    Reset prs.CatchupCommitRound and prs.CatchupCommit
+```
+
+### CommitStepMessage handler
+
+```
+handleMessage(msg):
+  if prs.Height == msg.Height then
+    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+    prs.ProposalBlockParts = msg.BlockParts
+```
+
+### HasVoteMessage handler
+
+```
+handleMessage(msg):
+  if prs.Height == msg.Height then
+    prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
+```
+
+### VoteSetMaj23Message handler
+
+```
+handleMessage(msg):
+  if prs.Height == msg.Height then
+    Record in rs that the peer claims to have a +2/3 majority for msg.BlockID
+    Send VoteSetBitsMessage showing the votes this node has for that BlockID
+```
+
+### ProposalMessage handler
+
+```
+handleMessage(msg):
+  if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
+  prs.Proposal = true
+  prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+  prs.ProposalBlockParts = empty set
+  prs.ProposalPOLRound = msg.POLRound
+  prs.ProposalPOL = nil
+  Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### ProposalPOLMessage handler
+
+```
+handleMessage(msg):
+  if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
+  prs.ProposalPOL = msg.ProposalPOL
+```
+
+### BlockPartMessage handler
+
+```
+handleMessage(msg):
+  if prs.Height != msg.Height || prs.Round != msg.Round then return
+  Record in prs that peer has block part msg.Part.Index
+  Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteMessage handler
+
+```
+handleMessage(msg):
+  Record in prs that a peer knows vote with index msg.vote.ValidatorIndex for particular height and round
+  Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteSetBitsMessage handler
+
+```
+handleMessage(msg):
+  Update prs with the bit-array of votes the peer claims to have for the msg.BlockID
+```
+
+## Gossip Data Routine
+
+It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
+`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
+and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
+
+```
+1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
+      Part = pick a random proposal block part the peer does not have
+      Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
+      if send returns true, record that the peer knows the corresponding block Part
+      Continue
+
+1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
+      help peer catch up using gossipDataForCatchup function
+      Continue
+
+1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
+      Sleep PeerGossipSleepDuration
+      Continue
+
+// at this point rs.Height == prs.Height and rs.Round == prs.Round
+1d) if (rs.Proposal != nil and !prs.Proposal) then
+      Send ProposalMessage(rs.Proposal) to the peer
+      if send returns true, record that the peer knows Proposal
+      if 0 <= rs.Proposal.POLRound then
+        polRound = rs.Proposal.POLRound
+        prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
+        Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
+      Continue
+
+2)  Sleep PeerGossipSleepDuration
+```
+
+### Gossip Data For Catchup
+
+This function is responsible for helping the peer catch up if it is at a smaller height (prs.Height < rs.Height).
+The function executes the following logic:
+
+    if peer does not have all block parts for prs.ProposalBlockParts then
+      blockMeta = Load Block Metadata for height prs.Height from blockStore
+      if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
+        Sleep PeerGossipSleepDuration
+        return
+      Part = pick a random proposal block part the peer does not have
+      Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
+      if send returns true, record that the peer knows the corresponding block Part
+      return
+    else Sleep PeerGossipSleepDuration
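+
+The "pick a random proposal block part the peer does not have" step can be
+expressed with the bit-array operations from the common libs. A sketch,
+assuming the `Sub` and `PickRandom` methods of the `libs/common` BitArray
+(import path and method names may differ between versions):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// pickPartToSend returns the index of a random block part that we have
+// but the peer does not, mirroring step 1a) of the gossip data routine.
+// ourParts and peerParts are bit arrays over the part indices.
+func pickPartToSend(ourParts, peerParts *cmn.BitArray) (int, bool) {
+	// Sub clears the bits the peer already has; PickRandom selects a
+	// uniformly random set bit from the remainder.
+	return ourParts.Sub(peerParts).PickRandom()
+}
+
+func main() {
+	ours := cmn.NewBitArray(4)
+	for i := 0; i < 4; i++ {
+		ours.SetIndex(i, true) // we hold all 4 parts
+	}
+	theirs := cmn.NewBitArray(4)
+	theirs.SetIndex(0, true) // the peer only has part 0
+
+	if idx, ok := pickPartToSend(ours, theirs); ok {
+		fmt.Println("send part", idx) // one of 1, 2, 3
+	}
+}
+```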
+
+## Gossip Votes Routine
+
+It is used to send the following message: `VoteMessage` on the VoteChannel.
+The gossip votes routine is based on the local RoundState (`rs`)
+and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
+
+```
+1a) if rs.Height == prs.Height then
+      if prs.Step == RoundStepNewHeight then
+        vote = random vote from rs.LastCommit the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
+        Prevotes = rs.Votes.Prevotes(prs.Round)
+        vote = random vote from Prevotes the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
+        Precommits = rs.Votes.Precommits(prs.Round)
+        vote = random vote from Precommits the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.ProposalPOLRound != -1 then
+        PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+        vote = random vote from PolPrevotes the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
+      vote = random vote from rs.LastCommit the peer does not have
+      Send VoteMessage(vote) to the peer
+      if send returns true, continue
+
+1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
+      Commit = get commit from BlockStore for prs.Height
+      vote = random vote from Commit the peer does not have
+      Send VoteMessage(vote) to the peer
+      if send returns true, continue
+
+2)  Sleep PeerGossipSleepDuration
+```
+
+## QueryMaj23Routine
+
+It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
+BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState
+(`prs`). The routine repeats forever the logic shown below.
+
+```
+1a) if rs.Height == prs.Height then
+      Prevotes = rs.Votes.Prevotes(prs.Round)
+      if there is a +2/3 majority for some blockId in Prevotes then
+        m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockId)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1b) if rs.Height == prs.Height then
+      Precommits = rs.Votes.Precommits(prs.Round)
+      if there is a +2/3 majority for some blockId in Precommits then
+        m = VoteSetMaj23Message(prs.Height, prs.Round, Precommit, blockId)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then
+      Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+      if there is a +2/3 majority for some blockId in Prevotes then
+        m = VoteSetMaj23Message(prs.Height, prs.ProposalPOLRound, Prevote, blockId)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
+      prs.Height <= blockStore.Height() then
+      Commit = LoadCommit(prs.Height)
+      m = VoteSetMaj23Message(prs.Height, Commit.Round, Precommit, Commit.blockId)
+      Send m to peer
+      Sleep PeerQueryMaj23SleepDuration
+
+2)  Sleep PeerQueryMaj23SleepDuration
+```
+
+## Broadcast routine
+
+The Broadcast routine subscribes to an internal event bus to receive new round steps, vote messages and proposal
+heartbeat messages, and broadcasts messages to peers upon receiving those events.
+It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon a new round state event. Note that
+broadcasting these messages does not depend on the PeerRoundState; they are sent on the StateChannel.
+Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.
+`ProposalHeartbeatMessage` is sent the same way on the StateChannel.
+
+## Channels
+
+Defines 4 channels: state, data, vote and vote_set_bits. Each channel
+has `SendQueueCapacity` and `RecvBufferCapacity` and
+`RecvMessageCapacity` set to `maxMsgSize`.
+
+Sending incorrectly encoded data will result in stopping the peer.
diff --git a/docs/spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md
new file mode 100644
index 000000000..4ea619b51
--- /dev/null
+++ b/docs/spec/reactors/consensus/consensus.md
@@ -0,0 +1,212 @@
+# Tendermint Consensus Reactor
+
+Tendermint Consensus is a distributed protocol executed by validator processes to agree on
+the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where
+each round is an attempt to reach agreement on the next block. A round starts by having a dedicated
+process (called the proposer) suggest to the other processes, with a `ProposalMessage`, what the
+next block should be.
+The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote
+messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the
+next block should be; a validator might vote with a `VoteMessage` for a different block. If in some
+round enough processes vote for the same block, then this block is committed and later
+added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the
+validator. The internals of the protocol and how it ensures safety and liveness properties are
+explained in a forthcoming document.
+
+For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
+block, as the block can be large, i.e., they don't embed the block inside `Proposal` and
+`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the
+[Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section) that uniquely identifies each block. The block itself is
+disseminated to validator processes using a peer-to-peer gossiping protocol. It starts with the
+proposer splitting a block into a number of block parts, which are then gossiped between
+processes using `BlockPartMessage`.
+
+Validators in Tendermint communicate by a peer-to-peer gossiping protocol. Each validator is connected
+only to a subset of processes called peers. Through the gossiping protocol, a validator sends its peers
+all the information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) needed so they can
+reach agreement on some block, and also obtain the content of the chosen block (block parts). As
+part of the gossiping protocol, processes also send auxiliary messages that inform peers about the
+executed steps of the core consensus algorithm (`NewRoundStepMessage` and `CommitStepMessage`), and
+also messages that inform peers what votes the process has seen (`HasVoteMessage`,
+`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping
+protocol to determine what messages a process should send to its peers.
+
+We now describe the content of each message exchanged during the Tendermint consensus protocol.
+
+## ProposalMessage
+
+ProposalMessage is sent when a new block is proposed. It is a suggestion of what the
+next block in the blockchain should be.
+
+```go
+type ProposalMessage struct {
+  Proposal Proposal
+}
+```
+
+### Proposal
+
+Proposal contains the height and round for which this proposal is made, the BlockID as a unique identifier
+of the proposed block, a timestamp, and two fields (POLRound and POLBlockID) that are needed for
+termination of the consensus. The message is signed by the validator private key.
+
+```go
+type Proposal struct {
+  Height     int64
+  Round      int
+  Timestamp  Time
+  BlockID    BlockID
+  POLRound   int
+  POLBlockID BlockID
+  Signature  Signature
+}
+```
+
+NOTE: In the current version of Tendermint, the consensus value in the proposal is represented with
+PartSetHeader, and with BlockID in the vote message. It should be aligned as suggested in this spec, as
+BlockID contains PartSetHeader.
+
+## VoteMessage
+
+VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the
+current round). Vote is defined in the [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section and contains the validator's
+information (validator address and index), the height and round for which the vote is sent, the vote type, the
+blockID if the process votes for some block (`nil` otherwise) and a timestamp when the vote is sent. The
+message is signed by the validator private key.
+
+```go
+type VoteMessage struct {
+  Vote Vote
+}
+```
+
+## BlockPartMessage
+
+BlockPartMessage is sent when gossiping a piece of the proposed block. It contains the height, round
+and the block part.
+
+```go
+type BlockPartMessage struct {
+  Height int64
+  Round  int
+  Part   Part
+}
+```
+
+## ProposalHeartbeatMessage
+
+ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions
+to be able to create the next block proposal.
+
+```go
+type ProposalHeartbeatMessage struct {
+  Heartbeat Heartbeat
+}
+```
+
+### Heartbeat
+
+Heartbeat contains validator information (address and index),
+height, round and sequence number. It is signed by the private key of the validator.
+
+```go
+type Heartbeat struct {
+  ValidatorAddress []byte
+  ValidatorIndex   int
+  Height           int64
+  Round            int
+  Sequence         int
+  Signature        Signature
+}
+```
+
+## NewRoundStepMessage
+
+NewRoundStepMessage is sent for every step transition during the core consensus algorithm execution.
+It is used in the gossip part of the Tendermint protocol to inform peers about the current
+height/round/step a process is in.
+
+```go
+type NewRoundStepMessage struct {
+  Height                int64
+  Round                 int
+  Step                  RoundStepType
+  SecondsSinceStartTime int
+  LastCommitRound       int
+}
+```
+
+## CommitStepMessage
+
+CommitStepMessage is sent when agreement on some block is reached. It contains the height for which
+agreement is reached, the block parts header that describes the decided block and is used to obtain all
+block parts, and a bit array of the block parts the process currently has, so its peers know which
+parts it is missing and can send them.
+
+```go
+type CommitStepMessage struct {
+  Height     int64
+  BlockID    BlockID
+  BlockParts BitArray
+}
+```
+
+TODO: We use BlockID instead of BlockPartsHeader (in the current implementation) for symmetry.
+
+## ProposalPOLMessage
+
+ProposalPOLMessage is sent when a previous block is re-proposed.
+It is used to inform peers in what round the process learned of this block (ProposalPOLRound),
+and what prevotes for the re-proposed block the process has.
+
+```go
+type ProposalPOLMessage struct {
+  Height           int64
+  ProposalPOLRound int
+  ProposalPOL      BitArray
+}
+```
+
+## HasVoteMessage
+
+HasVoteMessage is sent to indicate that a particular vote has been received. It contains the height,
+round, vote type and the index of the validator that is the originator of the corresponding vote.
+
+```go
+type HasVoteMessage struct {
+  Height int64
+  Round  int
+  Type   byte
+  Index  int
+}
+```
+
+## VoteSetMaj23Message
+
+VoteSetMaj23Message is sent to indicate that a process has seen +2/3 votes for some BlockID.
+It contains the height, round, vote type and the BlockID.
+
+```go
+type VoteSetMaj23Message struct {
+  Height  int64
+  Round   int
+  Type    byte
+  BlockID BlockID
+}
+```
+
+## VoteSetBitsMessage
+
+VoteSetBitsMessage is sent to communicate the bit-array of votes a process has seen for a given
+BlockID. It contains the height, round, vote type, BlockID and a bit array of
+the votes the process has.
+
+```go
+type VoteSetBitsMessage struct {
+  Height  int64
+  Round   int
+  Type    byte
+  BlockID BlockID
+  Votes   BitArray
+}
+```
diff --git a/docs/spec/reactors/consensus/proposer-selection.md b/docs/spec/reactors/consensus/proposer-selection.md
new file mode 100644
index 000000000..649d3dd21
--- /dev/null
+++ b/docs/spec/reactors/consensus/proposer-selection.md
@@ -0,0 +1,46 @@
+# Proposer selection procedure in Tendermint
+
+This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer.
+As Tendermint is a leader-based protocol, the proposer selection is critical for its correct functioning.
+Let `proposer_p(h,r)` denote the process returned by the Proposer Selection Procedure at process p, at height h
+and round r. Then the Proposer Selection Procedure should fulfill the following properties:
+
+`Agreement`: Given a validator set V, and two honest validators
+p and q, for each height h and each round r,
+proposer_p(h,r) = proposer_q(h,r)
+
+`Liveness`: In every consecutive sequence of rounds of size K (K is a system parameter), at least a
+single round has an honest proposer.
+
+`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
+voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
+with total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
+equal to its voting power.
+
+We now look at a few particular cases to understand better how fairness should be implemented.
+If we have 4 processes with the following voting power distribution (p0,4), (p1, 2), (p2, 2), (p3, 2) at some round r,
+we have the following sequence of proposer selections in the following rounds:
+
+`p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, etc`
+
+Let us now consider the following scenario, where the total voting power of faulty processes is aggregated in a single process
+p0: (p0,3), (p1, 1), (p2, 1), (p3, 1), (p4, 1), (p5, 1), (p6, 1), (p7, 1).
+In this case the sequence of proposer selections looks like this:
+
+`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`
+
+In this case, we see that the number of rounds coordinated by a faulty process is proportional to its voting power.
+We also consider the case where voting power is uniformly distributed among processes, i.e., we have 10 processes,
+each with a voting power of 1. Suppose there are 3 faulty processes with consecutive addresses,
+for example the first 3 processes are faulty. Then the sequence looks like this:
+
+`p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, etc`
+
+In this case, we have 3 consecutive rounds with a faulty proposer.
+One special case we consider is the case where a single honest process p0 has most of the voting power, for example:
+(p0,100), (p1, 2), (p2, 3), (p3, 4). Then the sequence of proposer selections looks like this:
+
+`p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc`
+
+This basically means that almost all rounds have the same proposer. But in this case, process p0 anyway has enough
+voting power to decide whatever it wants, so the fact that it coordinates almost all rounds seems correct.
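+
+One way to realize these properties is a weighted round-robin: every round,
+each validator's accumulator grows by its voting power, the validator with the
+highest accumulator proposes, and the proposer's accumulator is decreased by
+the total power. The sketch below reproduces the first sequence above;
+tie-breaking and the exact bookkeeping in the real validator set may differ.
+
+```go
+package main
+
+import "fmt"
+
+// validator holds a voting power and a proposer-priority accumulator.
+type validator struct {
+	name  string
+	power int64
+	accum int64
+}
+
+// nextProposer advances one round of the weighted round-robin described
+// above. Ties are broken by list order here.
+func nextProposer(vals []*validator) *validator {
+	var total int64
+	for _, v := range vals {
+		v.accum += v.power
+		total += v.power
+	}
+	proposer := vals[0]
+	for _, v := range vals[1:] {
+		if v.accum > proposer.accum {
+			proposer = v
+		}
+	}
+	proposer.accum -= total
+	return proposer
+}
+
+func main() {
+	vals := []*validator{
+		{"p0", 4, 0}, {"p1", 2, 0}, {"p2", 2, 0}, {"p3", 2, 0},
+	}
+	// Prints: p0 p1 p2 p3 p0 p0 p1 p2 p3 p0
+	for i := 0; i < 10; i++ {
+		fmt.Print(nextProposer(vals).name, " ")
+	}
+	fmt.Println()
+}
+```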
diff --git a/docs/spec/reactors/evidence/reactor.md b/docs/spec/reactors/evidence/reactor.md
new file mode 100644
index 000000000..efa63aa4c
--- /dev/null
+++ b/docs/spec/reactors/evidence/reactor.md
@@ -0,0 +1,10 @@
+# Evidence Reactor
+
+## Channels
+
+[#1503](https://github.com/tendermint/tendermint/issues/1503)
+
+Sending invalid evidence will result in stopping the peer.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
diff --git a/docs/spec/reactors/mempool/concurrency.md b/docs/spec/reactors/mempool/concurrency.md
new file mode 100644
index 000000000..991113e6d
--- /dev/null
+++ b/docs/spec/reactors/mempool/concurrency.md
@@ -0,0 +1,8 @@
+# Mempool Concurrency
+
+Look at the concurrency model this uses...
+
+* Receiving CheckTx
+* Broadcasting new tx
+* Interfaces with consensus engine, reap/update while checking
+* Calling the ABCI app (ordering. callbacks. how proxy works alongside the blockchain proxy which actually writes blocks)
diff --git a/docs/spec/reactors/mempool/config.md b/docs/spec/reactors/mempool/config.md
new file mode 100644
index 000000000..776149ba0
--- /dev/null
+++ b/docs/spec/reactors/mempool/config.md
@@ -0,0 +1,59 @@
+# Mempool Configuration
+
+Here we describe the configuration options around the mempool.
+For the purposes of this document, they are described
+as command-line flags, but they can also be passed in as
+environment variables or in the config.toml file. The
+following are all equivalent:
+
+Flag: `--mempool.recheck_empty=false`
+
+Environment: `TM_MEMPOOL_RECHECK_EMPTY=false`
+
+Config:
+```
+[mempool]
+recheck_empty = false
+```
+
+## Recheck
+
+`--mempool.recheck=false` (default: true)
+
+`--mempool.recheck_empty=false` (default: true)
+
+Recheck determines if the mempool rechecks all pending
+transactions after a block was committed. Once a block
+is committed, the mempool removes all valid transactions
+that were successfully included in the block.
+
+If `recheck` is true, then it will rerun CheckTx on
+all remaining transactions with the new block state.
+
+If the block contained no transactions, it will skip the
+recheck unless `recheck_empty` is true.
+
+## Broadcast
+
+`--mempool.broadcast=false` (default: true)
+
+Determines whether this node gossips any valid transactions
+that arrive in the mempool. The default is to gossip anything that
+passes CheckTx. If this is disabled, transactions are not
+gossiped, but instead stored locally and added to the next
+block for which this node is the proposer.
+
+## WalDir
+
+`--mempool.wal_dir=/tmp/gaia/mempool.wal` (default: $TM_HOME/data/mempool.wal)
+
+This defines the directory where the mempool writes its write-ahead
+logs. These files can be used to reload unbroadcasted
+transactions if the node crashes.
+
+If the directory passed in is an absolute path, the wal file is
+created there. If the directory is a relative path, the path is
+appended to the home directory of the tendermint process to
+generate an absolute path to the wal directory
+(default `$HOME/.tendermint`, or set via `TM_HOME` or `--home`)
diff --git a/docs/spec/reactors/mempool/functionality.md b/docs/spec/reactors/mempool/functionality.md
new file mode 100644
index 000000000..85c3dc58d
--- /dev/null
+++ b/docs/spec/reactors/mempool/functionality.md
@@ -0,0 +1,37 @@
+# Mempool Functionality
+
+The mempool maintains a list of potentially valid transactions,
+both to broadcast to other nodes, as well as to provide to the
+consensus reactor when it is selected as the block proposer.
+
+There are two sides to the mempool state:
+
+* External: get, check, and broadcast new transactions
+* Internal: return valid transactions, update the list after a block commit
+
+## External functionality
+
+External functionality is exposed via network interfaces
+to potentially untrusted actors.
+
+* CheckTx - triggered via RPC or P2P
+* Broadcast - gossip messages after a successful check
+
+## Internal functionality
+
+Internal functionality is exposed via method calls to other
+code compiled into the tendermint binary.
+
+* Reap - get txs to propose in the next block
+* Update - remove txs that were included in the last block
+* ABCI.CheckTx - call the ABCI app to validate the tx
+
+What does it provide the consensus reactor?
+What guarantees does it need from the ABCI app?
+(talk about interleaving processes in concurrency)
+
+## Optimizations
+
+Talk about the LRU cache to make sure we don't process any
+tx that we have seen before
diff --git a/docs/spec/reactors/mempool/messages.md b/docs/spec/reactors/mempool/messages.md
new file mode 100644
index 000000000..9a624dff1
--- /dev/null
+++ b/docs/spec/reactors/mempool/messages.md
@@ -0,0 +1,61 @@
+# Mempool Messages
+
+## P2P Messages
+
+There is currently only one message that the Mempool broadcasts
+and receives over the p2p gossip network (via the reactor):
+`TxMessage`
+
+```go
+// TxMessage is a MempoolMessage containing a transaction.
+type TxMessage struct {
+  Tx types.Tx
+}
+```
+
+TxMessage is go-wire encoded and prepended with `0x1` as a
+"type byte". This is followed by a go-wire encoded byte-slice.
+The prefix of a 40=0x28 byte tx is: `0x010128...` followed by
+the actual 40-byte tx. The prefix of a 350=0x015e byte tx is:
+`0x0102015e...` followed by the actual 350 byte tx.
+
+(Please see the [go-wire repo](https://github.com/tendermint/go-wire#an-interface-example) for more information)
+
+## RPC Messages
+
+The Mempool exposes `CheckTx([]byte)` over the RPC interface.
+
+It can be posted via `broadcast_tx_commit`, `broadcast_tx_sync` or
+`broadcast_tx_async`. They all parse a message with one argument,
+`"tx": "HEX_ENCODED_BINARY"`, and differ only in how long they
+wait before returning (sync makes sure CheckTx passes, commit
+makes sure it was included in a signed block).
+
+Request (`POST http://gaia.zone:26657/`):
+
+```json
+{
+  "id": "",
+  "jsonrpc": "2.0",
+  "method": "broadcast_tx_sync",
+  "params": {
+    "tx": "F012A4BC68..."
+  }
+}
+```
+
+Response:
+
+```json
+{
+  "error": "",
+  "result": {
+    "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52",
+    "log": "",
+    "data": "",
+    "code": 0
+  },
+  "id": "",
+  "jsonrpc": "2.0"
+}
+```
diff --git a/docs/spec/reactors/mempool/reactor.md b/docs/spec/reactors/mempool/reactor.md
new file mode 100644
index 000000000..2bdbd8951
--- /dev/null
+++ b/docs/spec/reactors/mempool/reactor.md
@@ -0,0 +1,14 @@
+# Mempool Reactor
+
+## Channels
+
+[#1503](https://github.com/tendermint/tendermint/issues/1503)
+
+The Mempool maintains a cache of the last 10000 transactions to prevent
+replaying old transactions (plus transactions coming from other
+validators, who are continually exchanging transactions). Read [Replay
+Protection](https://tendermint.readthedocs.io/projects/tools/en/master/app-development.html?#replay-protection)
+for details.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
diff --git a/docs/spec/reactors/pex/pex.md b/docs/spec/reactors/pex/pex.md
new file mode 100644
index 000000000..317803b8e
--- /dev/null
+++ b/docs/spec/reactors/pex/pex.md
@@ -0,0 +1,123 @@
+# Peer Strategy and Exchange
+
+Here we outline the design of the AddressBook
+and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected
+to good peers and to gossip peers to others.
+
+## Peer Types
+
+Certain peers are special in that they are specified by the user as `persistent`,
+which means we auto-redial them if the connection fails, or if we fail to dial
+them.
+Some peers can be marked as `private`, which means
+we will not put them in the address book or gossip them to others.
+
+All peers except private peers are tracked using the address book.
+
+## Discovery
+
+Peer discovery begins with a list of seeds.
+When we have no peers, or have been unable to find enough peers from existing ones,
+we dial a randomly selected seed to get a list of peers to dial.
+
+On startup, we will also immediately dial the given list of `persistent_peers`,
+and will attempt to maintain persistent connections with them. If the connections die, or we fail to dial,
+we will redial every 5s for a few minutes, then switch to an exponential backoff schedule,
+and after about a day of trying, stop dialing the peer.
+
+So long as we have fewer than `MinNumOutboundPeers`, we periodically request additional peers
+from each of our own peers. If sufficient time goes by and we still can't find enough peers,
+we try the seeds again.
+
+## Listening
+
+Peers listen on a configurable ListenAddr that they self-report in their
+NodeInfo during handshakes with other peers. Peers accept up to (MaxNumPeers -
+MinNumOutboundPeers) incoming peers.
+
+## Address Book
+
+Peers are tracked via their ID (their PubKey.Address()).
+Peers are added to the address book from the PEX when they first connect to us or
+when we hear about them from other peers.
+
+The address book is arranged in sets of buckets, and distinguishes between
+vetted (old) and unvetted (new) peers. It keeps different sets of buckets for vetted and
+unvetted peers. Buckets provide randomization over peer selection. Peers are put
+in buckets according to their IP groups.
+
+A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and
+each instance of the peer can have a different IP:PORT.
+
+If we're trying to add a new peer but there's no space in its bucket, we'll
+remove the worst peer from that bucket to make room.
+
+## Vetting
+
+When a peer is first added, it is unvetted.
+Marking a peer as vetted is outside the scope of the `p2p` package.
+For Tendermint, a peer becomes vetted once it has contributed sufficiently
+at the consensus layer; ie. once it has sent us valid and not-yet-known
+votes and/or block parts for `NumBlocksForVetted` blocks.
+Other users of the p2p package can determine their own conditions for when a peer is marked vetted.
+
+If a peer becomes vetted but there are already too many vetted peers,
+a randomly selected one of the vetted peers becomes unvetted.
+
+If a peer becomes unvetted (either a new peer, or one that was previously vetted),
+a randomly selected one of the unvetted peers is removed from the address book.
+
+More fine-grained tracking of peer behaviour can be done using
+a trust metric (see below), but it's best to start with something simple.
+
+## Select Peers to Dial
+
+When we need more peers, we pick them randomly from the addrbook with some
+configurable bias for unvetted peers. The bias should be lower when we have fewer peers
+and can increase as we obtain more, ensuring that our first peers are more trustworthy,
+but always giving us the chance to discover new good peers.
+
+We track the last time we dialed a peer and the number of unsuccessful attempts
+we've made. If too many attempts are made, we mark the peer as bad.
+
+Connection attempts are made with exponential backoff (plus jitter). Because
+the selection process happens every `ensurePeersPeriod`, we might not end up
+dialing a peer for much longer than the backoff duration.
+
+If we fail to connect to the peer after 16 tries (with exponential backoff), we remove it from the address book completely.
+
+## Select Peers to Exchange
+
+When we’re asked for peers, we select them as follows:
+- select at most `maxGetSelection` peers
+- try to select at least `minGetSelection` peers - if we have fewer than that, select them all
+- select a random, unbiased `getSelectionPercent` of the peers
+
+Send the selected peers. Note we select peers for sending without bias for vetted/unvetted.
+
+## Preventing Spam
+
+There are various cases where we decide a peer has misbehaved and we disconnect from them.
+When this happens, the peer is removed from the address book and blacklisted for
+some amount of time. We call this "Disconnect and Mark".
+Note that the bad behaviour may be detected outside the PEX reactor itself
+(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor
+so it can remove and mark the peer.
+
+In the PEX, if a peer sends us an unsolicited list of peers,
+or if the peer sends a request too soon after another one,
+we Disconnect and MarkBad.
+
+## Trust Metric
+
+The quality of peers can be tracked in more fine-grained detail using a
+Proportional-Integral-Derivative (PID) controller that incorporates
+current, past, and rate-of-change data to inform peer quality.
+
+While a PID trust metric has been implemented, it remains for future work
+to use it in the PEX.
+
+See the [trustmetric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md)
+and [trustmetric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
+architecture docs for more details.
+
diff --git a/docs/spec/reactors/pex/reactor.md b/docs/spec/reactors/pex/reactor.md
new file mode 100644
index 000000000..468f182cc
--- /dev/null
+++ b/docs/spec/reactors/pex/reactor.md
@@ -0,0 +1,12 @@
+# PEX Reactor
+
+## Channels
+
+Defines only `SendQueueCapacity`.
+[#1503](https://github.com/tendermint/tendermint/issues/1503)
+
+Implements rate-limiting by enforcing minimal time between two consecutive
+`pexRequestMessage` requests. If the peer sends us addresses we did not ask for,
+it is stopped.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
diff --git a/docs/spec/scripts/crypto.go b/docs/spec/scripts/crypto.go
new file mode 100644
index 000000000..9ae800f8f
--- /dev/null
+++ b/docs/spec/scripts/crypto.go
@@ -0,0 +1,16 @@
+package main
+
+import (
+  "fmt"
+  "os"
+
+  amino "github.com/tendermint/go-amino"
+  crypto "github.com/tendermint/tendermint/crypto"
+)
+
+func main() {
+  cdc := amino.NewCodec()
+  crypto.RegisterAmino(cdc)
+  cdc.PrintTypes(os.Stdout)
+  fmt.Println("")
+}
diff --git a/docs/spec/software/abci.md b/docs/spec/software/abci.md
new file mode 100644
index 000000000..613e181f0
--- /dev/null
+++ b/docs/spec/software/abci.md
@@ -0,0 +1,192 @@
+# Application Blockchain Interface (ABCI)
+
+ABCI is the interface between Tendermint (a state-machine replication engine)
+and an application (the actual state machine).
+
+The ABCI message types are defined in a [protobuf
+file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
+
+For full details on the ABCI message types and protocol, see the [ABCI
+specification](https://github.com/tendermint/tendermint/blob/develop/docs/abci-spec.md).
+Be sure to read the specification if you're trying to build an ABCI app!
+
+For additional details on server implementation, see the [ABCI
+readme](https://github.com/tendermint/tendermint/blob/develop/abci/README.md).
+
+Here we provide some more details around the use of ABCI by Tendermint and
+clarify common "gotchas".
+
+## ABCI connections
+
+Tendermint opens 3 ABCI connections to the app: one for Consensus, one for
+Mempool, one for Queries.
+
+## Async vs Sync
+
+The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages.
+This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward
+transactions to the app before it's finished processing previous ones.
+
+Thus, DeliverTx and CheckTx messages are sent asynchronously, while all other
+messages are sent synchronously.
+
+## CheckTx and Commit
+
+It is typical to hold three distinct states in an ABCI app: CheckTxState, DeliverTxState,
+QueryState. The QueryState contains the latest committed state for a block.
+The CheckTxState and DeliverTxState may be updated concurrently with one another.
+Before Commit is called, Tendermint locks and flushes the mempool so that no new changes will happen
+to CheckTxState. When Commit completes, it unlocks the mempool.
+
+Thus, during Commit, it is safe to reset the QueryState and the CheckTxState to the latest DeliverTxState
+(ie. the new state from executing all the txs in the block).
+
+Note, however, that it is not possible to send transactions to Tendermint during Commit - if your app
+tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock.
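+
+As a sketch of the three-state pattern above (illustrative only; `State`,
+`app`, and the `commit` helper are our names, not part of ABCI):
+
+```go
+package main
+
+import "fmt"
+
+// State is a stand-in for an application's key-value state.
+type State map[string]string
+
+func (s State) Copy() State {
+	c := make(State, len(s))
+	for k, v := range s {
+		c[k] = v
+	}
+	return c
+}
+
+// app holds the three states described above.
+type app struct {
+	checkTxState   State // updated by CheckTx
+	deliverTxState State // updated by DeliverTx
+	queryState     State // latest committed state, served to queries
+}
+
+// commit resets QueryState and CheckTxState to the post-block
+// DeliverTxState. Tendermint has locked and flushed the mempool before
+// calling Commit, so no CheckTx can race with this reset.
+func (a *app) commit() {
+	a.queryState = a.deliverTxState.Copy()
+	a.checkTxState = a.deliverTxState.Copy()
+}
+
+func main() {
+	a := &app{checkTxState: State{}, deliverTxState: State{}, queryState: State{}}
+	a.deliverTxState["balance/alice"] = "10" // pretend a DeliverTx ran
+	a.commit()
+	fmt.Println(a.queryState["balance/alice"]) // 10
+}
+```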
+
+## EndBlock Validator Updates
+
+Updates to the Tendermint validator set can be made by returning `Validator`
+objects in the `ResponseEndBlock`:
+
+```
+message Validator {
+  bytes address = 1;
+  PubKey pub_key = 2;
+  int64 power = 3;
+}
+
+message PubKey {
+  string type = 1;
+  bytes data = 2;
+}
+```
+
+The `pub_key` currently supports two types:
+ - `type = "ed25519"` and `data = <raw 32-byte ed25519 pubkey>`
+ - `type = "secp256k1"` and `data = <33-byte OpenSSL compressed public key>`
+
+If the address is provided, it must match the address of the pubkey, as
+specified [here](/docs/spec/blockchain/encoding.md#Addresses)
+
+(Note: In the v0.19 series, the `pub_key` is the [Amino encoded public
+key](/docs/spec/blockchain/encoding.md#public-key-cryptography).
+For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey
+`76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be
+Amino encoded as
+`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`)
+
+(Note: In old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a
+single type byte, so for ED25519 we'd have `pub_key = 0x1 | pub`)
+
+The `power` is the new voting power for the validator, with the
+following rules:
+
+- power must be non-negative
+- if power is 0, the validator must already exist, and will be removed from the
+  validator set
+- if power is non-0:
+  - if the validator does not already exist, it will be added to the validator
+    set with the given power
+  - if the validator does already exist, its power will be adjusted to the given power
+
+## InitChain Validator Updates
+
+ResponseInitChain has the option to return a list of validators.
+If the list is not empty, Tendermint will adopt it for the validator set.
+This way the application can determine the initial validator set for the
+blockchain.
+
+Note that if addresses are included in the returned validators, they must match
+the address of the public key.
+
+ResponseInitChain also includes ConsensusParams, but these are presently
+ignored.
+
+## Query
+
+Query is a generic message type with lots of flexibility to enable diverse sets
+of queries from applications. Tendermint has no requirements from the Query
+message for normal operation - that is, the ABCI app developer need not implement Query functionality if they do not wish to.
+That said, Tendermint makes a number of queries to support some optional
+features. These are:
+
+### Peer Filtering
+
+When Tendermint connects to a peer, it sends two queries to the ABCI application
+using the following paths, with no additional data:
+
+ - `p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denotes the IP address and
+   the port of the connection
+ - `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the
+   pubkey.Address() for the peer's PubKey)
+
+If either of these queries returns a non-zero ABCI code, Tendermint will refuse
+to connect to the peer.
+
+## Info and the Handshake/Replay
+
+On startup, Tendermint calls Info on the Query connection to get the latest
+committed state of the app. The app MUST return information consistent with the
+last block for which it successfully completed Commit.
+
+If the app successfully committed block H but not H+1, then `last_block_height = H`
+and `last_block_app_hash = <hash returned by Commit for block H>`. If the app
+failed during the Commit of block H, then `last_block_height = H-1` and
+`last_block_app_hash = <hash returned by Commit for block H-1>`.
+
+We now distinguish three heights, and describe how Tendermint syncs itself with
+the app.
+
+```
+storeBlockHeight = height of the last block Tendermint saw a commit for
+stateBlockHeight = height of the last block for which Tendermint completed all
+    block processing and saved all ABCI results to disk
+appBlockHeight = height of the last block for which the ABCI app successfully
+    completed Commit
+```
+
+Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`.
+Note also we never call Commit on an ABCI app twice for the same height.
+
+The procedure is as follows.
+
+First, some simple start conditions:
+
+If `appBlockHeight == 0`, then call InitChain.
+
+If `storeBlockHeight == 0`, we're done.
+
+Now, some sanity checks:
+
+If `storeBlockHeight < appBlockHeight`, error
+If `storeBlockHeight < stateBlockHeight`, panic
+If `storeBlockHeight > stateBlockHeight+1`, panic
+
+Now, the meat:
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`,
+  replay all blocks in full from `appBlockHeight` to `storeBlockHeight`.
+  This happens if we completed processing the block, but the app forgot its height.
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done.
+  This happens if we crashed at an opportune spot.
+
+If `storeBlockHeight == stateBlockHeight+1`,
+  this happens if we started processing the block but didn't finish.
+
+  If `appBlockHeight < stateBlockHeight`,
+    replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`,
+    and replay the block at `storeBlockHeight` using the WAL.
+    This happens if the app forgot the last block it committed.
+
+  If `appBlockHeight == stateBlockHeight`,
+    replay the last block (storeBlockHeight) in full.
+    This happens if we crashed before the app finished Commit.
+
+  If `appBlockHeight == storeBlockHeight`,
+    update the state using the saved ABCI responses, but don't run the block against the real app.
+    This happens if we crashed after the app finished Commit but before Tendermint saved the state.
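+
+The branching above can be captured in a few lines of Go. This is a sketch
+only, with the replay actions stubbed out as prints; it is not the actual
+handshake code.
+
+```go
+package main
+
+import "fmt"
+
+// syncWithApp mirrors the handshake logic above: given the three heights,
+// it decides how to bring the app in sync with the block store.
+func syncWithApp(storeH, stateH, appH int64) error {
+	if appH == 0 {
+		fmt.Println("call InitChain")
+	}
+	if storeH == 0 {
+		return nil // nothing to replay
+	}
+	switch {
+	case storeH < appH:
+		return fmt.Errorf("app height %d ahead of store height %d", appH, storeH)
+	case storeH < stateH, storeH > stateH+1:
+		panic("corrupted store/state heights")
+	}
+
+	if storeH == stateH {
+		if appH < storeH {
+			fmt.Printf("replay blocks %d..%d in full\n", appH+1, storeH)
+		}
+		return nil // appH == storeH: crashed at an opportune spot
+	}
+
+	// storeH == stateH+1: we started processing a block but didn't finish.
+	switch {
+	case appH < stateH:
+		fmt.Printf("replay blocks %d..%d in full, then block %d from the WAL\n", appH+1, storeH-1, storeH)
+	case appH == stateH:
+		fmt.Printf("replay block %d in full\n", storeH)
+	case appH == storeH:
+		fmt.Println("update state from saved ABCI responses; don't re-run the block")
+	}
+	return nil
+}
+
+func main() {
+	_ = syncWithApp(10, 9, 9) // crashed mid-block: replay block 10 in full
+}
+```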
+
+Make sure to read about [WAL
+corruption](https://tendermint.readthedocs.io/projects/tools/en/master/specification/corruption.html#wal-corruption)
+and recovery strategies.
diff --git a/docs/specification/block-structure.rst b/docs/specification/block-structure.rst
new file mode 100644
index 000000000..7d8f3464c
--- /dev/null
+++ b/docs/specification/block-structure.rst
@@ -0,0 +1,218 @@
+Block Structure
+===============
+
+The tendermint consensus engine records all agreements by a
+supermajority of nodes into a blockchain, which is replicated among all
+nodes. This blockchain is accessible via various rpc endpoints, mainly
+``/block?height=`` to get the full block, as well as
+``/blockchain?minHeight=_&maxHeight=_`` to get a list of headers. But
+what exactly is stored in these blocks?
+
+Block
+~~~~~
+
+A
+`Block `__
+contains:
+
+- a `Header <#header>`__, which contains merkle hashes for various chain
+  states
+- the
+  `Data `__,
+  which is all transactions to be processed
+- the `LastCommit <#commit>`__: > 2/3 signatures for the last block
+
+The signatures returned along with block ``H`` are those validating
+block ``H-1``. This can be a little confusing, but we must also consider
+that the ``Header`` also contains the ``LastCommitHash``. It would be
+impossible for a Header to include the commits that sign it, as that would
+cause an infinite loop. But when we get block ``H``, we find
+``Header.LastCommitHash``, which must match the hash of ``LastCommit``.
+
+Header
+~~~~~~
+
+The
+`Header `__
+contains lots of information (follow the link for up-to-date info). Notably,
+it maintains the ``Height``, the ``LastBlockID`` (to make it a chain),
+and hashes of the data, the app state, and the validator set. This is
+important as the only item that is signed by the validators is the
+``Header``, and all other data must be validated against one of the
+merkle hashes in the ``Header``.
+
+The ``DataHash`` can provide a nice check on the
+`Data `__
+returned in this same block. If you are subscribed to new blocks via the
+tendermint RPC, in order to display or process the new transactions you
+should at least validate that the ``DataHash`` is valid. If it is
+important to verify authenticity, you must wait for the ``LastCommit``
+from the next block to make sure the block header (including
+``DataHash``) was properly signed.
+
+The ``ValidatorHash`` contains a hash of the current
+`Validators `__.
+Tracking all changes in the validator set is complex, but a client can
+quickly compare this hash with the `hash of the currently known
+validators `__
+to see if there have been changes.
+
+The ``AppHash`` serves as the basis for validating any merkle proofs
+that come from the ABCI application. It represents the
+state of the actual application, rather than the state of the blockchain
+itself. This means it's necessary in order to perform any business
+logic, such as verifying an account balance.
+
+**Note** After the transactions are committed to a block, they still
+need to be processed in a separate step, which happens between the
+blocks. If you find a given transaction in the block at height ``H``,
+the effects of running that transaction will be first visible in the
+``AppHash`` from the block header at height ``H+1``.
+
+Like the ``LastCommit`` issue, this is a requirement of the immutability
+of the block chain, as the application only applies transactions *after*
+they are committed to the chain.
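+
+As a sketch of the ``DataHash`` check mentioned above, assuming a
+``merkleRoot`` helper with the splitting rule described in
+`Simple Tree <./merkle.html>`__ (the real hashing details live in the merkle
+package, so treat this as illustrative only; it needs the ``bytes`` and
+``crypto/sha256`` imports):
+
+.. code:: go
+
+    // checkDataHash recomputes a simple merkle root over the block's
+    // transactions and compares it to the DataHash from the Header.
+    func checkDataHash(dataHash []byte, txs [][]byte) bool {
+        return bytes.Equal(dataHash, merkleRoot(txs))
+    }
+
+    // merkleRoot is an illustrative simple-tree hash: leaves are hashed,
+    // and the left side may be one greater when the count is odd.
+    func merkleRoot(items [][]byte) []byte {
+        switch len(items) {
+        case 0:
+            return nil
+        case 1:
+            h := sha256.Sum256(items[0])
+            return h[:]
+        default:
+            k := (len(items) + 1) / 2
+            h := sha256.Sum256(append(merkleRoot(items[:k]), merkleRoot(items[k:])...))
+            return h[:]
+        }
+    }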
+
+Commit
+~~~~~~
+
+The
+`Commit `__
+contains a set of
+`Votes `__
+that were made by the validator set to reach consensus on this block.
+This is the key to the security in any PoS system: no data
+that cannot be traced back to a block header with a valid set of Votes
+can be trusted. Thus, getting the Commit data and verifying the votes is
+extremely important.
+
+As mentioned above, in order to find the ``precommit votes`` for block
+header ``H``, we need to query block ``H+1``. Then we need to check the
+votes, and make sure they really are for that block and are properly formatted.
+Much of this code is implemented in Go in the
+`light-client `__ package.
+If you look at the code, you will notice that we need to provide the
+``chainID`` of the blockchain in order to properly calculate the votes.
+This is to prevent anyone from swapping votes between chains to fake (or
+frame) a validator. Also note that this ``chainID`` is in the
+``genesis.json`` from *Tendermint*, not the ``genesis.json`` from the
+basecoin app (`that is a different
+chainID... `__).
+
+Once we have those votes, and we have calculated the proper `sign
+bytes `__
+using the chainID and a `nice helper
+function `__,
+we can verify them. The light client is responsible for maintaining a
+set of validators that we trust. Each vote only stores the validator's
+``Address``, as well as the ``Signature``. Assuming we have a local copy
+of the trusted validator set, we can look up the ``Public Key`` of the
+validator given its ``Address``, then verify that the ``Signature``
+matches the ``SignBytes`` and ``Public Key``. Then we sum up the total
+voting power of all validators whose votes fulfilled all these
+stringent requirements. If the total voting power for a single
+block is greater than 2/3 of all voting power, then we can finally trust
+the block header, the AppHash, and the proof we got from the ABCI
+application.
+
+Vote Sign Bytes
+^^^^^^^^^^^^^^^
+
+The ``sign-bytes`` of a vote is produced by taking a
+`stable-json `__-like
+deterministic JSON `wire <./wire-protocol.html>`__ encoding of
+the vote (excluding the ``Signature`` field), and wrapping it with
+``{"chain_id":"my_chain","vote":...}``.
+
+For example, a precommit vote might have the following ``sign-bytes``:
+
+.. code:: json
+
+    {"chain_id":"my_chain","vote":{"block_hash":"611801F57B4CE378DF1A3FFF1216656E89209A99","block_parts_header":{"hash":"B46697379DBE0774CC2C3B656083F07CA7E0F9CE","total":123},"height":1234,"round":1,"type":2}}
+
+Block Hash
+~~~~~~~~~~
+
+The `block
+hash `__
+is the `Simple Tree hash <./merkle.html#simple-tree-with-dictionaries>`__
+of the fields of the block ``Header`` encoded as a list of
+``KVPair``\ s.
+
+Transaction
+~~~~~~~~~~~
+
+A transaction is any sequence of bytes. It is up to your
+ABCI application to accept or reject transactions.
+
+BlockID
+~~~~~~~
+
+Many of these data structures refer to the
+`BlockID `__,
+which is the ``BlockHash`` (hash of the block header, also referred to
+by the next block) along with the ``PartSetHeader``. The
+``PartSetHeader`` is explained below and is used internally to
+orchestrate the p2p propagation. For clients, it is basically opaque
+bytes, but they must match for all votes.
+
+PartSetHeader
+~~~~~~~~~~~~~
+
+The
+`PartSetHeader `__
+contains the total number of pieces in a
+`PartSet `__,
+and the Merkle root hash of those pieces.
+
+PartSet
+~~~~~~~
+
+PartSet is used to split a byteslice of data into parts (pieces) for
+transmission.
By splitting data into smaller parts and computing a
+Merkle root hash on the list, you can verify that a part is legitimately
+part of the complete data, and the part can be forwarded to other peers
+before all the parts are known. In short, it's a fast way to securely
+propagate a large chunk of data (like a block) over a gossip network.
+
+PartSet was inspired by the LibSwift project.
+
+Usage:
+
+.. code:: go
+
+    data := RandBytes(2 << 20) // Something large
+
+    partSet := NewPartSetFromData(data)
+    partSet.Total()     // Total number of 4KB parts
+    partSet.Count()     // Equal to the Total, since we already have all the parts
+    partSet.Hash()      // The Merkle root hash
+    partSet.BitArray()  // A BitArray of partSet.Total() 1's
+
+    header := partSet.Header() // Send this to the peer
+    header.Total        // Total number of parts
+    header.Hash         // The merkle root hash
+
+    // Now we'll reconstruct the data from the parts
+    partSet2 := NewPartSetFromHeader(header)
+    partSet2.Total()    // Same total as partSet.Total()
+    partSet2.Count()    // Zero, since this PartSet doesn't have any parts yet.
+    partSet2.Hash()     // Same hash as in partSet.Hash()
+    partSet2.BitArray() // A BitArray of partSet.Total() 0's
+
+    // In a gossip network the parts would arrive in arbitrary order, perhaps
+    // in response to explicit requests for parts, or optimistically in response
+    // to the receiving peer's partSet.BitArray().
+    for !partSet2.IsComplete() {
+        part := receivePartFromGossipNetwork()
+        added, err := partSet2.AddPart(part)
+        if err != nil {
+            // A wrong part,
+            // the merkle trail does not hash to partSet2.Hash()
+        } else if !added {
+            // A duplicate part already received
+        }
+    }
+
+    data2, _ := ioutil.ReadAll(partSet2.GetReader())
+    bytes.Equal(data, data2) // true
diff --git a/docs/specification/byzantine-consensus-algorithm.rst b/docs/specification/byzantine-consensus-algorithm.rst
new file mode 100644
index 000000000..15eab32d7
--- /dev/null
+++ b/docs/specification/byzantine-consensus-algorithm.rst
@@ -0,0 +1,349 @@
+Byzantine Consensus Algorithm
+=============================
+
+Terms
+-----
+
+- The network is composed of optionally connected *nodes*. Nodes
+  directly connected to a particular node are called *peers*.
+- The consensus process in deciding the next block (at some *height*
+  ``H``) is composed of one or many *rounds*.
+- ``NewHeight``, ``Propose``, ``Prevote``, ``Precommit``, and
+  ``Commit`` represent state machine states of a round. (aka
+  ``RoundStep`` or just "step").
+- A node is said to be *at* a given height, round, and step, or at
+  ``(H,R,S)``, or at ``(H,R)`` in short to omit the step.
+- To *prevote* or *precommit* something means to broadcast a `prevote
+  vote `__
+  or `first precommit
+  vote `__
+  for something.
+- A vote *at* ``(H,R)`` is a vote signed with the bytes for ``H`` and
+  ``R`` included in its
+  `sign-bytes `__.
+- *+2/3* is short for "more than 2/3"
+- *1/3+* is short for "1/3 or more"
+- A set of +2/3 of prevotes for a particular block or ``<nil>`` at
+  ``(H,R)`` is called a *proof-of-lock-change* or *PoLC* for short.
+
+State Machine Overview
+----------------------
+
+At each height of the blockchain a round-based protocol is run to
+determine the next block. Each round is composed of three *steps*
+(``Propose``, ``Prevote``, and ``Precommit``), along with two special
+steps ``Commit`` and ``NewHeight``.
+
+In the optimal scenario, the order of steps is:
+
+::
+
+    NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->...
+
+The sequence ``(Propose -> Prevote -> Precommit)`` is called a *round*.
+There may be more than one round required to commit a block at a given
+height. Examples of why more rounds may be required include:
+
+- The designated proposer was not online.
+- The block proposed by the designated proposer was not valid.
+- The block proposed by the designated proposer did not propagate in
+  time.
+- The block proposed was valid, but +2/3 of prevotes for the proposed
+  block were not received in time by enough validator nodes before they
+  reached the ``Precommit`` step. Even though +2/3 of
+  prevotes are necessary to progress to the next step, at least one
+  validator may have voted ``<nil>`` or maliciously voted for something
+  else.
+- The block proposed was valid, and +2/3 of prevotes were received by
+  enough nodes, but +2/3 of precommits for the proposed block were not
+  received by enough validator nodes.
+
+Some of these problems are resolved by moving on to the next round &
+proposer. Others are resolved by increasing certain round timeout
+parameters over each successive round.
+
+State Machine Diagram
+---------------------
+
+::
+
+                             +-------------------------------------+
+                             v                                     |(Wait til `CommitTime+timeoutCommit`)
+                       +-----------+                         +-----+-----+
+          +----------> |  Propose  +--------------+          | NewHeight |
+          |            +-----------+              |          +-----------+
+          |                                       |                ^
+          |(Else, after timeoutPrecommit)         v                |
+    +-----+-----+                           +-----------+          |
+    | Precommit | <------------------------+  Prevote   |          |
+    +-----+-----+                           +-----------+          |
+          |(When +2/3 Precommits for block found)                  |
+          v                                                        |
+    +--------------------------------------------------------------------+
+    |  Commit                                                             |
+    |                                                                     |
+    |  * Set CommitTime = now;                                            |
+    |  * Wait for block, then stage/save/commit block;                    |
+    +--------------------------------------------------------------------+
+
+Background Gossip
+-----------------
+
+A node may not have a corresponding validator private key, but it
+nevertheless plays an active role in the consensus process by relaying
+relevant meta-data, proposals, blocks, and votes to its peers. A node
+that has the private keys of an active validator and is engaged in
+signing votes is called a *validator-node*. All nodes (not just
+validator-nodes) have an associated state (the current height, round,
+and step) and work to make progress.
+
+Between two nodes there exists a ``Connection``, and multiplexed on top
+of this connection are fairly throttled ``Channel``\ s of information.
+An epidemic gossip protocol is implemented among some of these channels
+to bring peers up to speed on the most recent state of consensus. For
+example,
+
+- Nodes gossip ``PartSet`` parts of the current round's proposer's
+  proposed block. A LibSwift inspired algorithm is used to quickly
+  broadcast blocks across the gossip network.
+- Nodes gossip prevote/precommit votes. A node NODE\_A that is ahead of
+  NODE\_B can send NODE\_B prevotes or precommits for NODE\_B's current
+  (or future) round to enable it to progress forward.
+- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change)
+  round if one is proposed.
+- Nodes gossip to nodes lagging in blockchain height with block
+  `commits `__
+  for older blocks.
+- Nodes opportunistically gossip ``HasVote`` messages to hint to peers
+  which votes they already have (a rough sketch follows this list).
+- Nodes broadcast their current state to all neighboring peers (but it
+  is not gossiped further).
+
+There's more, but let's not get ahead of ourselves here.
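+
+As a rough sketch of the kind of meta-data being gossiped, the hint
+messages above have approximately the following shape (illustrative
+only - the real types live in the consensus reactor):
+
+.. code:: go
+
+    // HasVote hints to a peer that we already have a particular vote,
+    // so the peer need not send it to us again.
+    type HasVote struct {
+        Height int64
+        Round  int
+        Type   byte // prevote or precommit
+        Index  int  // index of the validator whose vote we have
+    }
+
+    // NewRoundStep broadcasts our current (height, round, step) to
+    // neighboring peers; it is not gossiped further.
+    type NewRoundStep struct {
+        Height int64
+        Round  int
+        Step   uint8
+    }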
+
+Proposals
+---------
+
+A proposal is signed and published by the designated proposer at each
+round. The proposer is chosen by a deterministic and non-choking round
+robin selection algorithm that selects proposers in proportion to their
+voting power (see the
+`implementation `__).
+
+A proposal at ``(H,R)`` is composed of a block and an optional latest
+``PoLC-Round < R`` which is included iff the proposer knows of one. This
+hints the network to allow nodes to unlock (when safe) to ensure the
+liveness property.
+
+State Machine Spec
+------------------
+
+Propose Step (height:H,round:R)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Upon entering ``Propose``:
+
+- The designated proposer proposes a block at ``(H,R)``.
+
+The ``Propose`` step ends:
+
+- After ``timeoutProposeR`` after entering ``Propose``. --> goto
+  ``Prevote(H,R)``
+- After receiving the proposal block and all prevotes at ``PoLC-Round``.
+  --> goto ``Prevote(H,R)``
+- After `common exit conditions <#common-exit-conditions>`__
+
+Prevote Step (height:H,round:R)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Upon entering ``Prevote``, each validator broadcasts its prevote vote.
+
+- First, if the validator is locked on a block since ``LastLockRound``
+  but now has a PoLC for something else at round ``PoLC-Round`` where
+  ``LastLockRound < PoLC-Round < R``, then it unlocks.
+- If the validator is still locked on a block, it prevotes that.
+- Else, if the proposed block from ``Propose(H,R)`` is good, it
+  prevotes that.
+- Else, if the proposal is invalid or wasn't received on time, it
+  prevotes ``<nil>``.
+
+The ``Prevote`` step ends:
+
+- After +2/3 prevotes for a particular block or ``<nil>``. --> goto
+  ``Precommit(H,R)``
+- After ``timeoutPrevote`` after receiving any +2/3 prevotes. --> goto
+  ``Precommit(H,R)``
+- After `common exit conditions <#common-exit-conditions>`__
+
+Precommit Step (height:H,round:R)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Upon entering ``Precommit``, each validator broadcasts its precommit
+vote.
+
+- If the validator has a PoLC at ``(H,R)`` for a particular block
+  ``B``, it (re)locks (or changes lock to) and precommits ``B`` and sets
+  ``LastLockRound = R``.
+- Else, if the validator has a PoLC at ``(H,R)`` for ``<nil>``, it
+  unlocks and precommits ``<nil>``.
+- Else, it keeps the lock unchanged and precommits ``<nil>``.
+
+A precommit for ``<nil>`` means "I didn't see a PoLC for this round, but
+I did get +2/3 prevotes and waited a bit".
+
+The ``Precommit`` step ends:
+
+- After +2/3 precommits for ``<nil>``. --> goto ``Propose(H,R+1)``
+- After ``timeoutPrecommit`` after receiving any +2/3 precommits. -->
+  goto ``Propose(H,R+1)``
+- After `common exit conditions <#common-exit-conditions>`__
+
+common exit conditions
+^^^^^^^^^^^^^^^^^^^^^^
+
+- After +2/3 precommits for a particular block. --> goto ``Commit(H)``
+- After any +2/3 prevotes received at ``(H,R+x)``. --> goto
+  ``Prevote(H,R+x)``
+- After any +2/3 precommits received at ``(H,R+x)``. --> goto
+  ``Precommit(H,R+x)``
+
+Commit Step (height:H)
+~~~~~~~~~~~~~~~~~~~~~~
+
+- Set ``CommitTime = now()``
+- Wait until block is received. --> goto ``NewHeight(H+1)``
+
+NewHeight Step (height:H)
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Move ``Precommits`` to ``LastCommit`` and increment height.
+- Set ``StartTime = CommitTime+timeoutCommit``
+- Wait until ``StartTime`` to receive straggler commits. --> goto
+  ``Propose(H,0)``
+
+Proofs
+------
+
+Proof of Safety
+~~~~~~~~~~~~~~~
+
+Assume that at most -1/3 of the voting power of validators is byzantine.
+If a validator commits block ``B`` at round ``R``, it's because it saw
++2/3 of precommits at round ``R``. This implies that 1/3+ of honest
+nodes are still locked at round ``R' > R``. These locked validators will
+remain locked until they see a PoLC at ``R' > R``, but this won't happen
+because 1/3+ are locked and honest, so at most -2/3 are available to
+vote for anything other than ``B``.
+
+Proof of Liveness
+~~~~~~~~~~~~~~~~~
+
+If 1/3+ honest validators are locked on two different blocks from
+different rounds, a proposer's ``PoLC-Round`` will eventually cause
+nodes locked from the earlier round to unlock. Eventually, the
+designated proposer will be one that is aware of a PoLC at the later
+round. Also, ``timeoutProposeR`` increments with round ``R``, while the
+size of a proposal is capped, so eventually the network is able to
+"fully gossip" the whole proposal (e.g. the block & PoLC).
+
+Proof of Fork Accountability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Define the JSet (justification-vote-set) at height ``H`` of a validator
+``V1`` to be all the votes signed by the validator at ``H`` along with
+justification PoLC prevotes for each lock change. For example, if ``V1``
+signed the following precommits: ``Precommit(B1 @ round 0)``,
+``Precommit(<nil> @ round 1)``, ``Precommit(B2 @ round 4)`` (note that
+no precommits were signed for rounds 2 and 3, and that's ok), then
+``Precommit(B1 @ round 0)`` must be justified by a PoLC at round 0, and
+``Precommit(B2 @ round 4)`` must be justified by a PoLC at round 4; but
+the precommit for ``<nil>`` at round 1 is not a lock-change by
+definition, so the JSet for ``V1`` need not include any prevotes at round
+1, 2, or 3 (unless ``V1`` happened to have prevoted for those rounds).
+
+Further, define the JSet at height ``H`` of a set of validators ``VSet``
+to be the union of the JSets for each validator in ``VSet``. For a given
+commit by honest validators at round ``R`` for block ``B`` we can
+construct a JSet to justify the commit for ``B`` at ``R``. We say that a
+JSet *justifies* a commit at ``(H,R)`` if all the committers (validators
+in the commit-set) are each justified in the JSet with no duplicitous
+vote signatures (by the committers).
+
+- **Lemma**: When a fork is detected by the existence of two
+  conflicting `commits <./validators.html#commiting-a-block>`__,
+  the union of the JSets for both commits (if they can be compiled)
+  must include double-signing by at least 1/3+ of the validator set.
+  **Proof**: The commits cannot be at the same round, because that would
+  immediately imply double-signing by 1/3+. Take the union of the JSets
+  of both commits. If there is no double-signing by at least 1/3+ of
+  the validator set in the union, then no honest validator could have
+  precommitted any different block after the first commit. Yet, +2/3
+  did. Reductio ad absurdum.
+
+As a corollary, when there is a fork, an external process can determine
+the blame by requiring each validator to justify all of its round votes.
+Either we will find 1/3+ who cannot justify at least one of their votes,
+and/or we will find 1/3+ who had double-signed.
+
+Alternative algorithm
+~~~~~~~~~~~~~~~~~~~~~
+
+Alternatively, we can take the JSet of a commit to be the "full commit".
+That is, if light clients and validators do not consider a block to be
+committed unless the JSet of the commit is also known, then we get the
+desirable property that if there ever is a fork (e.g. there are two
+conflicting "full commits"), then 1/3+ of the validators are immediately
+punishable for double-signing.
+
+There are many ways to ensure that the gossip network efficiently shares
+the JSet of a commit. One solution is to add a new message type that
+tells peers that this node has (or does not have) a +2/3 majority for B
+(or ``<nil>``) at (H,R), and a bitarray of which votes contributed towards
+that majority. Peers can react by responding with appropriate votes.
+
+We will implement such an algorithm for the next iteration of the
+Tendermint consensus protocol.
+
+Other potential improvements include adding more data in votes such as
+the last known PoLC round that caused a lock change, and the last voted
+round/step (or, we may require that validators not skip any votes). This
+may make JSet verification/gossip logic easier to implement.
+
+Censorship Attacks
+~~~~~~~~~~~~~~~~~~
+
+Due to the definition of a block
+`commit `__, any 1/3+
+coalition of validators can halt the blockchain by not broadcasting
+their votes. Such a coalition can also censor particular transactions by
+rejecting blocks that include these transactions, though this would
+result in a significant proportion of block proposals being rejected,
+which would slow down the rate of block commits of the blockchain,
+reducing its utility and value. The malicious coalition might also
+broadcast votes in a trickle so as to grind blockchain block commits to
+a near halt, or engage in any combination of these attacks.
+
+If a global active adversary were also involved, it could partition the
+network in such a way that it may appear that the wrong subset of
+validators were responsible for the slowdown. This is not just a
+limitation of Tendermint, but rather a limitation of all consensus
+protocols whose network is potentially controlled by an active
+adversary.
+
+Overcoming Forks and Censorship Attacks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For these types of attacks, a subset of the validators should coordinate
+through external means to sign a reorg-proposal that chooses a fork
+(and any evidence thereof) and the initial subset of validators with
+their signatures. Validators who sign such a reorg-proposal forego their
+collateral on all other forks. Clients should verify the signatures on
+the reorg-proposal, verify any evidence, and make a judgement or prompt
+the end-user for a decision. For example, a phone wallet app may prompt
+the user with a security warning, while a refrigerator may accept any
+reorg-proposal signed by +1/2 of the original validators.
+
+No non-synchronous Byzantine fault-tolerant algorithm can come to
+consensus when 1/3+ of validators are dishonest, yet a fork assumes that
+1/3+ of validators have already been dishonest by double-signing or
+lock-changing without justification. So, signing the reorg-proposal is a
+coordination problem that cannot be solved by any non-synchronous
+protocol (i.e. automatically, and without making assumptions about the
+reliability of the underlying network). It must be provided by means
+external to the weakly-synchronous Tendermint consensus algorithm. For
+now, we leave the problem of reorg-proposal coordination to human
+coordination via internet media. Validators must take care to ensure
+that there are no significant network partitions, to avoid situations
+where two conflicting reorg-proposals are signed.
+
+Assuming that the external coordination medium and protocol is robust,
+it follows that forks are less of a concern than `censorship
+attacks <#censorship-attacks>`__.
diff --git a/docs/specification/corruption.rst b/docs/specification/corruption.rst
new file mode 100644
index 000000000..6ae19fb18
--- /dev/null
+++ b/docs/specification/corruption.rst
@@ -0,0 +1,70 @@
+Corruption
+==========
+
+Important step
+--------------
+
+Make sure you have a backup of the Tendermint data directory.
+
+Possible causes
+---------------
+
+Remember that most corruption is caused by hardware issues:
+
+- RAID controllers with faulty / worn out battery backup, and an unexpected power loss
+- Hard disk drives with write-back cache enabled, and an unexpected power loss
+- Cheap SSDs with insufficient power-loss protection, and an unexpected power loss
+- Defective RAM
+- Defective or overheating CPU(s)
+
+Other causes can be:
+
+- Database systems configured with fsync=off and an OS crash or power loss
+- Filesystems configured to use write barriers plus a storage layer that
+  ignores write barriers. LVM is a particular culprit.
+- Tendermint bugs
+- Operating system bugs
+- Admin error
+
+  - directly modifying Tendermint data-directory contents
+
+(Source: https://wiki.postgresql.org/wiki/Corruption)
+
+WAL Corruption
+--------------
+
+If the consensus WAL is corrupted at the latest height and you are trying to start
+Tendermint, replay will fail with a panic.
+
+Recovering from data corruption can be hard and time-consuming. Here are two approaches you can take:
+
+1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers.
+2) Try to repair the WAL file manually:
+
+   1. Create a backup of the corrupted WAL file:
+
+      .. code:: bash
+
+          cp "$TMHOME/data/cs.wal/wal" /tmp/corrupted_wal_backup
+
+   2. Use ./scripts/wal2json to create a human-readable version:
+
+      .. code:: bash
+
+          ./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal
+
+   3. Search for a "CORRUPTED MESSAGE" line.
+   4. By looking at the previous message, the message after the corrupted one,
+      and the logs, try to rebuild the message. If the subsequent
+      messages are marked as corrupted too (this may happen if the length header
+      got corrupted or some writes did not make it to the WAL ~ truncation),
+      then remove all the lines starting from the corrupted one and restart
+      Tendermint.
+
+      .. code:: bash
+
+          $EDITOR /tmp/corrupted_wal
+
+   5. After editing, convert this file back into binary form by running:
+
+      .. code:: bash
+
+          ./scripts/json2wal/json2wal /tmp/corrupted_wal > "$TMHOME/data/cs.wal/wal"
diff --git a/docs/specification/fast-sync.rst b/docs/specification/fast-sync.rst
new file mode 100644
index 000000000..c98ec43a3
--- /dev/null
+++ b/docs/specification/fast-sync.rst
@@ -0,0 +1,34 @@
+Fast Sync
+=========
+
+Background
+----------
+
+In a proof-of-work blockchain, syncing with the chain is the same
+process as staying up-to-date with the consensus: download blocks, and
+look for the one with the most total work. In proof-of-stake, the
+consensus process is more complex, as it involves rounds of
+communication between the nodes to determine what block should be
+committed next. Using this process to sync up with the blockchain from
+scratch can take a very long time. It's much faster to just download
+blocks and check the merkle tree of validators than to run the real-time
+consensus gossip protocol.
+
+Fast Sync
+---------
+
+To support faster syncing, tendermint offers a ``fast-sync`` mode, which
+is enabled by default, and can be toggled in the ``config.toml`` or via
+``--fast_sync=false``.
+
+In this mode, the tendermint daemon will sync hundreds of times faster
+than if it used the real-time consensus process. Once caught up, the
+daemon will switch out of fast sync and into the normal consensus mode.
+After running for some time, the node is considered ``caught up`` if it
+has at least one peer and its height is at least as high as the max
+reported peer height. See `the IsCaughtUp
+method `__.
+
+If we're lagging sufficiently, we should go back to fast syncing, but
+this is an open issue:
+https://github.com/tendermint/tendermint/issues/129
diff --git a/docs/specification/genesis.rst b/docs/specification/genesis.rst
new file mode 100644
index 000000000..427c88bb2
--- /dev/null
+++ b/docs/specification/genesis.rst
@@ -0,0 +1,71 @@
+Genesis
+=======
+
+The genesis.json file in ``$TMHOME/config`` defines the initial Tendermint Core
+state upon genesis of the blockchain (`see
+definition `__).
+
+Fields
+~~~~~~
+
+- ``genesis_time``: Official time of blockchain start.
+- ``chain_id``: ID of the blockchain. This must be unique for every
+  blockchain. If your testnet blockchains do not have unique chain IDs,
+  you will have a bad time.
+- ``validators``:
+
+  - ``pub_key``: The first element specifies the pub\_key type. 1 ==
+    Ed25519. The second element is the pubkey bytes.
+  - ``power``: The validator's voting power.
+  - ``name``: Name of the validator (optional).
+
+- ``app_hash``: The expected application hash (as returned by the
+  ``ResponseInfo`` ABCI message) upon genesis. If the app's hash does not
+  match, Tendermint will panic.
+- ``app_state``: The application state (e.g. initial distribution of tokens).
+
+Sample genesis.json
+~~~~~~~~~~~~~~~~~~~
+
+.. code:: json
+
+    {
+      "genesis_time": "2016-02-05T06:02:31.526Z",
+      "chain_id": "chain-tTH4mi",
+      "validators": [
+        {
+          "pub_key": [
+            1,
+            "9BC5112CB9614D91CE423FA8744885126CD9D08D9FC9D1F42E552D662BAA411E"
+          ],
+          "power": 1,
+          "name": "mach1"
+        },
+        {
+          "pub_key": [
+            1,
+            "F46A5543D51F31660D9F59653B4F96061A740FF7433E0DC1ECBC30BE8494DE06"
+          ],
+          "power": 1,
+          "name": "mach2"
+        },
+        {
+          "pub_key": [
+            1,
+            "0E7B423C1635FD07C0FC3603B736D5D27953C1C6CA865BB9392CD79DE1A682BB"
+          ],
+          "power": 1,
+          "name": "mach3"
+        },
+        {
+          "pub_key": [
+            1,
+            "4F49237B9A32EB50682EDD83C48CE9CDB1D02A7CFDADCFF6EC8C1FAADB358879"
+          ],
+          "power": 1,
+          "name": "mach4"
+        }
+      ],
+      "app_hash": "15005165891224E721CB664D15CB972240F5703F",
+      "app_state": {
+        "account": "Bob",
+        "coins": 5000
+      }
+    }
diff --git a/docs/specification/light-client-protocol.rst b/docs/specification/light-client-protocol.rst
new file mode 100644
index 000000000..6c6083b45
--- /dev/null
+++ b/docs/specification/light-client-protocol.rst
@@ -0,0 +1,33 @@
+Light Client Protocol
+=====================
+
+Light clients are an important part of the complete blockchain system
+for most applications. Tendermint provides unique speed and security
+properties for light client applications.
+
+See our `lite package
+`__.
+
+Overview
+--------
+
+The objective of the light client protocol is to get a
+`commit <./validators.html#committing-a-block>`__ for a recent
+`block hash <./block-structure.html#block-hash>`__ where the commit
+includes a majority of signatures from the last known validator set.
+From there, all the application state is verifiable with `merkle
+proofs <./merkle.html#iavl-tree>`__.
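+
+As a sketch of that core check, assuming hypothetical ``Validator`` and
+``PubKey`` types (the real logic lives in the lite package, and real sign
+bytes differ per vote), a commit is accepted once valid signatures carry
+more than 2/3 of the trusted voting power:
+
+.. code:: go
+
+    type PubKey interface {
+        VerifyBytes(msg, sig []byte) bool
+    }
+
+    type Validator struct {
+        Address string
+        PubKey  PubKey
+        Power   int64
+    }
+
+    // VerifyCommit sums the voting power behind valid signatures over
+    // signBytes and requires strictly more than 2/3 of totalPower.
+    func VerifyCommit(vals []Validator, totalPower int64, signBytes []byte, sigs map[string][]byte) bool {
+        var signed int64
+        for _, v := range vals {
+            if sig, ok := sigs[v.Address]; ok && v.PubKey.VerifyBytes(signBytes, sig) {
+                signed += v.Power
+            }
+        }
+        return signed*3 > totalPower*2
+    }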
+
+Properties
+----------
+
+- You get the full collateralized security benefits of Tendermint; no
+  need to wait for confirmations.
+- You get the full speed benefits of Tendermint; transactions commit
+  instantly.
+- You can get the most recent version of the application state
+  non-interactively (without committing anything to the blockchain).
+  For example, this means that you can get the most recent value of a
+  name from the name-registry without worrying about fork censorship
+  attacks, without posting a commit and waiting for confirmations. It's
+  fast, secure, and free!
diff --git a/docs/specification/merkle.rst b/docs/specification/merkle.rst
new file mode 100644
index 000000000..588f24a98
--- /dev/null
+++ b/docs/specification/merkle.rst
@@ -0,0 +1,88 @@
+Merkle
+======
+
+For an overview of Merkle trees, see
+`wikipedia `__.
+
+There are two types of Merkle trees used in Tendermint.
+
+- **IAVL+ Tree**: An immutable self-balancing binary
+  tree for persistent application state
+- **Simple Tree**: A simple compact binary tree for
+  a static list of items
+
+IAVL+ Tree
+----------
+
+The purpose of this data structure is to provide persistent storage for
+key-value pairs (e.g. account state, name-registrar data, and
+per-contract data) such that a deterministic merkle root hash can be
+computed. The tree is balanced using a variant of the `AVL
+algorithm `__, so all operations
+are O(log(n)).
+
+Nodes of this tree are immutable and indexed by their hash. Thus any node
+serves as an immutable snapshot which lets us stage uncommitted
+transactions from the mempool cheaply, and we can instantly roll back to
+the last committed state to process transactions of a newly committed
+block (which may not be the same set of transactions as those from the
+mempool).
+
+In an AVL tree, the heights of the two child subtrees of any node differ
+by at most one. Whenever this condition is violated upon an update, the
+tree is rebalanced by creating O(log(n)) new nodes that point to
+unmodified nodes of the old tree. In the original AVL algorithm, inner
+nodes can also hold key-value pairs. The AVL+ algorithm (note the plus)
+modifies the AVL algorithm to keep all values on leaf nodes, while only
+using branch-nodes to store keys. This simplifies the algorithm while
+minimizing the size of merkle proofs.
+
+In Ethereum, the analog is the `Patricia
+trie `__. There are tradeoffs.
+Keys do not need to be hashed prior to insertion in IAVL+ trees, so this
+provides faster iteration in the key space, which may benefit some
+applications. The logic is simpler to implement, requiring only two
+types of nodes -- inner nodes and leaf nodes. The IAVL+ tree is a binary
+tree, so merkle proofs are much shorter than in the base-16 Patricia trie.
+On the other hand, while the IAVL+ tree provides a deterministic merkle root
+hash, that hash depends on the order of updates. In practice this shouldn't be
+a problem, since you can efficiently encode the tree structure when
+serializing the tree contents.
+
+Simple Tree
+-----------
+
+For merkelizing smaller static lists, use the Simple Tree. The
+transactions and validation signatures of a block are hashed using this
+simple merkle tree logic.
+
+If the number of items is not a power of two, the tree will not be full
+and some leaf nodes will be at different levels. Simple Tree tries to
+keep both sides of the tree the same size, but the left side may be one
+greater.
+
+::
+
+    Simple Tree with 6 items           Simple Tree with 7 items
+
+                *                                  *
+               / \                                / \
+             /     \                            /     \
+           /         \                        /         \
+         /             \                    /             \
+        *               *                  *               *
+       / \             / \                / \             / \
+      /   \           /   \              /   \           /   \
+     /     \         /     \            /     \         /     \
+    *       h2      *       h5         *       *       *       h6
+   / \             / \                / \     / \     / \
+  h0  h1          h3  h4             h0  h1  h2  h3  h4  h5
+
+Simple Tree with Dictionaries
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Simple Tree is used to merkelize a list of items, so to merkelize a
+(short) dictionary of key-value pairs, encode the dictionary as an
+ordered list of ``KVPair`` structs. The block hash is such a hash
+derived from all the fields of the block ``Header``. The state hash is
+similarly derived.
diff --git a/docs/specification/new-spec/README.md b/docs/specification/new-spec/README.md
new file mode 100644
index 000000000..907ddd945
--- /dev/null
+++ b/docs/specification/new-spec/README.md
@@ -0,0 +1 @@
+Spec moved to [docs/spec](https://github.com/tendermint/tendermint/tree/master/docs/spec).
diff --git a/docs/specification/secure-p2p.rst b/docs/specification/secure-p2p.rst
new file mode 100644
index 000000000..de95f0cf0
--- /dev/null
+++ b/docs/specification/secure-p2p.rst
@@ -0,0 +1,78 @@
+Secure P2P
+==========
+
+The Tendermint p2p protocol uses an authenticated encryption scheme
+based on the `Station-to-Station
+Protocol `__.
+The implementation uses
+`golang's `__ `nacl
+box `__ for the actual authenticated
+encryption algorithm.
+
+Each peer generates an ED25519 key-pair to use as a persistent
+(long-term) id.
+
+When two peers establish a TCP connection, they first each generate an
+ephemeral ED25519 key-pair to use for this session, and send each other
+their respective ephemeral public keys. This happens in the clear.
+
+They then each compute the shared secret. The shared secret is the
+multiplication of the peer's ephemeral private key by the other peer's
+ephemeral public key. The result is the same for both peers by the magic
+of `elliptic
+curves `__.
+The shared secret is used as the symmetric key for the encryption
+algorithm.
+
+The two ephemeral public keys are sorted to establish a canonical order.
+Then a 24-byte nonce is generated by concatenating the public keys and
+hashing them with Ripemd160. Note Ripemd160 produces 20-byte hashes, so
+the nonce ends with four 0s.
+
+The nonce is used to seed the encryption - it is critical that the same
+nonce never be used twice with the same private key. For convenience,
+the last bit of the nonce is flipped, giving us two nonces: one for
+encrypting our own messages, one for decrypting our peer's. Whichever
+peer has the higher public key uses the "bit-flipped" nonce for
+encryption.
+
+Now, a challenge is generated by concatenating the ephemeral public keys
+and taking the SHA256 hash.
+
+Each peer signs the challenge with their persistent private key, and
+sends the other peer an AuthSigMsg, containing their persistent public
+key and the signature. On receiving an AuthSigMsg, the peer verifies the
+signature.
+
+The peers are now authenticated.
+
+All future communications can now be encrypted using the shared secret
+and the generated nonces, where each nonce is incremented by one each
+time it is used. The communications maintain Perfect Forward Secrecy, as
+the persistent key pair was not used for generating secrets - only for
+authenticating.
+
+Caveat
+------
+
+This system is still vulnerable to a Man-In-The-Middle attack if the
+persistent public key of the remote node is not known in advance.
+The only way to mitigate this is with a public key authentication
+system, such as the Web-of-Trust or Certificate Authorities. In our
+case, we can use the blockchain itself as a certificate authority to
+ensure that we are connected to at least one validator.
+
+Config
+------
+
+Authenticated encryption is enabled by default.
+
+Additional Reading
+------------------
+
+- `Implementation `__
+- `Original STS paper by Whitfield Diffie, Paul C. van Oorschot and
+  Michael J.
+  Wiener `__
+- `Further work on secret
+  handshakes `__
diff --git a/docs/specification/validators.rst b/docs/specification/validators.rst
new file mode 100644
index 000000000..085994f3d
--- /dev/null
+++ b/docs/specification/validators.rst
@@ -0,0 +1,43 @@
+Validators
+==========
+
+Validators are responsible for committing new blocks in the blockchain.
+These validators participate in the consensus protocol by broadcasting
+*votes* which contain cryptographic signatures signed by each
+validator's private key.
+
+Some Proof-of-Stake consensus algorithms aim to create a "completely"
+decentralized system where all stakeholders (even those who are not
+always available online) participate in the committing of blocks.
+Tendermint has a different approach to block creation. Validators are
+expected to be online, and the set of validators is permissioned/curated
+by some external process. Proof-of-stake is not required, but can be
+implemented on top of Tendermint consensus. That is, validators may be
+required to post collateral on-chain, off-chain, or may not be required
+to post any collateral at all.
+
+Validators have a cryptographic key-pair and an associated amount of
+"voting power". Voting power need not be the same for all validators.
+
+Becoming a Validator
+--------------------
+
+There are two ways to become a validator:
+
+1. Validators can be pre-established in the `genesis
+   state <./genesis.html>`__.
+2. The ABCI app can respond to the EndBlock message with changes to the
+   existing validator set.
+
+Committing a Block
+------------------
+
+*+2/3 is short for "more than 2/3"*
+
+A block is committed when +2/3 of the validator set sign `precommit
+votes <./block-structure.html#vote>`__ for that block at the same
+``round``. The +2/3 set of precommit votes is
+called a `*commit* <./block-structure.html#commit>`__. While any
++2/3 set of precommits for the same block at the same height&round can
+serve as validation, the canonical commit is included in the next block
+(see `LastCommit <./block-structure.html>`__).
diff --git a/docs/specification/wire-protocol.rst b/docs/specification/wire-protocol.rst
new file mode 100644
index 000000000..c0bf3b0ef
--- /dev/null
+++ b/docs/specification/wire-protocol.rst
@@ -0,0 +1,172 @@
+Wire Protocol
+=============
+
+The `Tendermint wire protocol `__
+encodes data in `c-style binary <#binary>`__ and `JSON <#json>`__ form.
+
+Supported types
+---------------
+
+- Primitive types
+
+  - ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64``
+  - ``int8``, ``int16``, ``int32``, ``int64``
+  - ``uint``, ``int``: variable length (un)signed integers
+  - ``string``, ``[]byte``
+  - ``time``
+
+- Derived types
+
+  - structs
+  - var-length arrays of a particular type
+  - fixed-length arrays of a particular type
+  - interfaces: registered union types preceded by a ``type byte``
+  - pointers
+
+Binary
+------
+
+**Fixed-length primitive types** are encoded with 1, 2, 4, or 8 big-endian
+bytes.
+- ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64``:
+  take 1, 2, 4, and 8 bytes respectively
+- ``int8``, ``int16``, ``int32``, ``int64``:
+  take 1, 2, 4, and 8 bytes respectively
+- ``time``: ``int64`` representation of nanoseconds since epoch
+
+**Variable-length integers** are encoded with a single leading byte
+representing the length of the following big-endian bytes. For signed
+negative integers, the most significant bit of the leading byte is a 1.
+
+- ``uint``: 1-byte length prefixed variable-size (0 ~ 255 bytes)
+  unsigned integers
+- ``int``: 1-byte length prefixed variable-size (0 ~ 127 bytes) signed
+  integers
+
+NOTE: While the number 0 (zero) is encoded with a single byte ``x00``,
+the number 1 (one) takes two bytes to represent: ``x0101``. This isn't
+the most efficient representation, but the rules are easier to remember.
+
++---------------+----------------+----------------+
+| number        | binary ``uint``| binary ``int`` |
++===============+================+================+
+| 0             | ``x00``        | ``x00``        |
++---------------+----------------+----------------+
+| 1             | ``x0101``      | ``x0101``      |
++---------------+----------------+----------------+
+| 2             | ``x0102``      | ``x0102``      |
++---------------+----------------+----------------+
+| 256           | ``x020100``    | ``x020100``    |
++---------------+----------------+----------------+
+| 2^(127*8)-1   | ``x7FFFFF...`` | ``x7FFFFF...`` |
++---------------+----------------+----------------+
+| 2^(127*8)     | ``x800100...`` | overflow       |
++---------------+----------------+----------------+
+| 2^(255*8)-1   | ``xFFFFFF...`` | overflow       |
++---------------+----------------+----------------+
+| -1            | n/a            | ``x8101``      |
++---------------+----------------+----------------+
+| -2            | n/a            | ``x8102``      |
++---------------+----------------+----------------+
+| -256          | n/a            | ``x820100``    |
++---------------+----------------+----------------+
+
+**Structures** are encoded by encoding the field values in order of
+declaration.
+
+.. code:: go
+
+    type Foo struct {
+        MyString string
+        MyUint32 uint32
+    }
+    var foo = Foo{"bar", math.MaxUint32}
+
+    /* The binary representation of foo:
+    0103626172FFFFFFFF
+    0103:     `int` encoded length of string, here 3
+    626172:   3 bytes of string "bar"
+    FFFFFFFF: 4 bytes of uint32 MaxUint32
+    */
+
+**Variable-length arrays** are encoded with a leading ``int`` denoting
+the length of the array followed by the binary representation of the
+items. **Fixed-length arrays** are similar but aren't preceded by the
+leading ``int``.
+
+.. code:: go
+
+    foos := []Foo{foo, foo}
+
+    /* The binary representation of foos:
+    01020103626172FFFFFFFF0103626172FFFFFFFF
+    0102:               `int` encoded length of array, here 2
+    0103626172FFFFFFFF: the first `foo`
+    0103626172FFFFFFFF: the second `foo`
+    */
+
+    foos := [2]Foo{foo, foo} // fixed-length array
+
+    /* The binary representation of foos:
+    0103626172FFFFFFFF0103626172FFFFFFFF
+    0103626172FFFFFFFF: the first `foo`
+    0103626172FFFFFFFF: the second `foo`
+    */
+
+**Interfaces** can represent one of any number of concrete types. The
+concrete types of an interface must first be declared with their
+corresponding ``type byte``. An interface is then encoded with the
+leading ``type byte``, then the binary encoding of the underlying
+concrete type.
+
+NOTE: The byte ``x00`` is reserved for the ``nil`` interface value and
+``nil`` pointer values.
+
+.. code:: go
+
+    type Animal interface{}
+    type Dog uint32
+    type Cat string
+
+    RegisterInterface(
+        struct{ Animal }{},          // Convenience for referencing the 'Animal' interface
+        ConcreteType{Dog(0), 0x01},  // Register the byte 0x01 to denote a Dog
+        ConcreteType{Cat(""), 0x02}, // Register the byte 0x02 to denote a Cat
+    )
+
+    var animal Animal = Dog(02)
+
+    /* The binary representation of animal:
+    010102
+    01:   the type byte for a `Dog`
+    0102: the bytes of Dog(02)
+    */
+
+**Pointers** are encoded with a single leading byte ``x00`` for ``nil``
+pointers, otherwise encoded with a leading byte ``x01`` followed by the
+binary encoding of the value pointed to.
+
+NOTE: It's easy to convert pointer types into interface types, since the
+``type byte`` ``x00`` is always ``nil``.
+
+JSON
+----
+
+The JSON codec is compatible with the `binary <#binary>`__ codec,
+and is fairly intuitive if you're already familiar with golang's JSON
+encoding. Some quirks are noted below:
+
+- variable-length and fixed-length bytes are encoded as uppercase
+  hexadecimal strings
+- interface values are encoded as an array of two items:
+  ``[type_byte, concrete_value]``
+- times are encoded as rfc2822 strings
diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md
new file mode 100644
index 000000000..0453bdad7
--- /dev/null
+++ b/docs/tendermint-core/configuration.md
@@ -0,0 +1,218 @@
+# Configuration
+
+Tendermint Core can be configured via a TOML file in
+`$TMHOME/config/config.toml`. Some of these parameters can be overridden by
+command-line flags. For most users, the options in the
+`##### main base configuration options #####` section are intended to be
+modified, while the config options further below are intended for advanced
+power users.
+
+## Options
+
+The default configuration file created by `tendermint init` has all
+the parameters set to their default values. It will look something
+like the file below; however, double check by inspecting the
+`config.toml` created with your version of `tendermint` installed:
+
+```
+# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "anonymous" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb +db_backend = "leveldb" + +# Database directory +db_path = "data" + +# Output level for logging +log_level = "state:info,*:error" + +##### additional base config options ##### + +# The ID of the chain to join (should be signed with every transaction and vote) +chain_id = "" + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "priv_validator.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:26657" + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 450 + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +# Do not add private peers to this list if you don't want them advertised +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "addrbook.json" + +# Set true for strict address routability rules +addr_book_strict = true + +# Time to wait before flushing messages out on the connection, in ms +flush_throttle_timeout = 100 + +# Maximum number of peers to connect to +max_num_peers = 50 + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 512000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 512000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. 
If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +##### mempool configuration options ##### +[mempool] + +recheck = true +recheck_empty = true +broadcast = true +wal_dir = "data/mempool.wal" + +# size of the mempool +size = 100000 + +# size of the cache (used to filter transactions we saw earlier) +cache_size = 100000 + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +# All timeouts are in milliseconds +timeout_propose = 3000 +timeout_propose_delta = 500 +timeout_prevote = 1000 +timeout_prevote_delta = 500 +timeout_precommit = 1000 +timeout_precommit_delta = 500 +timeout_commit = 1000 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks in seconds +create_empty_blocks = true +create_empty_blocks_interval = 0 + +# Reactor sleep duration parameters are in milliseconds +peer_gossip_sleep_duration = 100 +peer_query_maj23_sleep_duration = 2000 + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is tx hash) +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags. Note this may be not +# desirable (see the comment above). IndexTags has a precedence over +# IndexAllTags (i.e. when given both, IndexTags will be indexed). +index_all_tags = false + +##### instrumentation configuration options ##### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 +``` diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md new file mode 100644 index 000000000..83dab3870 --- /dev/null +++ b/docs/tendermint-core/how-to-read-logs.md @@ -0,0 +1,142 @@ +# How to read logs + +## Walkabout example + +We first create three connections (mempool, consensus and query) to the +application (running `kvstore` locally in this case). + +``` +I[10-04|13:54:27.364] Starting multiAppConn module=proxy impl=multiAppConn +I[10-04|13:54:27.366] Starting localClient module=abci-client connection=query impl=localClient +I[10-04|13:54:27.366] Starting localClient module=abci-client connection=mempool impl=localClient +I[10-04|13:54:27.367] Starting localClient module=abci-client connection=consensus impl=localClient +``` + +Then Tendermint Core and the application perform a handshake. 
+ +``` +I[10-04|13:54:27.367] ABCI Handshake module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +I[10-04|13:54:27.368] ABCI Replay Blocks module=consensus appHeight=90 storeHeight=90 stateHeight=90 +I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +``` + +After that, we start a few more things like the event switch, reactors, +and perform UPNP discover in order to detect the IP address. + +``` +I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch +I[10-04|13:54:27.375] This node is a validator module=consensus +I[10-04|13:54:27.379] Starting Node module=main impl=Node +I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656 +I[10-04|13:54:27.382] Getting UPNP external address module=p2p +I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout" +I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656) +I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch" +I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor +I[10-04|13:54:30.387] Starting BlockchainReactor module=blockchain impl=BlockchainReactor +I[10-04|13:54:30.387] Starting ConsensusReactor module=consensus impl=ConsensusReactor +I[10-04|13:54:30.387] ConsensusReactor module=consensus fastSync=false +I[10-04|13:54:30.387] Starting ConsensusState module=consensus impl=ConsensusState +I[10-04|13:54:30.387] Starting WAL module=consensus wal=/home/vagrant/.tendermint/data/cs.wal/wal impl=WAL +I[10-04|13:54:30.388] Starting TimeoutTicker module=consensus impl=TimeoutTicker +``` + +Notice the second row where Tendermint Core reports that "This node is a +validator". It also could be just an observer (regular node). + +Next we replay all the messages from the WAL. + +``` +I[10-04|13:54:30.390] Catchup by replaying consensus messages module=consensus height=91 +I[10-04|13:54:30.390] Replay: New Step module=consensus height=91 round=0 step=RoundStepNewHeight +I[10-04|13:54:30.390] Replay: Done module=consensus +``` + +"Started node" message signals that everything is ready for work. + +``` +I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:26657 module=rpc-server +I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:26656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:26657])}" +``` + +Next follows a standard block creation cycle, where we enter a new +round, propose a block, receive more than 2/3 of prevotes, then +precommits and finally have a chance to commit a block. For details, +please refer to [Consensus +Overview](./introduction.md#consensus-overview) or [Byzantine Consensus +Algorithm](./spec/consensus). + +``` +I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus +I[10-04|13:54:30.393] enterPropose(91/0). 
Current: 91/0/RoundStepNewRound module=consensus
+I[10-04|13:54:30.393] enterPropose: Our turn to propose module=consensus proposer=125B0E3C5512F5C2B0E1109E31885C4511570C42 privValidator="PrivValidator{125B0E3C5512F5C2B0E1109E31885C4511570C42 LH:90, LR:0, LS:3}"
+I[10-04|13:54:30.394] Signed proposal module=consensus height=91 round=0 proposal="Proposal{91/0 1:21B79872514F (-1,:0:000000000000) {/10EDEDD7C84E.../}}"
+I[10-04|13:54:30.397] Received complete proposal block module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E
+I[10-04|13:54:30.397] enterPrevote(91/0). Current: 91/0/RoundStepPropose module=consensus
+I[10-04|13:54:30.397] enterPrevote: ProposalBlock is valid module=consensus height=91 round=0
+I[10-04|13:54:30.398] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" err=null
+I[10-04|13:54:30.401] Added to prevote module=consensus vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" prevotes="VoteSet{H:91 R:0 T:1 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}"
+I[10-04|13:54:30.401] enterPrecommit(91/0). Current: 91/0/RoundStepPrevote module=consensus
+I[10-04|13:54:30.401] enterPrecommit: +2/3 prevoted proposal block. Locking module=consensus hash=F671D562C7B9242900A286E1882EE64E5556FE9E
+I[10-04|13:54:30.402] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" err=null
+I[10-04|13:54:30.404] Added to precommit module=consensus vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" precommits="VoteSet{H:91 R:0 T:2 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}"
+I[10-04|13:54:30.404] enterCommit(91/0). Current: 91/0/RoundStepPrecommit module=consensus
+I[10-04|13:54:30.405] Finalizing commit of block with 0 txs module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E root=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD
+I[10-04|13:54:30.405] Block{
+  Header{
+    ChainID: test-chain-3MNw2N
+    Height: 91
+    Time: 2017-10-04 13:54:30.393 +0000 UTC
+    NumTxs: 0
+    LastBlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544
+    LastCommit: 56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D
+    Data:
+    Validators: CE25FBFF2E10C0D51AA1A07C064A96931BC8B297
+    App: E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD
+  }#F671D562C7B9242900A286E1882EE64E5556FE9E
+  Data{
+
+  }#
+  Commit{
+    BlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544
+    Precommits: Vote{0:125B0E3C5512 90/00/2(Precommit) F15AB8BEF9A6 {/FE98E2B956F0.../}}
+  }#56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D
+}#F671D562C7B9242900A286E1882EE64E5556FE9E module=consensus
+I[10-04|13:54:30.408] Executed block module=state height=91 validTxs=0 invalidTxs=0
+I[10-04|13:54:30.410] Committed state module=state height=91 txs=0 hash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD
+I[10-04|13:54:30.410] Recheck txs module=mempool numtxs=0 height=91
+```
+
+## List of modules
+
+Here is the list of modules you may encounter in Tendermint's log, along
+with a brief overview of what they do.
+
+- `abci-client` As mentioned in [Application Development Guide](./app-development.md), Tendermint acts as an ABCI
+  client with respect to the application and maintains 3 connections:
+  mempool, consensus and query. The code used by Tendermint Core can
+  be found [here](https://github.com/tendermint/tendermint/tree/develop/abci/client).
+- `blockchain` Provides storage, pool (a group of peers), and reactor
+  for both storing and exchanging blocks between peers.
+- `consensus` The heart of Tendermint Core, which is the
+  implementation of the consensus algorithm. Includes two
+  "submodules": `wal` (write-ahead logging) for ensuring data
+  integrity and `replay` to replay blocks and messages on recovery
+  from a crash.
+- `events` Simple event notification system. The list of events can be
+  found
+  [here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
+  You can subscribe to them by calling the `subscribe` RPC method. Refer
+  to [RPC docs](./specification/rpc.md) for additional information.
+- `mempool` Mempool module handles all incoming transactions, whether
+  they come from peers or the application.
+- `p2p` Provides an abstraction around peer-to-peer communication. For
+  more details, please check out the
+  [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
+- `rpc` [Tendermint's RPC](./specification/rpc.md).
+- `rpc-server` RPC server. For implementation details, please read the
+  [README](https://github.com/tendermint/tendermint/blob/master/rpc/lib/README.md).
+- `state` Represents the latest state and execution submodule, which
+  executes blocks against the application.
+- `types` A collection of the publicly exposed types and methods to
+  work with them.
diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md
new file mode 100644
index 000000000..b469c6890
--- /dev/null
+++ b/docs/tendermint-core/metrics.md
@@ -0,0 +1,42 @@
+# Metrics
+
+Tendermint can report and serve Prometheus metrics, which in turn can
+be consumed by Prometheus collector(s).
+
+This functionality is disabled by default.
+
+To enable the Prometheus metrics, set `instrumentation.prometheus=true` in your
+config file. Metrics will be served under `/metrics` on port 26660 by default.
+The listen address can be changed in the config file (see
+`instrumentation.prometheus_listen_addr`).
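+
+As a quick sanity check (assuming the node runs locally with the default
+listen address; the metric names are listed below):
+
+```
+curl -s http://localhost:26660/metrics | grep consensus_height
+```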
+
+## List of available metrics
+
+The following metrics are available:
+
+| Name                                 | Type      | Since  | Description                                                          |
+| ------------------------------------ | --------- | ------ | -------------------------------------------------------------------- |
+| consensus_height                     | Gauge     | 0.21.0 | Height of the chain                                                  |
+| consensus_validators                 | Gauge     | 0.21.0 | Number of validators                                                 |
+| consensus_validators_power           | Gauge     | 0.21.0 | Total voting power of all validators                                 |
+| consensus_missing_validators         | Gauge     | 0.21.0 | Number of validators who did not sign                                |
+| consensus_missing_validators_power   | Gauge     | 0.21.0 | Total voting power of the missing validators                         |
+| consensus_byzantine_validators       | Gauge     | 0.21.0 | Number of validators who tried to double sign                        |
+| consensus_byzantine_validators_power | Gauge     | 0.21.0 | Total voting power of the byzantine validators                       |
+| consensus_block_interval_seconds     | Histogram | 0.21.0 | Time between this and the last block (Block.Header.Time) in seconds  |
+| consensus_rounds                     | Gauge     | 0.21.0 | Number of rounds                                                     |
+| consensus_num_txs                    | Gauge     | 0.21.0 | Number of transactions                                               |
+| mempool_size                         | Gauge     | 0.21.0 | Number of uncommitted transactions                                   |
+| consensus_total_txs                  | Gauge     | 0.21.0 | Total number of transactions committed                               |
+| consensus_block_size_bytes           | Gauge     | 0.21.0 | Block size in bytes                                                  |
+| p2p_peers                            | Gauge     | 0.21.0 | Number of peers the node is connected to                             |
+
+## Useful queries
+
+Percentage of missing + byzantine validators:
+
+```
+((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100
+```
diff --git a/docs/tendermint-core/rpc.md b/docs/tendermint-core/rpc.md
new file mode 100644
index 000000000..2f3a72c74
--- /dev/null
+++ b/docs/tendermint-core/rpc.md
@@ -0,0 +1,3 @@
+# RPC
+
+The RPC documentation is hosted [here](https://tendermint.github.io/slate) and is generated by the CI from our [Slate repo](https://github.com/tendermint/slate). To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core).
diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md
new file mode 100644
index 000000000..181d09428
--- /dev/null
+++ b/docs/tendermint-core/running-in-production.md
@@ -0,0 +1,244 @@
+# Running in production
+
+## Logging
+
+The default logging level (`main:info,state:info,*:`) should suffice for
+normal operation. Read [this
+post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
+for details on how to configure the `log_level` config variable. The
+list of modules can be found [here](./how-to-read-logs.md#list-of-modules). If
+you're trying to debug Tendermint or are asked to provide logs with debug
+logging level, you can do so by running tendermint with
+`--log_level="*:debug"`.
+
+## DOS Exposure and Mitigation
+
+Validators are expected to set up a [Sentry Node
+Architecture](https://blog.cosmos.network/tendermint-explained-bringing-bft-based-pos-to-the-public-blockchain-domain-f22e274a0fdb)
+to prevent denial-of-service attacks. You can read more about it
+[here](../interviews/tendermint-bft.md).
+
+### P2P
+
+The core of the Tendermint peer-to-peer system is `MConnection`. Each
+connection has `MaxPacketMsgPayloadSize`, which is the maximum packet
+size, and bounded send & receive queues. 
One can impose restrictions on
+the send & receive rate per connection (`SendRate`, `RecvRate`).
+
+### RPC
+
+Endpoints returning multiple entries are limited by default to return 30
+elements (100 max).
+
+Rate-limiting and authentication are other key aspects of protecting
+against DOS attacks. While in the future we may implement these
+features, for now, validators are expected to use external tools like
+[NGINX](https://www.nginx.com/blog/rate-limiting-nginx/) or
+[traefik](https://docs.traefik.io/configuration/commons/#rate-limiting)
+to achieve the same things.
+
+## Debugging Tendermint
+
+If you ever have to debug Tendermint, the first thing you should
+probably do is check out the logs. See ["How to read
+logs"](./how-to-read-logs.md), where we explain what certain log
+statements mean.
+
+If things are still not clear after skimming through the logs, the next
+step is to query the `/status` RPC endpoint. It provides the necessary
+info: whether the node is syncing or not, what height it is on, etc.
+
+```
+curl http(s)://{ip}:{rpcPort}/status
+```
+
+`dump_consensus_state` will give you a detailed overview of the
+consensus state (proposer, latest validators, peer states). From it,
+you should be able to figure out why, for example, the network has
+halted.
+
+```
+curl http(s)://{ip}:{rpcPort}/dump_consensus_state
+```
+
+There is a reduced version of this endpoint - `consensus_state`, which
+returns just the votes seen at the current height.
+
+If you still need help, you can ask at:
+
+- [Github Issues](https://github.com/tendermint/tendermint/issues)
+- [StackOverflow
+  questions](https://stackoverflow.com/questions/tagged/tendermint)
+
+## Monitoring Tendermint
+
+Each Tendermint instance has a standard `/health` RPC endpoint, which
+responds with 200 (OK) if everything is fine and 500 (or no response)
+if something is wrong.
+
+Other useful endpoints include the previously mentioned `/status`, as
+well as `/net_info` and `/validators`.
+
+We have a small tool called `tm-monitor`, which outputs information from
+the endpoints above plus some statistics. The tool can be found
+[here](https://github.com/tendermint/tools/tree/master/tm-monitor).
+
+Tendermint can also report and serve Prometheus metrics. See
+[Metrics](./metrics.md).
+
+## What happens when my app dies?
+
+You are supposed to run Tendermint under a [process
+supervisor](https://en.wikipedia.org/wiki/Process_supervision) (like
+systemd or runit). It will ensure Tendermint is always running (despite
+possible errors).
+
+Getting back to the original question, if your application dies,
+Tendermint will panic. After a process supervisor restarts your
+application, Tendermint should be able to reconnect successfully. The
+order in which the processes are restarted does not matter.
+
+## Signal handling
+
+We catch SIGINT and SIGTERM and try to clean up nicely. For other
+signals we use the default behaviour in Go: [Default behavior of signals
+in Go
+programs](https://golang.org/pkg/os/signal/#hdr-Default_behavior_of_signals_in_Go_programs).
+
+## Hardware
+
+### Processor and Memory
+
+While actual specs vary depending on the load and validator count, the
+minimal requirements are:
+
+- 1GB RAM
+- 25GB of disk space
+- 1.4 GHz CPU
+
+SSD disks are preferable for applications with high transaction
+throughput. 
+
+Recommended:
+
+- 2GB RAM
+- 100GB SSD
+- x64 2.0 GHz CPU (2 vCPUs)
+
+For now, Tendermint stores all the history, so it may require
+significant disk space over time. We are planning to implement state
+syncing (see
+[this issue](https://github.com/tendermint/tendermint/issues/828)),
+after which storing all the past blocks will no longer be necessary.
+
+### Operating Systems
+
+Tendermint can be compiled for a wide range of operating systems thanks
+to the Go language (the list of \$OS/\$ARCH pairs can be found
+[here](https://golang.org/doc/install/source#environment)).
+
+While we do not favor any operating system, more secure and stable Linux
+server distributions (like CentOS) should be preferred over desktop
+operating systems (like Mac OS).
+
+### Miscellaneous
+
+NOTE: if you are going to use Tendermint on a public network, make sure
+you read [hardware recommendations (see "4.
+Hardware")](https://cosmos.network/validators) for a validator in the
+Cosmos network.
+
+## Configuration parameters
+
+- `p2p.flush_throttle_timeout`, `p2p.max_packet_msg_payload_size`,
+  `p2p.send_rate`, `p2p.recv_rate`
+
+If you are going to use Tendermint on a private network and you have a
+private high-speed network among your peers, it makes sense to lower the
+flush throttle timeout and increase the other params.
+
+```
+[p2p]
+
+send_rate=20000000 # 20MB/s
+recv_rate=20000000 # 20MB/s
+flush_throttle_timeout=10
+max_packet_msg_payload_size=10240 # 10KB
+```
+
+- `mempool.recheck`
+
+After every block, Tendermint rechecks every transaction left in the
+mempool to see if transactions committed in that block affected the
+application state, since some of the remaining transactions may have
+become invalid. If that does not apply to your application, you can
+disable it by setting `mempool.recheck=false`.
+
+- `mempool.broadcast`
+
+Setting this to false will stop the mempool from relaying transactions
+to other peers until they are included in a block. It means only the
+peer you send the tx to will see it until it is included in a block.
+
+- `consensus.skip_timeout_commit`
+
+We want `skip_timeout_commit=false` when there is economics on the line
+because proposers should wait to hear more votes. But if you don't
+care about that and want the fastest consensus, you can skip it. It will
+be kept false by default for public deployments (e.g. [Cosmos
+Hub](https://cosmos.network/intro/hub)) while for enterprise
+applications, setting it to true is not a problem.
+
+- `consensus.peer_gossip_sleep_duration`
+
+You can try to reduce the time your node sleeps before checking if
+there's something to send its peers.
+
+- `consensus.timeout_commit`
+
+You can also try lowering `timeout_commit` (the time we sleep before
+proposing the next block).
+
+- `p2p.addr_book_strict`
+
+By default, Tendermint checks whether a peer's address is routable before
+saving it to the address book. An address is considered routable if the IP
+is [valid and within allowed
+ranges](https://github.com/tendermint/tendermint/blob/27bd1deabe4ba6a2d9b463b8f3e3f1e31b993e61/p2p/netaddress.go#L209).
+
+This may not be the case for private networks, where your IP range is usually
+strictly limited and private. If that is the case, you need to set
+`addr_book_strict` to `false` (i.e. turn it off).
+
+- `rpc.max_open_connections`
+
+By default, the number of simultaneous connections is limited because most
+OSes give you a limited number of file descriptors.
+
+If you want to accept a greater number of connections, you will need to
+increase these limits.
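+
+Before tuning, you can check the current per-process limit on open files (a
+generic Linux/BSD check, not specific to Tendermint):
+
+```
+ulimit -n        # print the current soft limit on open file descriptors
+ulimit -n 65535  # raise it for the current shell, up to the hard limit
+```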
+
+[Sysctls to tune the system to be able to open more connections](https://github.com/satori-com/tcpkali/blob/master/doc/tcpkali.man.md#sysctls-to-tune-the-system-to-be-able-to-open-more-connections)
+
+...for N connections, such as 50k:
+
+```
+kern.maxfiles=10000+2*N         # BSD
+kern.maxfilesperproc=100+2*N    # BSD
+kern.ipc.maxsockets=10000+2*N   # BSD
+fs.file-max=10000+2*N           # Linux
+net.ipv4.tcp_max_orphans=N      # Linux
+
+# For load-generating clients.
+net.ipv4.ip_local_port_range="10000 65535"  # Linux.
+net.inet.ip.portrange.first=10000           # BSD/Mac.
+net.inet.ip.portrange.last=65535            # (Enough for N < 55535)
+net.ipv4.tcp_tw_reuse=1                     # Linux
+net.inet.tcp.maxtcptw=2*N                   # BSD
+
+# If using netfilter on Linux:
+net.netfilter.nf_conntrack_max=N
+echo $((N/8)) > /sys/module/nf_conntrack/parameters/hashsize
+```
+
+A similar option exists for limiting the number of gRPC connections:
+`rpc.grpc_max_open_connections`.
diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md
new file mode 100644
index 000000000..21280b97b
--- /dev/null
+++ b/docs/tendermint-core/using-tendermint.md
@@ -0,0 +1,474 @@
+# Using Tendermint
+
+This is a guide to using the `tendermint` program from the command line.
+It assumes only that you have the `tendermint` binary installed and have
+some rudimentary idea of what Tendermint and ABCI are.
+
+You can see the help menu with `tendermint --help`, and the version
+number with `tendermint version`.
+
+## Directory Root
+
+The default directory for blockchain data is `~/.tendermint`. Override
+this by setting the `TMHOME` environment variable.
+
+## Initialize
+
+Initialize the root directory by running:
+
+```
+tendermint init
+```
+
+This will create a new private key (`priv_validator.json`), and a
+genesis file (`genesis.json`) containing the associated public key, in
+`$TMHOME/config`. This is all that's necessary to run a local testnet
+with one validator.
+
+For more elaborate initialization, see the testnet command:
+
+```
+tendermint testnet --help
+```
+
+## Run
+
+To run a Tendermint node, use
+
+```
+tendermint node
+```
+
+By default, Tendermint will try to connect to an ABCI application on
+`127.0.0.1:26658`. If you have the `kvstore` ABCI app
+installed, run it in another window. If you don't, kill Tendermint and
+run an in-process version of the `kvstore` app:
+
+```
+tendermint node --proxy_app=kvstore
+```
+
+After a few seconds you should see blocks start streaming in. Note that
+blocks are produced regularly, even if there are no transactions. See
+_No Empty Blocks_, below, to modify this setting.
+
+Tendermint supports in-process versions of the `counter`, `kvstore` and
+`nil` apps that ship as examples with `abci-cli`. It's easy to compile
+your own app in-process with Tendermint if it's written in Go. 
If your
+app is not written in Go, simply run it in another process, and use the
+`--proxy_app` flag to specify the address of the socket it is listening
+on, for instance:
+
+```
+tendermint node --proxy_app=/var/run/abci.sock
+```
+
+## Transactions
+
+To send a transaction, use `curl` to make requests to the Tendermint RPC
+server, for example:
+
+```
+curl http://localhost:26657/broadcast_tx_commit?tx=\"abcd\"
+```
+
+We can see the chain's status at the `/status` end-point:
+
+```
+curl http://localhost:26657/status | json_pp
+```
+
+and the `latest_app_hash` in particular:
+
+```
+curl http://localhost:26657/status | json_pp | grep latest_app_hash
+```
+
+Visit http://localhost:26657 in your browser to see the list of other
+endpoints. Some take no arguments (like `/status`), while others specify
+the argument name and use `_` as a placeholder.
+
+### Formatting
+
+The following nuances when sending/formatting transactions should be
+taken into account:
+
+With `GET`:
+
+To send a UTF8 string byte array, quote the value of the tx parameter:
+
+```
+curl 'http://localhost:26657/broadcast_tx_commit?tx="hello"'
+```
+
+which sends a 5 byte transaction: "h e l l o" \[68 65 6c 6c 6f\].
+
+Note the URL must be wrapped with single quotes, else bash will ignore
+the double quotes. To avoid the single quotes, escape the double quotes:
+
+```
+curl http://localhost:26657/broadcast_tx_commit?tx=\"hello\"
+```
+
+Using a special character:
+
+```
+curl 'http://localhost:26657/broadcast_tx_commit?tx="€5"'
+```
+
+sends a 4 byte transaction: "€5" (UTF8) \[e2 82 ac 35\].
+
+To send as raw hex, omit quotes AND prefix the hex string with `0x`:
+
+```
+curl http://localhost:26657/broadcast_tx_commit?tx=0x01020304
+```
+
+which sends a 4 byte transaction: \[01 02 03 04\].
+
+With `POST` (using `json`), the raw hex must be `base64` encoded:
+
+```
+curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657
+```
+
+which sends the same 4 byte transaction: \[01 02 03 04\].
+
+Note that raw hex cannot be used in `POST` transactions.
+
+## Reset
+
+**WARNING: UNSAFE** Only do this in development and only if you can
+afford to lose all blockchain data!
+
+To reset a blockchain, stop the node, remove the `~/.tendermint/data`
+directory and run
+
+```
+tendermint unsafe_reset_priv_validator
+```
+
+This final step is necessary to reset the `priv_validator.json`, which
+otherwise prevents you from making conflicting votes in the consensus
+(something that could get you in trouble if you do it on a real
+blockchain). If you don't reset the `priv_validator.json`, your fresh
+new blockchain will not make any blocks.
+
+## Configuration
+
+Tendermint uses a `config.toml` for configuration. For details, see [the
+config specification](./specification/configuration.md).
+
+Notable options include the socket address of the application
+(`proxy_app`), the listening address of the Tendermint peer
+(`p2p.laddr`), and the listening address of the RPC server
+(`rpc.laddr`).
+
+Some fields from the config file can be overwritten with flags.
+
+## No Empty Blocks
+
+This much-requested feature was implemented in version 0.10.3. While the
+default behaviour of `tendermint` is still to create blocks
+approximately once per second, it is possible to disable empty blocks or
+set a block creation interval. In the former case, blocks will be
+created when there are new transactions or when the AppHash changes.
+
+To configure Tendermint to not produce empty blocks unless there are
+transactions or the app hash changes, run Tendermint with this
+additional flag:
+
+```
+tendermint node --consensus.create_empty_blocks=false
+```
+
+or set the configuration via the `config.toml` file:
+
+```
+[consensus]
+create_empty_blocks = false
+```
+
+Remember: because the default is to _create empty blocks_, avoiding
+empty blocks requires the config option to be set to `false`.
+
+The block interval setting allows for a delay (in seconds) between the
+creation of each new empty block. It is set via the `config.toml`:
+
+```
+[consensus]
+create_empty_blocks_interval = 5
+```
+
+With this setting, empty blocks will be produced every 5s if no block
+has been produced otherwise, regardless of the value of
+`create_empty_blocks`.
+
+## Broadcast API
+
+Earlier, we used the `broadcast_tx_commit` endpoint to send a
+transaction. When a transaction is sent to a Tendermint node, it will
+be run via `CheckTx` against the application. If it passes `CheckTx`, it
+will be included in the mempool, broadcast to other peers, and
+eventually included in a block.
+
+Since there are multiple phases to processing a transaction, we offer
+multiple endpoints to broadcast a transaction:
+
+```
+/broadcast_tx_async
+/broadcast_tx_sync
+/broadcast_tx_commit
+```
+
+These correspond to no-processing, processing through the mempool, and
+processing through a block, respectively. That is, `broadcast_tx_async`
+will return right away without waiting to hear if the transaction is
+even valid, while `broadcast_tx_sync` will return with the result of
+running the transaction through `CheckTx`. Using `broadcast_tx_commit`
+will wait until the transaction is committed in a block or until some
+timeout is reached, but will return right away if the transaction does
+not pass `CheckTx`. The return value for `broadcast_tx_commit` includes
+two fields, `check_tx` and `deliver_tx`, pertaining to the result of
+running the transaction through those ABCI messages.
+
+The benefit of using `broadcast_tx_commit` is that the request returns
+after the transaction is committed (i.e. included in a block), but that
+can take on the order of a second. For a quick result, use
+`broadcast_tx_sync`, but the transaction will not be committed until
+later, and by that point its effect on the state may change.
+
+## Tendermint Networks
+
+When `tendermint init` is run, both a `genesis.json` and
+`priv_validator.json` are created in `~/.tendermint/config`. The
+`genesis.json` might look like:
+
+```
+{
+  "validators" : [
+    {
+      "pub_key" : {
+        "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
+        "type" : "tendermint/PubKeyEd25519"
+      },
+      "power" : 10,
+      "name" : ""
+    }
+  ],
+  "app_hash" : "",
+  "chain_id" : "test-chain-rDlYSN",
+  "genesis_time" : "0001-01-01T00:00:00Z"
+}
+```
+
+And the `priv_validator.json`:
+
+```
+{
+  "last_step" : 0,
+  "last_round" : "0",
+  "address" : "B788DEDE4F50AD8BC9462DE76741CCAFF87D51E2",
+  "pub_key" : {
+    "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
+    "type" : "tendermint/PubKeyEd25519"
+  },
+  "last_height" : "0",
+  "priv_key" : {
+    "value" : "JPivl82x+LfVkp8i3ztoTjY6c6GJ4pBxQexErOCyhwqHeGT5ATxzpAtPJKnxNx/NyUnD8Ebv3OIYH+kgD4N88Q==",
+    "type" : "tendermint/PrivKeyEd25519"
+  }
+}
+```
+
+The `priv_validator.json` actually contains a private key, and should
+thus be kept absolutely secret; for now we work with the plain text.
+Note the `last_` fields, which are used to prevent us from signing
+conflicting messages.
+
+Note also that the `pub_key` (the public key) in the
+`priv_validator.json` is also present in the `genesis.json`.
+
+The genesis file contains the list of public keys which may participate
+in the consensus, and their corresponding voting power. Greater than 2/3
+of the voting power must be active (i.e. the corresponding private keys
+must be producing signatures) for the consensus to make progress. In our
+case, the genesis file contains the public key of our
+`priv_validator.json`, so a Tendermint node started with the default
+root directory will be able to make progress. Voting power uses an int64
+but must be non-negative, thus the range is: 0 through 9223372036854775807.
+Because of how the current proposer selection algorithm works, we do not
+recommend having voting powers greater than 10\^12 (i.e. 1 trillion) (see
+[Proposals section of Byzantine Consensus
+Algorithm](./specification/byzantine-consensus-algorithm.md#proposals)
+for details).
+
+If we want to add more nodes to the network, we have two choices: we can
+add a new validator node, which will also participate in the consensus by
+proposing blocks and voting on them, or we can add a new non-validator
+node, which will not participate directly, but will verify and keep up
+with the consensus protocol.
+
+### Peers
+
+#### Seed
+
+A seed node is a node that relays the addresses of other peers it knows
+of. Such nodes constantly crawl the network to try to get more peers. The
+addresses which the seed node relays get saved into a local address book.
+Once these are in the address book, you will connect to those addresses
+directly. Basically, the seed node's job is just to relay everyone's
+addresses. You won't connect to seed nodes once you have received enough
+addresses, so typically you only need them on the first start. The seed
+node will immediately disconnect from you after sending you some
+addresses.
+
+#### Persistent Peer
+
+Persistent peers are peers you want to be constantly connected with. If
+you disconnect, you will try to connect directly back to them as opposed
+to using another address from the address book. On restarts you will
+always try to connect to these peers regardless of the size of your
+address book.
+
+All peers relay peers they know of by default. This is called the peer exchange
+protocol (PeX). With PeX, peers will be gossiping about known peers and forming
+a network, storing peer addresses in the addrbook. Because of this, you don't
+have to use a seed node if you have a live persistent peer.
+
+#### Connecting to Peers
+
+To connect to peers on start-up, specify them in the
+`$TMHOME/config/config.toml` or on the command line. Use `seeds` to
+specify seed nodes, and
+`persistent_peers` to specify peers that your node will maintain
+persistent connections with.
+
+For example,
+
+```
+tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"
+```
+
+Alternatively, you can use the `/dial_seeds` endpoint of the RPC to
+specify seeds for a running node to connect to:
+
+```
+curl 'localhost:26657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"\]'
+```
+
+Note, with PeX enabled, you
+should not need seeds after the first start.
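+
+The equivalent `config.toml` entry would look like this (a sketch reusing
+the placeholder node IDs and addresses from the examples above):
+
+```
+[p2p]
+seeds = "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"
+```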
+
+If you want Tendermint to connect to a specific set of addresses and
+maintain a persistent connection with each, you can use the
+`--p2p.persistent_peers` flag, the corresponding setting in the
+`config.toml`, or the `/dial_peers` RPC endpoint to do it without
+stopping the Tendermint instance.
+
+```
+tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"
+
+curl 'localhost:26657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"\]'
+```
+
+### Adding a Non-Validator
+
+Adding a non-validator is simple. Just copy the original `genesis.json`
+to `~/.tendermint/config` on the new machine and start the node,
+specifying seeds or persistent peers as necessary. If no seeds or
+persistent peers are specified, the node won't make any blocks, because
+it's not a validator, and it won't hear about any blocks, because it's
+not connected to any other peers.
+
+### Adding a Validator
+
+The easiest way to add new validators is to do it in the `genesis.json`,
+before starting the network. For instance, we could make a new
+`priv_validator.json`, and copy its `pub_key` into the above genesis.
+
+We can generate a new `priv_validator.json` with the command:
+
+```
+tendermint gen_validator
+```
+
+Now we can update our genesis file. For instance, if the new
+`priv_validator.json` looks like:
+
+```
+{
+  "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902",
+  "pub_key" : {
+    "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
+    "type" : "tendermint/PubKeyEd25519"
+  },
+  "priv_key" : {
+    "value" : "EDJY9W6zlAw+su6ITgTKg2nTZcHAH1NMTW5iwlgmNDuX1f35+OR4HMN88ZtQzsAwhETq4k3vzM3n6WTk5ii16Q==",
+    "type" : "tendermint/PrivKeyEd25519"
+  },
+  "last_step" : 0,
+  "last_round" : "0",
+  "last_height" : "0"
+}
+```
+
+then the new `genesis.json` will be:
+
+```
+{
+  "validators" : [
+    {
+      "pub_key" : {
+        "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
+        "type" : "tendermint/PubKeyEd25519"
+      },
+      "power" : 10,
+      "name" : ""
+    },
+    {
+      "pub_key" : {
+        "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
+        "type" : "tendermint/PubKeyEd25519"
+      },
+      "power" : 10,
+      "name" : ""
+    }
+  ],
+  "app_hash" : "",
+  "chain_id" : "test-chain-rDlYSN",
+  "genesis_time" : "0001-01-01T00:00:00Z"
+}
+```
+
+Update the `genesis.json` in `~/.tendermint/config`. Copy the genesis
+file and the new `priv_validator.json` to the `~/.tendermint/config` on
+a new machine.
+
+Now run `tendermint node` on both machines, and use either
+`--p2p.persistent_peers` or the `/dial_peers` RPC endpoint to get them
+to peer up. They should start making blocks, and will only continue to
+do so as long as both of them are online.
+
+To make a Tendermint network that can tolerate one of the validators
+failing, you need at least four validator nodes (so that more than 2/3
+of the voting power remains online).
+
+Updating validators in a live network is supported but must be
+explicitly programmed by the application developer. See the [application
+developers guide](./app-development.md) for more details.
+
+### Local Network
+
+To run a network locally, say on a single machine, you must change the
+`_laddr` fields in the `config.toml` (or use the flags) so that the
+listening addresses of the various sockets don't conflict. 
Additionally,
+you must set `addrbook_strict=false` in the `config.toml`, otherwise
+Tendermint's p2p library will deny making connections to peers with the
+same IP address.
+
+### Upgrading
+
+The Tendermint development cycle currently includes a lot of breaking changes.
+Upgrading from an old version to a new version usually means throwing
+away the chain data. Try out the
+[tm-migrate](https://github.com/hxzqlh/tm-tools) tool written by
+[@hxzqlh](https://github.com/hxzqlh) if you are keen to preserve the
+state of your chain when upgrading to newer versions.
diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md
new file mode 100644
index 000000000..20c368e29
--- /dev/null
+++ b/docs/tools/benchmarking.md
@@ -0,0 +1,80 @@
+# tm-bench
+
+Tendermint blockchain benchmarking tool:
+
+- https://github.com/tendermint/tools/tree/master/tm-bench
+
+For example, the following:
+
+```
+tm-bench -T 10 -r 1000 localhost:26657
+```
+
+will output:
+
+```
+Stats          Avg       StdDev     Max      Total
+Txs/sec        818       532        1549     9000
+Blocks/sec     0.818     0.386      1        9
+```
+
+## Quick Start
+
+[Install Tendermint](../introduction/install).
+This is currently set up to work on Tendermint's develop branch. Please
+ensure you are on it. (If not, update `tendermint` and `tmlibs` in
+`Gopkg.toml` to use the master branch.)
+
+Then run:
+
+```
+tendermint init
+tendermint node --proxy_app=kvstore
+```
+
+```
+tm-bench localhost:26657
+```
+
+with the last command being in a separate window.
+
+## Usage
+
+```
+tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints]
+
+Examples:
+        tm-bench localhost:26657
+Flags:
+  -T int
+        Exit after the specified amount of time in seconds (default 10)
+  -c int
+        Connections to keep open per endpoint (default 1)
+  -r int
+        Txs per second to send in a connection (default 1000)
+  -s int
+        Size per tx in bytes
+  -v    Verbose output
+```
+
+## How stats are collected
+
+These stats are derived by having each connection send transactions at the
+specified rate (or as close as it can get) for the specified time. After the
+specified time, it iterates over all of the blocks that were created in that
+time. The average and stddev per second are computed based on that, by
+grouping the data by second.
+
+To send transactions at the specified rate in each connection, we loop
+through the number of transactions. If it's too slow, the loop stops at the
+one-second mark. If it's too fast, we wait until the one-second mark ends
+(see the sketch below). The transactions-per-second stat is computed based
+on what ends up in the blocks.
+
+Each of the connections is handled via two separate goroutines.
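+
+A minimal sketch of that pacing loop (illustrative only; `sendLoop`,
+`sendTx` and the other names are assumptions for this example, not
+tm-bench's actual code):
+
+```go
+package main
+
+import "time"
+
+// sendLoop tries to send `rate` transactions in each one-second window.
+// If sending is too slow, it gives up on the rest of the window; if it
+// is too fast, it sleeps away the remainder of the second.
+func sendLoop(rate int, duration time.Duration, sendTx func() error) {
+	deadline := time.Now().Add(duration)
+	for time.Now().Before(deadline) {
+		start := time.Now()
+		for i := 0; i < rate; i++ {
+			if err := sendTx(); err != nil {
+				return
+			}
+			if time.Since(start) >= time.Second {
+				break // too slow: the one-second window is already over
+			}
+		}
+		if rest := time.Second - time.Since(start); rest > 0 {
+			time.Sleep(rest) // too fast: wait until the one-second mark ends
+		}
+	}
+}
+
+func main() {
+	sent := 0
+	sendLoop(1000, 3*time.Second, func() error { sent++; return nil })
+}
+```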
+
+## Development
+
+```
+make get_vendor_deps
+make test
+```
diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md
new file mode 100644
index 000000000..5cc2ad3b1
--- /dev/null
+++ b/docs/tools/monitoring.md
@@ -0,0 +1,92 @@
+# tm-monitor
+
+Tendermint blockchain monitoring tool; watches over one or more nodes,
+collecting and providing various statistics to the user:
+
+- https://github.com/tendermint/tools/tree/master/tm-monitor
+
+## Quick Start
+
+### Docker
+
+Assuming your application is running in another container with the name
+`app`:
+
+```
+docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm --link=app tendermint/tendermint node --proxy_app=tcp://app:26658
+
+docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657
+```
+
+If you don't have an application yet, but still want to try the monitor
+out, use `kvstore`:
+
+```
+docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore
+```
+```
+docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657
+```
+
+### Using Binaries
+
+[Install Tendermint](https://github.com/tendermint/tendermint#install)
+
+Then run:
+
+```
+tendermint init
+tendermint node --proxy_app=kvstore
+```
+
+```
+tm-monitor localhost:26657
+```
+
+with the last command being in a separate window.
+
+## Usage
+
+```
+tm-monitor [-v] [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints]
+
+Examples:
+        # monitor single instance
+        tm-monitor localhost:26657
+
+        # monitor a few instances by providing comma-separated list of RPC endpoints
+        tm-monitor host1:26657,host2:26657
+Flags:
+  -listen-addr string
+        HTTP and Websocket server listen address (default "tcp://0.0.0.0:26670")
+  -no-ton
+        Do not show ton (table of nodes)
+  -v    verbose logging
+```
+
+### RPC UI
+
+Run `tm-monitor` and visit http://localhost:26670. You should see the
+list of available RPC endpoints:
+
+```
+http://localhost:26670/status
+http://localhost:26670/status/network
+http://localhost:26670/monitor?endpoint=_
+http://localhost:26670/status/node?name=_
+http://localhost:26670/unmonitor?endpoint=_
+```
+
+The API is available as GET requests with URI encoded parameters, or as
+JSONRPC POST requests. The JSONRPC methods are also exposed over
+websocket.
+
+## Development
+
+```
+make get_tools
+make get_vendor_deps
+make test
+```
diff --git a/docs/yarn.lock b/docs/yarn.lock
new file mode 100644
index 000000000..5591b8fa0
--- /dev/null
+++ b/docs/yarn.lock
@@ -0,0 +1,2507 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1 + + +"@azu/format-text@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@azu/format-text/-/format-text-1.0.1.tgz#6967350a94640f6b02855169bd897ce54d6cebe2" + +"@azu/style-format@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@azu/style-format/-/style-format-1.0.0.tgz#e70187f8a862e191b1bce6c0268f13acd3a56b20" + dependencies: + "@azu/format-text" "^1.0.1" + +"@sindresorhus/is@^0.7.0": + version "0.7.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" + +"@textlint/ast-node-types@^4.0.2": + version "4.0.2" + resolved "https://registry.yarnpkg.com/@textlint/ast-node-types/-/ast-node-types-4.0.2.tgz#5386a15187798efb48eb71fa1cbf6ca2770b206a" + +"@textlint/ast-traverse@^2.0.8": + version "2.0.8" + resolved "https://registry.yarnpkg.com/@textlint/ast-traverse/-/ast-traverse-2.0.8.tgz#c180fe23dc3b8a6aa68539be70efb4ff17c38a3a" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + +"@textlint/feature-flag@^3.0.4": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@textlint/feature-flag/-/feature-flag-3.0.4.tgz#4290a4bb53da28c1f5f1d5ce0f4ae6630ab939ea" + dependencies: + map-like "^2.0.0" + +"@textlint/fixer-formatter@^3.0.7": + version "3.0.7" + resolved "https://registry.yarnpkg.com/@textlint/fixer-formatter/-/fixer-formatter-3.0.7.tgz#4ef15d5e606e2d32b89257afd382ed9dbb218846" + dependencies: + "@textlint/kernel" "^2.0.9" + chalk "^1.1.3" + debug "^2.1.0" + diff "^2.2.2" + interop-require "^1.0.0" + is-file "^1.0.0" + string-width "^1.0.1" + text-table "^0.2.0" + try-resolve "^1.0.1" + +"@textlint/kernel@^2.0.9": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@textlint/kernel/-/kernel-2.0.9.tgz#a4471b7969e192551230c35ea9fae32d80128ee0" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-traverse" "^2.0.8" + "@textlint/feature-flag" "^3.0.4" + "@types/bluebird" "^3.5.18" + bluebird "^3.5.1" + debug "^2.6.6" + deep-equal "^1.0.1" + object-assign "^4.1.1" + structured-source "^3.0.2" + +"@textlint/linter-formatter@^3.0.7": + version "3.0.7" + resolved "https://registry.yarnpkg.com/@textlint/linter-formatter/-/linter-formatter-3.0.7.tgz#66716cac94c047d94627a7e6af427a0d199eda7c" + dependencies: + "@azu/format-text" "^1.0.1" + "@azu/style-format" "^1.0.0" + "@textlint/kernel" "^2.0.9" + chalk "^1.0.0" + concat-stream "^1.5.1" + js-yaml "^3.2.4" + optionator "^0.8.1" + pluralize "^2.0.0" + string-width "^1.0.1" + string.prototype.padstart "^3.0.0" + strip-ansi "^3.0.1" + table "^3.7.8" + text-table "^0.2.0" + try-resolve "^1.0.1" + xml-escape "^1.0.0" + +"@textlint/markdown-to-ast@^6.0.8": + version "6.0.8" + resolved "https://registry.yarnpkg.com/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.8.tgz#baa509c42f842b4dba36ad91547a288c063396b8" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + debug "^2.1.3" + remark-frontmatter "^1.2.0" + remark-parse "^5.0.0" + structured-source "^3.0.2" + traverse "^0.6.6" + unified "^6.1.6" + +"@textlint/text-to-ast@^3.0.8": + version "3.0.8" + resolved "https://registry.yarnpkg.com/@textlint/text-to-ast/-/text-to-ast-3.0.8.tgz#6211977f369cec484447867f10dc155120f4c082" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + +"@textlint/textlint-plugin-markdown@^4.0.10": + version "4.0.10" + resolved "https://registry.yarnpkg.com/@textlint/textlint-plugin-markdown/-/textlint-plugin-markdown-4.0.10.tgz#a99b4a308067597e89439a9e87bc1c4a7f4d076b" + dependencies: + "@textlint/markdown-to-ast" "^6.0.8" + 
+"@textlint/textlint-plugin-text@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@textlint/textlint-plugin-text/-/textlint-plugin-text-3.0.10.tgz#619600bdc352d33a68e7a73d77d58b0c52b2a44f" + dependencies: + "@textlint/text-to-ast" "^3.0.8" + +"@types/bluebird@^3.5.18": + version "3.5.21" + resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.21.tgz#567615589cc913e84a28ecf9edb031732bdf2634" + +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + +aggregate-error@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-1.0.0.tgz#888344dad0220a72e3af50906117f48771925fac" + dependencies: + clean-stack "^1.0.0" + indent-string "^3.0.0" + +ajv-keywords@^1.0.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-1.5.1.tgz#314dd0a4b3368fad3dfcdc54ede6171b886daf3c" + +ajv@^4.7.0: + version "4.11.8" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536" + dependencies: + co "^4.6.0" + json-stable-stringify "^1.0.1" + +ajv@^5.1.0: + version "5.5.2" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" + dependencies: + co "^4.6.0" + fast-deep-equal "^1.0.0" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.3.0" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + dependencies: + color-convert "^1.9.0" + +anymatch@^1.3.0: + version "1.3.2" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.2.tgz#553dcb8f91e3c889845dfdba34c77721b90b9d7a" + dependencies: + micromatch "^2.1.5" + normalize-path "^2.0.0" + +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + +are-we-there-yet@~1.1.2: + version "1.1.5" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + dependencies: + sprintf-js "~1.0.2" + +arr-diff@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf" + dependencies: + arr-flatten "^1.0.1" + +arr-flatten@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" + +array-iterate@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/array-iterate/-/array-iterate-1.1.2.tgz#f66a57e84426f8097f4197fbb6c051b8e5cdf7d8" + +array-union@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + +array-unique@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53" + +arrify@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + +asn1@~0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + +async-each@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d" + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + +aws4@^1.6.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.7.0.tgz#d4d0e9b9dbfca77bf08eeb0a8a471550fe39e289" + +bail@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/bail/-/bail-1.0.3.tgz#63cfb9ddbac829b02a3128cd53224be78e6c21a3" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + +bcrypt-pbkdf@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + dependencies: + tweetnacl "^0.14.3" + +binary-extensions@^1.0.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.11.0.tgz#46aa1751fb6a2f93ee5e689bb1087d4b14c6c205" + +bluebird@^3.0.5, bluebird@^3.5.1: + version "3.5.1" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.1.tgz#d9551f9de98f1fcda1e683d17ee91a0602ee2eb9" + +boundary@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/boundary/-/boundary-1.0.1.tgz#4d67dc2602c0cc16dd9bce7ebf87e948290f5812" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^1.8.2: + version "1.8.5" + resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" + dependencies: + expand-range "^1.8.1" + preserve "^0.2.0" + repeat-element "^1.1.2" + +buffer-from@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + +builtin-modules@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" + +cacheable-request@^2.1.1: + version "2.1.4" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-2.1.4.tgz#0d808801b6342ad33c91df9d0b44dc09b91e5c3d" + dependencies: + clone-response 
"1.0.2" + get-stream "3.0.0" + http-cache-semantics "3.8.1" + keyv "3.0.0" + lowercase-keys "1.0.0" + normalize-url "2.0.1" + responselike "1.0.2" + +camelcase@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" + +capture-stack-trace@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/capture-stack-trace/-/capture-stack-trace-1.0.0.tgz#4a6fa07399c26bba47f0b2496b4d0fb408c5550d" + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + +ccount@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.0.3.tgz#f1cec43f332e2ea5a569fd46f9f5bde4e6102aff" + +chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^2.0.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e" + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +character-entities-html4@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/character-entities-html4/-/character-entities-html4-1.1.2.tgz#c44fdde3ce66b52e8d321d6c1bf46101f0150610" + +character-entities-legacy@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/character-entities-legacy/-/character-entities-legacy-1.1.2.tgz#7c6defb81648498222c9855309953d05f4d63a9c" + +character-entities@^1.0.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/character-entities/-/character-entities-1.2.2.tgz#58c8f371c0774ef0ba9b2aca5f00d8f100e6e363" + +character-reference-invalid@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.2.tgz#21e421ad3d84055952dab4a43a04e73cd425d3ed" + +charenc@~0.0.1: + version "0.0.2" + resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667" + +chokidar@^1.5.1: + version "1.7.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.7.0.tgz#798e689778151c8076b4b360e5edd28cda2bb468" + dependencies: + anymatch "^1.3.0" + async-each "^1.0.0" + glob-parent "^2.0.0" + inherits "^2.0.1" + is-binary-path "^1.0.0" + is-glob "^2.0.0" + path-is-absolute "^1.0.0" + readdirp "^2.0.0" + optionalDependencies: + fsevents "^1.0.0" + +chownr@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181" + +circular-json@^0.3.1: + version "0.3.3" + resolved "https://registry.yarnpkg.com/circular-json/-/circular-json-0.3.3.tgz#815c99ea84f6809529d2f45791bdf82711352d66" + +clean-stack@^1.0.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-1.3.0.tgz#9e821501ae979986c46b1d66d2d432db2fd4ae31" + +clone-response@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + dependencies: + mimic-response "^1.0.0" + +co@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/co/-/co-3.1.0.tgz#4ea54ea5a08938153185e15210c68d9092bc1b78" + +co@^4.6.0: + version "4.6.0" + resolved 
"https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + +collapse-white-space@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/collapse-white-space/-/collapse-white-space-1.0.4.tgz#ce05cf49e54c3277ae573036a26851ba430a0091" + +color-convert@^1.9.0: + version "1.9.2" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.2.tgz#49881b8fba67df12a96bdf3f56c0aab9e7913147" + dependencies: + color-name "1.1.1" + +color-name@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.1.tgz#4b1415304cf50028ea81643643bd82ea05803689" + +combined-stream@1.0.6, combined-stream@~1.0.5: + version "1.0.6" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.6.tgz#723e7df6e801ac5613113a7e445a9b69cb632818" + dependencies: + delayed-stream "~1.0.0" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + +concat-stream@^1.5.1: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + +core-util-is@1.0.2, core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + +create-error-class@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6" + dependencies: + capture-stack-trace "^1.0.0" + +crypt@~0.0.1: + version "0.0.2" + resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + dependencies: + assert-plus "^1.0.0" + +debug@^2.1.0, debug@^2.1.2, debug@^2.1.3, debug@^2.2.0, debug@^2.6.6: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + dependencies: + ms "2.0.0" + +debug@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + dependencies: + ms "2.0.0" + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + +decompress-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + dependencies: + mimic-response "^1.0.0" + +deep-equal@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5" + +deep-extend@^0.6.0: + version "0.6.0" + resolved 
"https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + +deep-is@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + +define-properties@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.2.tgz#83a73f2fea569898fb737193c8f873caf6d45c94" + dependencies: + foreach "^2.0.5" + object-keys "^1.0.8" + +del@^2.0.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/del/-/del-2.2.2.tgz#c12c981d067846c84bcaf862cff930d907ffd1a8" + dependencies: + globby "^5.0.0" + is-path-cwd "^1.0.0" + is-path-in-cwd "^1.0.0" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + rimraf "^2.2.8" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + +diff@^2.2.2: + version "2.2.3" + resolved "https://registry.yarnpkg.com/diff/-/diff-2.2.3.tgz#60eafd0d28ee906e4e8ff0a52c1229521033bf99" + +dns-packet@^1.1.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.1.tgz#12aa426981075be500b910eedcd0b47dd7deda5a" + dependencies: + ip "^1.1.0" + safe-buffer "^5.0.1" + +dns-socket@^1.6.2: + version "1.6.3" + resolved "https://registry.yarnpkg.com/dns-socket/-/dns-socket-1.6.3.tgz#5268724fad4aa46ad9c5ca4ffcd16e1de5342aab" + dependencies: + dns-packet "^1.1.0" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + +ecc-jsbn@~0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505" + dependencies: + jsbn "~0.1.0" + +error-ex@^1.2.0, error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + dependencies: + is-arrayish "^0.2.1" + +es-abstract@^1.4.3: + version "1.12.0" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.12.0.tgz#9dbbdd27c6856f0001421ca18782d786bf8a6165" + dependencies: + es-to-primitive "^1.1.1" + function-bind "^1.1.1" + has "^1.0.1" + is-callable "^1.1.3" + is-regex "^1.0.4" + +es-to-primitive@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.1.1.tgz#45355248a88979034b6792e19bb81f2b7975dd0d" + dependencies: + is-callable "^1.1.1" + is-date-object "^1.0.1" + is-symbol "^1.0.1" + +escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + +esprima@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + +expand-brackets@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b" + dependencies: + is-posix-bracket "^0.1.0" + +expand-range@^1.8.1: + version "1.8.2" + resolved 
"https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337" + dependencies: + fill-range "^2.1.0" + +extend@^3.0.0, extend@~3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444" + +extglob@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1" + dependencies: + is-extglob "^1.0.0" + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + +extsprintf@^1.2.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" + +fast-deep-equal@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz#c053477817c86b51daa853c81e059b733d023614" + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + +fast-levenshtein@~2.0.4: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + +fault@^1.0.0, fault@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/fault/-/fault-1.0.2.tgz#c3d0fec202f172a3a4d414042ad2bb5e2a3ffbaa" + dependencies: + format "^0.2.2" + +file-entry-cache@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-2.0.0.tgz#c392990c3e684783d838b8c84a45d8a048458361" + dependencies: + flat-cache "^1.2.1" + object-assign "^4.0.1" + +filename-regex@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" + +fill-range@^2.1.0: + version "2.2.4" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.4.tgz#eb1e773abb056dcd8df2bfdf6af59b8b3a936565" + dependencies: + is-number "^2.1.0" + isobject "^2.0.0" + randomatic "^3.0.0" + repeat-element "^1.1.2" + repeat-string "^1.5.2" + +find-up@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + dependencies: + locate-path "^2.0.0" + +flat-cache@^1.2.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-1.3.0.tgz#d3030b32b38154f4e3b7e9c709f490f7ef97c481" + dependencies: + circular-json "^0.3.1" + del "^2.0.2" + graceful-fs "^4.1.2" + write "^0.2.1" + +fn-name@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/fn-name/-/fn-name-2.0.1.tgz#5214d7537a4d06a4a301c0cc262feb84188002e7" + +for-in@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + +for-own@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + dependencies: + for-in "^1.0.1" + +foreach@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + +form-data@~2.3.1: + version "2.3.2" + resolved 
"https://registry.yarnpkg.com/form-data/-/form-data-2.3.2.tgz#4970498be604c20c005d4f5c23aecd21d6b49099" + dependencies: + asynckit "^0.4.0" + combined-stream "1.0.6" + mime-types "^2.1.12" + +format@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/format/-/format-0.2.2.tgz#d6170107e9efdc4ed30c9dc39016df942b5cb58b" + +from2@^2.1.1: + version "2.3.0" + resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.0" + +fs-minipass@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.5.tgz#06c277218454ec288df77ada54a03b8702aacb9d" + dependencies: + minipass "^2.2.1" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + +fsevents@^1.0.0: + version "1.2.4" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.4.tgz#f41dcb1af2582af3692da36fc55cbd8e1041c426" + dependencies: + nan "^2.9.2" + node-pre-gyp "^0.10.0" + +function-bind@^1.0.2, function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" + +get-stdin@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-5.0.1.tgz#122e161591e21ff4c52530305693f20e6393a398" + +get-stream@3.0.0, get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + dependencies: + assert-plus "^1.0.0" + +glob-base@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4" + dependencies: + glob-parent "^2.0.0" + is-glob "^2.0.0" + +glob-parent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28" + dependencies: + is-glob "^2.0.0" + +glob@^7.0.3, glob@^7.0.5, glob@^7.1.1: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globby@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-5.0.0.tgz#ebd84667ca0dbb330b99bcfc68eac2bc54370e0d" + dependencies: + array-union "^1.0.1" + arrify "^1.0.0" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +got@^6.7.1: + version "6.7.1" + resolved "https://registry.yarnpkg.com/got/-/got-6.7.1.tgz#240cd05785a9a18e561dc1b44b41c763ef1e8db0" + dependencies: + create-error-class "^3.0.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + is-redirect "^1.0.0" + is-retry-allowed "^1.0.0" + is-stream "^1.0.0" + lowercase-keys "^1.0.0" + safe-buffer "^5.0.1" + timed-out "^4.0.0" + unzip-response "^2.0.1" + 
url-parse-lax "^1.0.0" + +got@^8.0.0: + version "8.3.2" + resolved "https://registry.yarnpkg.com/got/-/got-8.3.2.tgz#1d23f64390e97f776cac52e5b936e5f514d2e937" + dependencies: + "@sindresorhus/is" "^0.7.0" + cacheable-request "^2.1.1" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + into-stream "^3.1.0" + is-retry-allowed "^1.1.0" + isurl "^1.0.0-alpha5" + lowercase-keys "^1.0.0" + mimic-response "^1.0.0" + p-cancelable "^0.4.0" + p-timeout "^2.0.1" + pify "^3.0.0" + safe-buffer "^5.1.1" + timed-out "^4.0.1" + url-parse-lax "^3.0.0" + url-to-options "^1.0.1" + +graceful-fs@^4.1.2: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + +har-validator@~5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.0.3.tgz#ba402c266194f15956ef15e0fcf242993f6a7dfd" + dependencies: + ajv "^5.1.0" + har-schema "^2.0.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + dependencies: + ansi-regex "^2.0.0" + +has-flag@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + +has-symbol-support-x@^1.4.1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" + +has-to-string-tag-x@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" + dependencies: + has-symbol-support-x "^1.4.1" + +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + +has@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + dependencies: + function-bind "^1.1.1" + +hosted-git-info@^2.1.4: + version "2.6.1" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.6.1.tgz#6e4cee78b01bb849dcf93527708c69fdbee410df" + +http-cache-semantics@3.8.1: + version "3.8.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz#39b0e16add9b605bf0a9ef3d9daaf4843b4cacd2" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +iconv-lite@^0.4.4: + version "0.4.23" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.23.tgz#297871f63be507adcfbfca715d0cd0eed84e9a63" + dependencies: + safer-buffer ">= 2.1.2 < 3" + +ignore-walk@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8" + dependencies: + minimatch "^3.0.4" + +ignore@^3.2.0: + version "3.3.10" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043" + +indent-string@^3.0.0: + 
version "3.2.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-3.2.0.tgz#4a5fd6d27cc332f37e5419a504dbb837105c9289" + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +ini@~1.3.0: + version "1.3.5" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" + +interop-require@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/interop-require/-/interop-require-1.0.0.tgz#e53103679944c88d7e6105b62a9f4475c783971e" + +into-stream@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/into-stream/-/into-stream-3.1.0.tgz#96fb0a936c12babd6ff1752a17d05616abd094c6" + dependencies: + from2 "^2.1.1" + p-is-promise "^1.1.0" + +ip-regex@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" + +ip@^1.1.0: + version "1.1.5" + resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a" + +is-absolute-url@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" + +is-alphabetical@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-alphabetical/-/is-alphabetical-1.0.2.tgz#1fa6e49213cb7885b75d15862fb3f3d96c884f41" + +is-alphanumeric@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-alphanumeric/-/is-alphanumeric-1.0.0.tgz#4a9cef71daf4c001c1d81d63d140cf53fd6889f4" + +is-alphanumerical@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-alphanumerical/-/is-alphanumerical-1.0.2.tgz#1138e9ae5040158dc6ff76b820acd6b7a181fd40" + dependencies: + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + +is-binary-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" + dependencies: + binary-extensions "^1.0.0" + +is-buffer@^1.1.4, is-buffer@^1.1.5, is-buffer@~1.1.1: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + +is-builtin-module@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe" + dependencies: + builtin-modules "^1.0.0" + +is-callable@^1.1.1, is-callable@^1.1.3: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.4.tgz#1e1adf219e1eeb684d691f9d6a05ff0d30a24d75" + +is-date-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16" + +is-decimal@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-1.0.2.tgz#894662d6a8709d307f3a276ca4339c8fa5dff0ff" + +is-dotfile@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" + 
+is-empty@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/is-empty/-/is-empty-1.2.0.tgz#de9bb5b278738a05a0b09a57e1fb4d4a341a9f6b" + +is-equal-shallow@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534" + dependencies: + is-primitive "^2.0.0" + +is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + +is-extglob@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0" + +is-file@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-file/-/is-file-1.0.0.tgz#28a44cfbd9d3db193045f22b65fce8edf9620596" + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + +is-glob@^2.0.0, is-glob@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863" + dependencies: + is-extglob "^1.0.0" + +is-hexadecimal@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.2.tgz#b6e710d7d07bb66b98cb8cece5c9b4921deeb835" + +is-hidden@^1.0.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/is-hidden/-/is-hidden-1.1.1.tgz#82ee6a93aeef3fb007ad5b9457c0584d45329f38" + +is-ip@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-2.0.0.tgz#68eea07e8a0a0a94c2d080dd674c731ab2a461ab" + dependencies: + ip-regex "^2.0.0" + +is-number@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f" + dependencies: + kind-of "^3.0.2" + +is-number@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-4.0.0.tgz#0026e37f5454d73e356dfe6564699867c6a7f0ff" + +is-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-object/-/is-object-1.0.1.tgz#8952688c5ec2ffd6b03ecc85e769e02903083470" + +is-online@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-online/-/is-online-7.0.0.tgz#7e2408c0ae1e7e37ba8d50bdb237260d32bfd96e" + dependencies: + got "^6.7.1" + p-any "^1.0.0" + p-timeout "^1.0.0" + public-ip "^2.3.0" + +is-path-cwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d" + +is-path-in-cwd@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz#5ac48b345ef675339bd6c7a48a912110b241cf52" + dependencies: + is-path-inside "^1.0.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + dependencies: + path-is-inside "^1.0.1" + +is-plain-obj@^1.0.0, is-plain-obj@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + +is-posix-bracket@^0.1.0: + version "0.1.1" + resolved 
"https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" + +is-primitive@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575" + +is-redirect@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24" + +is-regex@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.4.tgz#5517489b547091b0930e095654ced25ee97e9491" + dependencies: + has "^1.0.1" + +is-relative-url@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-relative-url/-/is-relative-url-2.0.0.tgz#72902d7fe04b3d4792e7db15f9db84b7204c9cef" + dependencies: + is-absolute-url "^2.0.0" + +is-retry-allowed@^1.0.0, is-retry-allowed@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz#11a060568b67339444033d0125a61a20d564fb34" + +is-stream@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + +is-symbol@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.1.tgz#3cc59f00025194b6ab2e38dbae6689256b660572" + +is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + +is-utf8@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72" + +is-whitespace-character@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-whitespace-character/-/is-whitespace-character-1.0.2.tgz#ede53b4c6f6fb3874533751ec9280d01928d03ed" + +is-word-character@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-word-character/-/is-word-character-1.0.2.tgz#46a5dac3f2a1840898b91e576cd40d493f3ae553" + +isarray@1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + +isemail@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.1.2.tgz#937cf919002077999a73ea8b1951d590e84e01dd" + dependencies: + punycode "2.x.x" + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + dependencies: + isarray "1.0.0" + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + +isurl@^1.0.0-alpha5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" + dependencies: + has-to-string-tag-x "^1.2.0" + is-object "^1.0.1" + +js-yaml@^3.2.4, js-yaml@^3.6.1: + version "3.12.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + +json-parse-better-errors@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + +json-schema-traverse@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" + +json-schema@0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" + +json-stable-stringify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af" + dependencies: + jsonify "~0.0.0" + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + +json5@^0.5.0, json5@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" + +jsonify@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" + +jsprim@^1.2.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.2.3" + verror "1.10.0" + +keyv@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.0.0.tgz#44923ba39e68b12a7cec7df6c3268c031f2ef373" + dependencies: + json-buffer "3.0.0" + +kind-of@^3.0.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + dependencies: + is-buffer "^1.1.5" + +kind-of@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051" + +levn@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +link-check@^4.1.0: + version "4.4.4" + resolved "https://registry.yarnpkg.com/link-check/-/link-check-4.4.4.tgz#08dbb881b70c23f1c173889c3a34d682c2e68c1a" + dependencies: + is-relative-url "^2.0.0" + isemail "^3.1.2" + ms "^2.1.1" + request "^2.87.0" + +load-json-file@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0" + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + pinkie-promise "^2.0.0" + strip-bom "^2.0.0" + +load-json-file@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b" + dependencies: + graceful-fs "^4.1.2" + parse-json "^4.0.0" + pify "^3.0.0" + strip-bom "^3.0.0" + +load-plugin@^2.0.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/load-plugin/-/load-plugin-2.2.2.tgz#ebc7599491ff33e5077719fbe051d5725a9f7a89" + dependencies: + npm-prefix "^1.2.0" + resolve-from "^4.0.0" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +lodash@^4.0.0: + version "4.17.10" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7" + 
+log-symbols@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-1.0.2.tgz#376ff7b58ea3086a0f09facc74617eca501e1a18" + dependencies: + chalk "^1.0.0" + +longest-streak@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/longest-streak/-/longest-streak-2.0.2.tgz#2421b6ba939a443bb9ffebf596585a50b4c38e2e" + +lowercase-keys@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306" + +lowercase-keys@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + +map-like@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/map-like/-/map-like-2.0.0.tgz#94496d49ad333c0dc3234b27adbbd1e8535953b4" + +markdown-escapes@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/markdown-escapes/-/markdown-escapes-1.0.2.tgz#e639cbde7b99c841c0bacc8a07982873b46d2122" + +markdown-extensions@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/markdown-extensions/-/markdown-extensions-1.1.1.tgz#fea03b539faeaee9b4ef02a3769b455b189f7fc3" + +markdown-table@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-1.1.2.tgz#c78db948fa879903a41bce522e3b96f801c63786" + +math-random@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/math-random/-/math-random-1.0.1.tgz#8b3aac588b8a66e4975e3cdea67f7bb329601fac" + +md5@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/md5/-/md5-2.2.1.tgz#53ab38d5fe3c8891ba465329ea23fac0540126f9" + dependencies: + charenc "~0.0.1" + crypt "~0.0.1" + is-buffer "~1.1.1" + +mdast-util-compact@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-compact/-/mdast-util-compact-1.0.1.tgz#cdb5f84e2b6a2d3114df33bd05d9cb32e3c4083a" + dependencies: + unist-util-modify-children "^1.0.0" + unist-util-visit "^1.1.0" + +micromatch@^2.1.5: + version "2.3.11" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565" + dependencies: + arr-diff "^2.0.0" + array-unique "^0.2.1" + braces "^1.8.2" + expand-brackets "^0.1.4" + extglob "^0.3.1" + filename-regex "^2.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.1" + kind-of "^3.0.2" + normalize-path "^2.0.1" + object.omit "^2.0.0" + parse-glob "^3.0.4" + regex-cache "^0.4.2" + +mime-db@~1.33.0: + version "1.33.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" + +mime-types@^2.1.12, mime-types@~2.1.17: + version "2.1.18" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + dependencies: + mime-db "~1.33.0" + +mimic-response@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.0.tgz#df3d3652a73fded6b9b0b24146e6fd052353458e" + +minimatch@^3.0.2, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + dependencies: + brace-expansion "^1.1.7" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + +minimist@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + +minipass@^2.2.1, 
minipass@^2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.3.3.tgz#a7dcc8b7b833f5d368759cce544dccb55f50f233" + dependencies: + safe-buffer "^5.1.2" + yallist "^3.0.0" + +minizlib@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.1.0.tgz#11e13658ce46bc3a70a267aac58359d1e0c29ceb" + dependencies: + minipass "^2.2.1" + +mkdirp@^0.5.0, mkdirp@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + dependencies: + minimist "0.0.8" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + +ms@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" + +nan@^2.9.2: + version "2.10.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.10.0.tgz#96d0cd610ebd58d4b4de9cc0c6828cda99c7548f" + +needle@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.2.1.tgz#b5e325bd3aae8c2678902fa296f729455d1d3a7d" + dependencies: + debug "^2.1.2" + iconv-lite "^0.4.4" + sax "^1.2.4" + +node-pre-gyp@^0.10.0: + version "0.10.2" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.2.tgz#e8945c20ef6795a20aac2b44f036eb13cf5146e3" + dependencies: + detect-libc "^1.0.2" + mkdirp "^0.5.1" + needle "^2.2.0" + nopt "^4.0.1" + npm-packlist "^1.1.6" + npmlog "^4.0.2" + rc "^1.2.7" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^4" + +nopt@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" + dependencies: + abbrev "1" + osenv "^0.1.4" + +normalize-package-data@^2.3.2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.4.0.tgz#12f95a307d58352075a04907b84ac8be98ac012f" + dependencies: + hosted-git-info "^2.1.4" + is-builtin-module "^1.0.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-path@^2.0.0, normalize-path@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + dependencies: + remove-trailing-separator "^1.0.1" + +normalize-url@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-2.0.1.tgz#835a9da1551fa26f70e92329069a23aa6574d7e6" + dependencies: + prepend-http "^2.0.0" + query-string "^5.0.1" + sort-keys "^2.0.0" + +npm-bundled@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.3.tgz#7e71703d973af3370a9591bafe3a63aca0be2308" + +npm-packlist@^1.1.6: + version "1.1.10" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.10.tgz#1039db9e985727e464df066f4cf0ab6ef85c398a" + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + +npm-prefix@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/npm-prefix/-/npm-prefix-1.2.0.tgz#e619455f7074ba54cc66d6d0d37dd9f1be6bcbc0" + dependencies: + rc "^1.1.0" + shellsubstitute "^1.1.0" + untildify "^2.1.0" + +npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +number-is-nan@^1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + +oauth-sign@~0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43" + +object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + +object-keys@^1.0.8, object-keys@^1.0.9: + version "1.0.12" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" + +object.omit@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" + dependencies: + for-own "^0.1.4" + is-extendable "^0.1.1" + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + dependencies: + wrappy "1" + +optionator@^0.8.0, optionator@^0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.4" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + wordwrap "~1.0.0" + +os-homedir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + +os-tmpdir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + +osenv@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-any@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-any/-/p-any-1.1.0.tgz#1d03835c7eed1e34b8e539c47b7b60d0d015d4e1" + dependencies: + p-some "^2.0.0" + +p-cancelable@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-0.4.1.tgz#35f363d67d52081c8d9585e37bcceb7e0bbcb2a0" + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + +p-is-promise@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-1.1.0.tgz#9c9456989e9f6588017b0434d56097675c3da05e" + +p-limit@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" + dependencies: + p-try "^1.0.0" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + dependencies: + p-limit "^1.1.0" + +p-some@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-some/-/p-some-2.0.1.tgz#65d87c8b154edbcf5221d167778b6d2e150f6f06" + dependencies: + aggregate-error "^1.0.0" + +p-timeout@^1.0.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-1.2.1.tgz#5eb3b353b7fce99f101a1038880bb054ebbea386" + dependencies: + p-finally "^1.0.0" + +p-timeout@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-2.0.1.tgz#d8dd1979595d2dc0139e1fe46b8b646cb3cdf038" + dependencies: + p-finally "^1.0.0" + +p-try@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" + +parse-entities@^1.0.2, parse-entities@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-1.1.2.tgz#9eaf719b29dc3bd62246b4332009072e01527777" + dependencies: + character-entities "^1.0.0" + character-entities-legacy "^1.0.0" + character-reference-invalid "^1.0.0" + is-alphanumerical "^1.0.0" + is-decimal "^1.0.0" + is-hexadecimal "^1.0.0" + +parse-glob@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" + dependencies: + glob-base "^0.3.0" + is-dotfile "^1.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.0" + +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + dependencies: + error-ex "^1.2.0" + +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + +path-exists@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" + dependencies: + pinkie-promise "^2.0.0" + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + +path-is-inside@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + +path-to-glob-pattern@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-to-glob-pattern/-/path-to-glob-pattern-1.0.2.tgz#473e6a3a292a9d13fbae3edccee72d3baba8c619" + +path-type@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441" + dependencies: + graceful-fs "^4.1.2" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +path-type@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" + dependencies: + pify "^3.0.0" + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + +pluralize@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-2.0.0.tgz#72b726aa6fac1edeee42256c7d8dc256b335677f" + +prelude-ls@~1.1.2: + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + +prepend-http@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + +prepend-http@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + +preserve@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" + +prettier@^1.13.7: + version "1.13.7" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.13.7.tgz#850f3b8af784a49a6ea2d2eaa7ed1428a34b7281" + +process-nextick-args@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" + +public-ip@^2.3.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/public-ip/-/public-ip-2.4.0.tgz#f00c028a15366d8c798e47efab6acd09a17666da" + dependencies: + dns-socket "^1.6.2" + got "^8.0.0" + is-ip "^2.0.0" + pify "^3.0.0" + +punycode@2.x.x: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + +punycode@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + +qs@~6.5.1: + version "6.5.2" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" + +query-string@^5.0.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-5.1.1.tgz#a78c012b71c17e05f2e3fa2319dd330682efb3cb" + dependencies: + decode-uri-component "^0.2.0" + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +randomatic@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.0.0.tgz#d35490030eb4f7578de292ce6dfb04a91a128923" + dependencies: + is-number "^4.0.0" + kind-of "^6.0.0" + math-random "^1.0.1" + +rc-config-loader@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/rc-config-loader/-/rc-config-loader-2.0.1.tgz#8c8452f59bdd10d448a67762dccf7c1b247db860" + dependencies: + debug "^2.2.0" + js-yaml "^3.6.1" + json5 "^0.5.0" + object-assign "^4.1.0" + object-keys "^1.0.9" + path-exists "^2.1.0" + require-from-string "^2.0.1" + +rc@^1.1.0, rc@^1.2.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +read-pkg-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-3.0.0.tgz#3ed496685dba0f8fe118d0691dc51f4a1ff96f07" + dependencies: + find-up "^2.0.0" + read-pkg "^3.0.0" + +read-pkg@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28" + dependencies: + load-json-file "^1.0.0" + normalize-package-data "^2.3.2" + path-type "^1.0.0" + +read-pkg@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" + dependencies: + load-json-file "^4.0.0" + normalize-package-data "^2.3.2" + path-type "^3.0.0" + +readable-stream@^2.0.0, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.2.2: + version "2.3.6" + resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readdirp@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78" + dependencies: + graceful-fs "^4.1.2" + minimatch "^3.0.2" + readable-stream "^2.0.2" + set-immediate-shim "^1.0.1" + +regex-cache@^0.4.2: + version "0.4.4" + resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.4.tgz#75bdc58a2a1496cec48a12835bc54c8d562336dd" + dependencies: + is-equal-shallow "^0.1.3" + +remark-cli@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-cli/-/remark-cli-5.0.0.tgz#9feefd06474f3d0ff132df21b5334c546df12ab6" + dependencies: + markdown-extensions "^1.1.0" + remark "^9.0.0" + unified-args "^5.0.0" + +remark-frontmatter@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/remark-frontmatter/-/remark-frontmatter-1.2.0.tgz#67905d178c0fe531ed12c57b98759f101fc2c1b5" + dependencies: + fault "^1.0.1" + xtend "^4.0.1" + +remark-lint-no-dead-urls@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/remark-lint-no-dead-urls/-/remark-lint-no-dead-urls-0.3.0.tgz#b640ecbb4ccaf780afe28c8d13e79f5dc6769449" + dependencies: + is-online "^7.0.0" + is-relative-url "^2.0.0" + link-check "^4.1.0" + unified-lint-rule "^1.0.1" + unist-util-visit "^1.1.3" + +remark-parse@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-5.0.0.tgz#4c077f9e499044d1d5c13f80d7a98cf7b9285d95" + dependencies: + collapse-white-space "^1.0.2" + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + is-word-character "^1.0.0" + markdown-escapes "^1.0.0" + parse-entities "^1.1.0" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + trim "0.0.1" + trim-trailing-lines "^1.0.0" + unherit "^1.0.4" + unist-util-remove-position "^1.0.0" + vfile-location "^2.0.0" + xtend "^4.0.1" + +remark-stringify@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-stringify/-/remark-stringify-5.0.0.tgz#336d3a4d4a6a3390d933eeba62e8de4bd280afba" + dependencies: + ccount "^1.0.0" + is-alphanumeric "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + longest-streak "^2.0.1" + markdown-escapes "^1.0.0" + markdown-table "^1.1.0" + mdast-util-compact "^1.0.0" + parse-entities "^1.0.2" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + stringify-entities "^1.0.1" + unherit "^1.0.4" + xtend "^4.0.1" + +remark@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/remark/-/remark-9.0.0.tgz#c5cfa8ec535c73a67c4b0f12bfdbd3a67d8b2f60" + dependencies: + remark-parse "^5.0.0" + remark-stringify "^5.0.0" + unified "^6.0.0" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" + +repeat-element@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a" + +repeat-string@^1.5.0, repeat-string@^1.5.2, repeat-string@^1.5.4: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + +replace-ext@1.0.0: + version 
"1.0.0" + resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-1.0.0.tgz#de63128373fcbf7c3ccfa4de5a480c45a67958eb" + +request@^2.87.0: + version "2.87.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.87.0.tgz#32f00235cd08d482b4d0d68db93a829c0ed5756e" + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.6.0" + caseless "~0.12.0" + combined-stream "~1.0.5" + extend "~3.0.1" + forever-agent "~0.6.1" + form-data "~2.3.1" + har-validator "~5.0.3" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.17" + oauth-sign "~0.8.2" + performance-now "^2.1.0" + qs "~6.5.1" + safe-buffer "^5.1.1" + tough-cookie "~2.3.3" + tunnel-agent "^0.6.0" + uuid "^3.1.0" + +require-from-string@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + +responselike@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + dependencies: + lowercase-keys "^1.0.0" + +rimraf@^2.2.8, rimraf@^2.6.1: + version "2.6.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" + dependencies: + glob "^7.0.5" + +safe-buffer@^5.0.1, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + +sax@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + +"semver@2 || 3 || 4 || 5", semver@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" + +set-blocking@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + +set-immediate-shim@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61" + +shellsubstitute@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shellsubstitute/-/shellsubstitute-1.2.0.tgz#e4f702a50c518b0f6fe98451890d705af29b6b70" + +signal-exit@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + +slice-ansi@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-0.0.4.tgz#edbf8903f66f7ce2f8eafd6ceed65e264c831b35" + +sliced@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/sliced/-/sliced-1.0.1.tgz#0b3a662b5d04c3177b1926bea82b03f837a2ef41" + +sort-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-2.0.0.tgz#658535584861ec97d730d6cf41822e1f56684128" + dependencies: + is-plain-obj "^1.0.0" + +spdx-correct@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.0.0.tgz#05a5b4d7153a195bc92c3c425b69f3b2a9524c82" + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.1.0.tgz#2c7ae61056c714a5b9b9b2b2af7d311ef5c78fe9" + +spdx-expression-parse@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz#7a7cd28470cc6d3a1cfe6d66886f6bc430d3ac87" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + +sshpk@^1.7.0: + version "1.14.2" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.14.2.tgz#c6fc61648a3d9c4e764fd3fcdf4ea105e492ba98" + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + dashdash "^1.12.0" + getpass "^0.1.1" + safer-buffer "^2.0.2" + optionalDependencies: + bcrypt-pbkdf "^1.0.0" + ecc-jsbn "~0.1.1" + jsbn "~0.1.0" + tweetnacl "~0.14.0" + +state-toggle@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/state-toggle/-/state-toggle-1.0.1.tgz#c3cb0974f40a6a0f8e905b96789eb41afa1cde3a" + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + +string-width@^1.0.0, string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +"string-width@^1.0.2 || 2", string-width@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string.prototype.padstart@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/string.prototype.padstart/-/string.prototype.padstart-3.0.0.tgz#5bcfad39f4649bb2d031292e19bcf0b510d4b242" + dependencies: + define-properties "^1.1.2" + es-abstract "^1.4.3" + function-bind "^1.0.2" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + dependencies: + safe-buffer "~5.1.0" + +stringify-entities@^1.0.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/stringify-entities/-/stringify-entities-1.3.2.tgz#a98417e5471fd227b3e45d3db1861c11caf668f7" + dependencies: + character-entities-html4 "^1.0.0" + character-entities-legacy "^1.0.0" + is-alphanumerical "^1.0.0" + is-hexadecimal "^1.0.0" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + dependencies: + ansi-regex "^3.0.0" + +strip-bom@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e" + dependencies: + is-utf8 "^0.2.0" + +strip-bom@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + +structured-source@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/structured-source/-/structured-source-3.0.2.tgz#dd802425e0f53dc4a6e7aca3752901a1ccda7af5" + dependencies: + boundary "^1.0.1" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + +supports-color@^4.1.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-4.5.0.tgz#be7a0de484dec5c5cddf8b3d59125044912f635b" + dependencies: + has-flag "^2.0.0" + +supports-color@^5.3.0: + version "5.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54" + dependencies: + has-flag "^3.0.0" + +table@^3.7.8: + version "3.8.3" + resolved "https://registry.yarnpkg.com/table/-/table-3.8.3.tgz#2bbc542f0fda9861a755d3947fefd8b3f513855f" + dependencies: + ajv "^4.7.0" + ajv-keywords "^1.0.0" + chalk "^1.1.1" + lodash "^4.0.0" + slice-ansi "0.0.4" + string-width "^2.0.0" + +tar@^4: + version "4.4.4" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.4.tgz#ec8409fae9f665a4355cc3b4087d0820232bb8cd" + dependencies: + chownr "^1.0.1" + fs-minipass "^1.2.5" + minipass "^2.3.3" + minizlib "^1.1.0" + mkdirp "^0.5.0" + safe-buffer "^5.1.2" + yallist "^3.0.2" + +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + +textlint@^10.2.1: + version "10.2.1" + resolved "https://registry.yarnpkg.com/textlint/-/textlint-10.2.1.tgz#ee22b7967d59cef7c74a04a5f4e8883134e5c79d" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-traverse" "^2.0.8" + "@textlint/feature-flag" "^3.0.4" + "@textlint/fixer-formatter" "^3.0.7" + "@textlint/kernel" "^2.0.9" + "@textlint/linter-formatter" "^3.0.7" + "@textlint/textlint-plugin-markdown" "^4.0.10" + "@textlint/textlint-plugin-text" "^3.0.10" + "@types/bluebird" "^3.5.18" + bluebird "^3.0.5" + debug "^2.1.0" + deep-equal "^1.0.1" + file-entry-cache "^2.0.0" + get-stdin "^5.0.1" + glob "^7.1.1" + interop-require "^1.0.0" + is-file "^1.0.0" + log-symbols "^1.0.2" + map-like "^2.0.0" + md5 "^2.2.1" + mkdirp "^0.5.0" + object-assign "^4.0.1" + optionator "^0.8.0" + path-to-glob-pattern "^1.0.2" + rc-config-loader "^2.0.1" + read-pkg "^1.1.0" + read-pkg-up "^3.0.0" + structured-source "^3.0.2" + try-resolve "^1.0.1" + unique-concat "^0.2.2" + +timed-out@^4.0.0, timed-out@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" + +to-vfile@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/to-vfile/-/to-vfile-2.2.0.tgz#342d1705e6df526d569b1fc8bfa29f1f36d6c416" + dependencies: + is-buffer "^1.1.4" + vfile "^2.0.0" + x-is-function "^1.0.4" + +tough-cookie@~2.3.3: + version "2.3.4" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.4.tgz#ec60cee38ac675063ffc97a5c18970578ee83655" + 
dependencies: + punycode "^1.4.1" + +traverse@^0.6.6: + version "0.6.6" + resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.6.6.tgz#cbdf560fd7b9af632502fed40f918c157ea97137" + +trim-trailing-lines@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.1.tgz#e0ec0810fd3c3f1730516b45f49083caaf2774d9" + +trim@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" + +trough@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.2.tgz#7f1663ec55c480139e2de5e486c6aef6cc24a535" + +try-resolve@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + dependencies: + prelude-ls "~1.1.2" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + +unherit@^1.0.4: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unherit/-/unherit-1.1.1.tgz#132748da3e88eab767e08fabfbb89c5e9d28628c" + dependencies: + inherits "^2.0.1" + xtend "^4.0.1" + +unified-args@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/unified-args/-/unified-args-5.1.0.tgz#1889200e072998a662e6e84d817d6f4b5f448dd1" + dependencies: + camelcase "^4.0.0" + chalk "^2.0.0" + chokidar "^1.5.1" + json5 "^0.5.1" + minimist "^1.2.0" + text-table "^0.2.0" + unified-engine "^5.1.0" + +unified-engine@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/unified-engine/-/unified-engine-5.1.0.tgz#30db83bcc76c821f773bb5a8a491aa0e2471e3d1" + dependencies: + concat-stream "^1.5.1" + debug "^3.1.0" + fault "^1.0.0" + fn-name "^2.0.1" + glob "^7.0.3" + ignore "^3.2.0" + is-empty "^1.0.0" + is-hidden "^1.0.1" + is-object "^1.0.1" + js-yaml "^3.6.1" + load-plugin "^2.0.0" + parse-json "^4.0.0" + to-vfile "^2.0.0" + trough "^1.0.0" + unist-util-inspect "^4.1.2" + vfile-reporter "^4.0.0" + vfile-statistics "^1.1.0" + x-is-function "^1.0.4" + x-is-string "^0.1.0" + xtend "^4.0.1" + +unified-lint-rule@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/unified-lint-rule/-/unified-lint-rule-1.0.3.tgz#e302b0c4a7ac428c0980e049a500e59528001299" + dependencies: + wrapped "^1.0.1" + +unified@^6.0.0, unified@^6.1.6: + version "6.2.0" + resolved "https://registry.yarnpkg.com/unified/-/unified-6.2.0.tgz#7fbd630f719126d67d40c644b7e3f617035f6dba" + dependencies: + bail "^1.0.0" + extend "^3.0.0" + is-plain-obj "^1.1.0" + trough "^1.0.0" + vfile "^2.0.0" + x-is-string "^0.1.0" + +unique-concat@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/unique-concat/-/unique-concat-0.2.2.tgz#9210f9bdcaacc5e1e3929490d7c019df96f18712" + +unist-util-inspect@^4.1.2: + version "4.1.3" + resolved "https://registry.yarnpkg.com/unist-util-inspect/-/unist-util-inspect-4.1.3.tgz#39470e6d77485db285966df78431219aa1287822" + dependencies: + 
is-empty "^1.0.0" + +unist-util-is@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-2.1.2.tgz#1193fa8f2bfbbb82150633f3a8d2eb9a1c1d55db" + +unist-util-modify-children@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-modify-children/-/unist-util-modify-children-1.1.2.tgz#c7f1b91712554ee59c47a05b551ed3e052a4e2d1" + dependencies: + array-iterate "^1.0.0" + +unist-util-remove-position@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-1.1.2.tgz#86b5dad104d0bbfbeb1db5f5c92f3570575c12cb" + dependencies: + unist-util-visit "^1.1.0" + +unist-util-stringify-position@^1.0.0, unist-util-stringify-position@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz#3f37fcf351279dcbca7480ab5889bb8a832ee1c6" + +unist-util-visit@^1.1.0, unist-util-visit@^1.1.3: + version "1.3.1" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-1.3.1.tgz#c019ac9337a62486be58531bc27e7499ae7d55c7" + dependencies: + unist-util-is "^2.1.1" + +untildify@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/untildify/-/untildify-2.1.0.tgz#17eb2807987f76952e9c0485fc311d06a826a2e0" + dependencies: + os-homedir "^1.0.0" + +unzip-response@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-2.0.1.tgz#d2f0f737d16b0615e72a6935ed04214572d56f97" + +url-parse-lax@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73" + dependencies: + prepend-http "^1.0.1" + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + dependencies: + prepend-http "^2.0.0" + +url-to-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" + +util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + +uuid@^3.1.0: + version "3.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" + +validate-npm-package-license@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz#81643bcbef1bdfecd4623793dc4648948ba98338" + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" + +vfile-location@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-2.0.3.tgz#083ba80e50968e8d420be49dd1ea9a992131df77" + +vfile-message@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-1.0.1.tgz#51a2ccd8a6b97a7980bb34efb9ebde9632e93677" + dependencies: + unist-util-stringify-position "^1.1.1" + +vfile-reporter@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/vfile-reporter/-/vfile-reporter-4.0.0.tgz#ea6f0ae1342f4841573985e05f941736f27de9da" + dependencies: + repeat-string "^1.5.0" + string-width 
"^1.0.0" + supports-color "^4.1.0" + unist-util-stringify-position "^1.0.0" + vfile-statistics "^1.1.0" + +vfile-statistics@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/vfile-statistics/-/vfile-statistics-1.1.1.tgz#a22fd4eb844c9eaddd781ad3b3246db88375e2e3" + +vfile@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/vfile/-/vfile-2.3.0.tgz#e62d8e72b20e83c324bc6c67278ee272488bf84a" + dependencies: + is-buffer "^1.1.4" + replace-ext "1.0.0" + unist-util-stringify-position "^1.0.0" + vfile-message "^1.0.0" + +wide-align@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + dependencies: + string-width "^1.0.2 || 2" + +wordwrap@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + +wrapped@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/wrapped/-/wrapped-1.0.1.tgz#c783d9d807b273e9b01e851680a938c87c907242" + dependencies: + co "3.1.0" + sliced "^1.0.1" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + +write@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/write/-/write-0.2.1.tgz#5fc03828e264cea3fe91455476f7a3c566cb0757" + dependencies: + mkdirp "^0.5.1" + +x-is-function@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/x-is-function/-/x-is-function-1.0.4.tgz#5d294dc3d268cbdd062580e0c5df77a391d1fa1e" + +x-is-string@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/x-is-string/-/x-is-string-0.1.0.tgz#474b50865af3a49a9c4657f05acd145458f77d82" + +xml-escape@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/xml-escape/-/xml-escape-1.1.0.tgz#3904c143fa8eb3a0030ec646d2902a2f1b706c44" + +xtend@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" + +yallist@^3.0.0, yallist@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.0.2.tgz#8452b4bb7e83c7c188d8041c1a837c773d6d8bb9" diff --git a/evidence/pool.go b/evidence/pool.go new file mode 100644 index 000000000..247629b6b --- /dev/null +++ b/evidence/pool.go @@ -0,0 +1,152 @@ +package evidence + +import ( + "fmt" + "sync" + + clist "github.com/tendermint/tendermint/libs/clist" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +// EvidencePool maintains a pool of valid evidence +// in an EvidenceStore. +type EvidencePool struct { + logger log.Logger + + evidenceStore *EvidenceStore + evidenceList *clist.CList // concurrent linked-list of evidence + + // needed to load validators to verify evidence + stateDB dbm.DB + + // latest state + mtx sync.Mutex + state sm.State +} + +func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool { + evpool := &EvidencePool{ + stateDB: stateDB, + state: sm.LoadState(stateDB), + logger: log.NewNopLogger(), + evidenceStore: evidenceStore, + evidenceList: clist.New(), + } + return evpool +} + +func (evpool *EvidencePool) EvidenceFront() *clist.CElement { + return evpool.evidenceList.Front() +} + +func (evpool *EvidencePool) EvidenceWaitChan() <-chan struct{} { + return evpool.evidenceList.WaitChan() +} + +// SetLogger sets the Logger. 
+func (evpool *EvidencePool) SetLogger(l log.Logger) {
+	evpool.logger = l
+}
+
+// PriorityEvidence returns the priority evidence.
+func (evpool *EvidencePool) PriorityEvidence() []types.Evidence {
+	return evpool.evidenceStore.PriorityEvidence()
+}
+
+// PendingEvidence returns all uncommitted evidence.
+func (evpool *EvidencePool) PendingEvidence() []types.Evidence {
+	return evpool.evidenceStore.PendingEvidence()
+}
+
+// State returns the current state of the evpool.
+func (evpool *EvidencePool) State() sm.State {
+	evpool.mtx.Lock()
+	defer evpool.mtx.Unlock()
+	return evpool.state
+}
+
+// Update stores the latest state, marks the block's evidence as committed,
+// and prunes committed or expired evidence from the pool.
+func (evpool *EvidencePool) Update(block *types.Block, state sm.State) {
+
+	// sanity check
+	if state.LastBlockHeight != block.Height {
+		panic(fmt.Sprintf("Failed EvidencePool.Update sanity check: got state.Height=%d with block.Height=%d", state.LastBlockHeight, block.Height))
+	}
+
+	// update the state
+	evpool.mtx.Lock()
+	evpool.state = state
+	evpool.mtx.Unlock()
+
+	// remove evidence from pending and mark committed
+	evpool.MarkEvidenceAsCommitted(block.Height, block.Evidence.Evidence)
+}
+
+// AddEvidence checks that the evidence is valid and adds it to the pool.
+func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
+
+	// TODO: check if we already have evidence for this
+	// validator at this height so we don't get spammed
+
+	if err := sm.VerifyEvidence(evpool.stateDB, evpool.State(), evidence); err != nil {
+		return err
+	}
+
+	// fetch the validator and return its voting power as its priority
+	// TODO: something better?
+	valset, _ := sm.LoadValidators(evpool.stateDB, evidence.Height())
+	_, val := valset.GetByAddress(evidence.Address())
+	priority := val.VotingPower
+
+	added := evpool.evidenceStore.AddNewEvidence(evidence, priority)
+	if !added {
+		// evidence already known, just ignore
+		return
+	}
+
+	evpool.logger.Info("Verified new evidence of byzantine behaviour", "evidence", evidence)
+
+	// add evidence to clist
+	evpool.evidenceList.PushBack(evidence)
+
+	return nil
+}
+
+// MarkEvidenceAsCommitted marks all the evidence as committed and removes it from the queue.
+func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []types.Evidence) {
+	// make a map of committed evidence to remove from the clist
+	blockEvidenceMap := make(map[string]struct{})
+	for _, ev := range evidence {
+		evpool.evidenceStore.MarkEvidenceAsCommitted(ev)
+		blockEvidenceMap[evMapKey(ev)] = struct{}{}
+	}
+
+	// remove committed evidence from the clist
+	maxAge := evpool.State().ConsensusParams.EvidenceParams.MaxAge
+	evpool.removeEvidence(height, maxAge, blockEvidenceMap)
+}
+
+func (evpool *EvidencePool) removeEvidence(height, maxAge int64, blockEvidenceMap map[string]struct{}) {
+	for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
+		ev := e.Value.(types.Evidence)
+
+		// Remove the evidence if it's already in a block
+		// or if it's now too old.
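+		// For example (illustrative numbers, not from this patch): with
+		// MaxAge=100000 and a block at height 500000, evidence from height
+		// 399999 or below is pruned here even if it was never committed.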
+ if _, ok := blockEvidenceMap[evMapKey(ev)]; ok || + ev.Height() < height-maxAge { + + // remove from clist + evpool.evidenceList.Remove(e) + e.DetachPrev() + } + } +} + +func evMapKey(ev types.Evidence) string { + return string(ev.Hash()) +} diff --git a/evidence/pool_test.go b/evidence/pool_test.go new file mode 100644 index 000000000..915cba327 --- /dev/null +++ b/evidence/pool_test.go @@ -0,0 +1,79 @@ +package evidence + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tendermint/libs/db" +) + +var mockState = sm.State{} + +func initializeValidatorState(valAddr []byte, height int64) dbm.DB { + stateDB := dbm.NewMemDB() + + // create validator set and state + valSet := &types.ValidatorSet{ + Validators: []*types.Validator{ + {Address: valAddr}, + }, + } + state := sm.State{ + LastBlockHeight: 0, + LastBlockTime: time.Now(), + Validators: valSet, + LastHeightValidatorsChanged: 1, + ConsensusParams: types.ConsensusParams{ + EvidenceParams: types.EvidenceParams{ + MaxAge: 1000000, + }, + }, + } + + // save all states up to height + for i := int64(0); i < height; i++ { + state.LastBlockHeight = i + sm.SaveState(stateDB, state) + } + + return stateDB +} + +func TestEvidencePool(t *testing.T) { + + valAddr := []byte("val1") + height := int64(5) + stateDB := initializeValidatorState(valAddr, height) + store := NewEvidenceStore(dbm.NewMemDB()) + pool := NewEvidencePool(stateDB, store) + + goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr) + badEvidence := types.MockBadEvidence{goodEvidence} + + // bad evidence + err := pool.AddEvidence(badEvidence) + assert.NotNil(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + <-pool.EvidenceWaitChan() + wg.Done() + }() + + err = pool.AddEvidence(goodEvidence) + assert.Nil(t, err) + wg.Wait() + + assert.Equal(t, 1, pool.evidenceList.Len()) + + // if we send it again, it shouldnt change the size + err = pool.AddEvidence(goodEvidence) + assert.Nil(t, err) + assert.Equal(t, 1, pool.evidenceList.Len()) +} diff --git a/evidence/reactor.go b/evidence/reactor.go new file mode 100644 index 000000000..bf11ac105 --- /dev/null +++ b/evidence/reactor.go @@ -0,0 +1,225 @@ +package evidence + +import ( + "fmt" + "reflect" + "time" + + amino "github.com/tendermint/go-amino" + + clist "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +const ( + EvidenceChannel = byte(0x38) + + maxMsgSize = 1048576 // 1MB TODO make it configurable + + broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often + peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount +) + +// EvidenceReactor handles evpool evidence broadcasting amongst peers. +type EvidenceReactor struct { + p2p.BaseReactor + evpool *EvidencePool + eventBus *types.EventBus +} + +// NewEvidenceReactor returns a new EvidenceReactor with the given config and evpool. +func NewEvidenceReactor(evpool *EvidencePool) *EvidenceReactor { + evR := &EvidenceReactor{ + evpool: evpool, + } + evR.BaseReactor = *p2p.NewBaseReactor("EvidenceReactor", evR) + return evR +} + +// SetLogger sets the Logger on the reactor and the underlying Evidence. 
+func (evR *EvidenceReactor) SetLogger(l log.Logger) {
+	evR.Logger = l
+	evR.evpool.SetLogger(l)
+}
+
+// OnStart implements cmn.Service
+func (evR *EvidenceReactor) OnStart() error {
+	return evR.BaseReactor.OnStart()
+}
+
+// GetChannels implements Reactor.
+// It returns the list of channels for this reactor.
+func (evR *EvidenceReactor) GetChannels() []*p2p.ChannelDescriptor {
+	return []*p2p.ChannelDescriptor{
+		&p2p.ChannelDescriptor{
+			ID:       EvidenceChannel,
+			Priority: 5,
+		},
+	}
+}
+
+// AddPeer implements Reactor.
+func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
+	go evR.broadcastEvidenceRoutine(peer)
+}
+
+// RemovePeer implements Reactor.
+func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+	// nothing to do
+}
+
+// Receive implements Reactor.
+// It adds any received evidence to the evpool.
+func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+	msg, err := decodeMsg(msgBytes)
+	if err != nil {
+		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		evR.Switch.StopPeerForError(src, err)
+		return
+	}
+	evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
+
+	switch msg := msg.(type) {
+	case *EvidenceListMessage:
+		for _, ev := range msg.Evidence {
+			err := evR.evpool.AddEvidence(ev)
+			if err != nil {
+				evR.Logger.Info("Evidence is not valid", "evidence", msg.Evidence, "err", err)
+				// punish peer
+				evR.Switch.StopPeerForError(src, err)
+			}
+		}
+	default:
+		evR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
+	}
+}
+
+// SetEventBus sets the event bus on the reactor.
+func (evR *EvidenceReactor) SetEventBus(b *types.EventBus) {
+	evR.eventBus = b
+}
+
+// Modeled after the mempool routine.
+// - Evidence accumulates in a clist.
+// - Each peer has a routine that iterates through the clist,
+//   sending available evidence to the peer.
+// - If we're waiting for new evidence and the list is not empty,
+//   start iterating from the beginning again.
+func (evR *EvidenceReactor) broadcastEvidenceRoutine(peer p2p.Peer) {
+	var next *clist.CElement
+	for {
+		// This happens because the CElement we were looking at got garbage
+		// collected (removed). That is, .NextWait() returned nil. Go ahead and
+		// start from the beginning.
+		if next == nil {
+			select {
+			case <-evR.evpool.EvidenceWaitChan(): // Wait until evidence is available
+				if next = evR.evpool.EvidenceFront(); next == nil {
+					continue
+				}
+			case <-peer.Quit():
+				return
+			case <-evR.Quit():
+				return
+			}
+		}
+
+		ev := next.Value.(types.Evidence)
+		msg, retry := evR.checkSendEvidenceMessage(peer, ev)
+		if msg != nil {
+			success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
+			retry = !success
+		}
+
+		if retry {
+			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
+			continue
+		}
+
+		afterCh := time.After(time.Second * broadcastEvidenceIntervalS)
+		select {
+		case <-afterCh:
+			// start from the beginning every tick.
+			// TODO: only do this if we're at the end of the list!
+			next = nil
+		case <-next.NextWaitChan():
+			// see the start of the for loop for nil check
+			next = next.Next()
+		case <-peer.Quit():
+			return
+		case <-evR.Quit():
+			return
+		}
+	}
+}
+
+// Returns the message to send to the peer, or nil if the evidence is invalid
+// for the peer. If the message is nil, retry=true means the caller should
+// sleep and try again.
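+// For example (illustrative numbers, not from this patch): with maxAge=100
+// and a peer at height 500, evidence from heights 400 through 500 is sent,
+// evidence above 500 causes a retry while the peer catches up, and evidence
+// below 400 is skipped.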
+func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evidence) (msg EvidenceMessage, retry bool) {
+
+	// make sure the peer is up to date
+	evHeight := ev.Height()
+	peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
+	if !ok {
+		evR.Logger.Info("Found peer without PeerState", "peer", peer)
+		return nil, true
+	}
+
+	// NOTE: We only send evidence to peers where
+	// peerHeight - maxAge <= evidenceHeight <= peerHeight
+	maxAge := evR.evpool.State().ConsensusParams.EvidenceParams.MaxAge
+	peerHeight := peerState.GetHeight()
+	if peerHeight < evHeight {
+		// peer is behind. sleep while it catches up
+		return nil, true
+	} else if peerHeight > evHeight+maxAge {
+		// evidence is too old, skip
+		// NOTE: if evidence is too old for an honest peer,
+		// then we're behind and either it already got committed or it never will!
+		evR.Logger.Info("Not sending peer old evidence", "peerHeight", peerHeight, "evHeight", evHeight, "maxAge", maxAge, "peer", peer)
+		return nil, false
+	}
+
+	// send evidence
+	msg = &EvidenceListMessage{[]types.Evidence{ev}}
+	return msg, false
+}
+
+// PeerState describes the state of a peer.
+type PeerState interface {
+	GetHeight() int64
+}
+
+//-----------------------------------------------------------------------------
+// Messages
+
+// EvidenceMessage is a message sent or received by the EvidenceReactor.
+type EvidenceMessage interface{}
+
+func RegisterEvidenceMessages(cdc *amino.Codec) {
+	cdc.RegisterInterface((*EvidenceMessage)(nil), nil)
+	cdc.RegisterConcrete(&EvidenceListMessage{},
+		"tendermint/evidence/EvidenceListMessage", nil)
+}
+
+func decodeMsg(bz []byte) (msg EvidenceMessage, err error) {
+	if len(bz) > maxMsgSize {
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
+	}
+	err = cdc.UnmarshalBinaryBare(bz, &msg)
+	return
+}
+
+//-------------------------------------
+
+// EvidenceListMessage contains a list of evidence.
+type EvidenceListMessage struct {
+	Evidence []types.Evidence
+}
+
+// String returns a string representation of the EvidenceListMessage.
+func (m *EvidenceListMessage) String() string {
+	return fmt.Sprintf("[EvidenceListMessage %v]", m.Evidence)
+}
diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go
new file mode 100644
index 000000000..1687f25a3
--- /dev/null
+++ b/evidence/reactor_test.go
@@ -0,0 +1,182 @@
+package evidence
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/go-kit/kit/log/term"
+
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
+
+	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/types"
+)
+
+// evidenceLogger is a TestingLogger which uses a different
+// color for each validator ("validator" key must exist).
+func evidenceLogger() log.Logger { + return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { + for i := 0; i < len(keyvals)-1; i += 2 { + if keyvals[i] == "validator" { + return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} + } + } + return term.FgBgColor{} + }) +} + +// connect N evidence reactors through N switches +func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*EvidenceReactor { + N := len(stateDBs) + reactors := make([]*EvidenceReactor, N) + logger := evidenceLogger() + for i := 0; i < N; i++ { + + store := NewEvidenceStore(dbm.NewMemDB()) + pool := NewEvidencePool(stateDBs[i], store) + reactors[i] = NewEvidenceReactor(pool) + reactors[i].SetLogger(logger.With("validator", i)) + } + + p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("EVIDENCE", reactors[i]) + return s + + }, p2p.Connect2Switches) + return reactors +} + +// wait for all evidence on all reactors +func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*EvidenceReactor) { + // wait for the evidence in all evpools + wg := new(sync.WaitGroup) + for i := 0; i < len(reactors); i++ { + wg.Add(1) + go _waitForEvidence(t, wg, evs, i, reactors) + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + timer := time.After(TIMEOUT) + select { + case <-timer: + t.Fatal("Timed out waiting for evidence") + case <-done: + } +} + +// wait for all evidence on a single evpool +func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, reactorIdx int, reactors []*EvidenceReactor) { + + evpool := reactors[reactorIdx].evpool + for len(evpool.PendingEvidence()) != len(evs) { + time.Sleep(time.Millisecond * 100) + } + + reapedEv := evpool.PendingEvidence() + // put the reaped evidence in a map so we can quickly check we got everything + evMap := make(map[string]types.Evidence) + for _, e := range reapedEv { + evMap[string(e.Hash())] = e + } + for i, expectedEv := range evs { + gotEv := evMap[string(expectedEv.Hash())] + assert.Equal(t, expectedEv, gotEv, + fmt.Sprintf("evidence at index %d on reactor %d don't match: %v vs %v", + i, reactorIdx, expectedEv, gotEv)) + } + + wg.Done() +} + +func sendEvidence(t *testing.T, evpool *EvidencePool, valAddr []byte, n int) types.EvidenceList { + evList := make([]types.Evidence, n) + for i := 0; i < n; i++ { + ev := types.NewMockGoodEvidence(int64(i+1), 0, valAddr) + err := evpool.AddEvidence(ev) + assert.Nil(t, err) + evList[i] = ev + } + return evList +} + +var ( + NUM_EVIDENCE = 10 + TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow +) + +func TestReactorBroadcastEvidence(t *testing.T) { + config := cfg.TestConfig() + N := 7 + + // create statedb for everyone + stateDBs := make([]dbm.DB, N) + valAddr := []byte("myval") + // we need validators saved for heights at least as high as we have evidence for + height := int64(NUM_EVIDENCE) + 10 + for i := 0; i < N; i++ { + stateDBs[i] = initializeValidatorState(valAddr, height) + } + + // make reactors from statedb + reactors := makeAndConnectEvidenceReactors(config, stateDBs) + + // set the peer height on each reactor + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + ps := peerState{height} + peer.Set(types.PeerStateKey, ps) + } + } + + // send a bunch of valid evidence to the first reactor's evpool + // and wait for them all to be received in the others + evList := sendEvidence(t, reactors[0].evpool, valAddr, 
NUM_EVIDENCE)
+	waitForEvidence(t, evList, reactors)
+}
+
+type peerState struct {
+	height int64
+}
+
+func (ps peerState) GetHeight() int64 {
+	return ps.height
+}
+
+func TestReactorSelectiveBroadcast(t *testing.T) {
+	config := cfg.TestConfig()
+
+	valAddr := []byte("myval")
+	height1 := int64(NUM_EVIDENCE) + 10
+	height2 := int64(NUM_EVIDENCE) / 2
+
+	// DB1 is ahead of DB2
+	stateDB1 := initializeValidatorState(valAddr, height1)
+	stateDB2 := initializeValidatorState(valAddr, height2)
+
+	// make reactors from statedb
+	reactors := makeAndConnectEvidenceReactors(config, []dbm.DB{stateDB1, stateDB2})
+	peer := reactors[0].Switch.Peers().List()[0]
+	ps := peerState{height2}
+	peer.Set(types.PeerStateKey, ps)
+
+	// send a bunch of valid evidence to the first reactor's evpool
+	evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE)
+
+	// only evidence up to the peer's height should make it through
+	waitForEvidence(t, evList[:NUM_EVIDENCE/2], reactors[1:2])
+
+	// peers should still be connected
+	peers := reactors[1].Switch.Peers().List()
+	assert.Equal(t, 1, len(peers))
+}
diff --git a/evidence/store.go b/evidence/store.go
new file mode 100644
index 000000000..20b37bdb2
--- /dev/null
+++ b/evidence/store.go
@@ -0,0 +1,190 @@
+package evidence
+
+import (
+	"fmt"
+
+	"github.com/tendermint/tendermint/types"
+	dbm "github.com/tendermint/tendermint/libs/db"
+)
+
+/*
+Requirements:
+	- Valid new evidence must be persisted immediately and never forgotten
+	- Uncommitted evidence must be continuously broadcast
+	- Uncommitted evidence has a partial order, the evidence's priority
+
+Impl:
+	- First commit atomically in outqueue, pending, lookup.
+	- Once broadcast, remove from outqueue. No need to sync
+	- Once committed, atomically remove from pending and update lookup.
+
+Schema for indexing evidence (note you need both height and hash to find a piece of evidence):
+
+"evidence-lookup"/<evidence-height>/<evidence-hash> -> EvidenceInfo
+"evidence-outqueue"/<priority>/<evidence-height>/<evidence-hash> -> EvidenceInfo
+"evidence-pending"/<evidence-height>/<evidence-hash> -> EvidenceInfo
+*/
+
+type EvidenceInfo struct {
+	Committed bool
+	Priority  int64
+	Evidence  types.Evidence
+}
+
+const (
+	baseKeyLookup   = "evidence-lookup"   // all evidence
+	baseKeyOutqueue = "evidence-outqueue" // not-yet broadcast
+	baseKeyPending  = "evidence-pending"  // broadcast but not committed
+)
+
+func keyLookup(evidence types.Evidence) []byte {
+	return keyLookupFromHeightAndHash(evidence.Height(), evidence.Hash())
+}
+
+// big endian padded hex
+func bE(h int64) string {
+	return fmt.Sprintf("%0.16X", h)
+}
+
+func keyLookupFromHeightAndHash(height int64, hash []byte) []byte {
+	return _key("%s/%s/%X", baseKeyLookup, bE(height), hash)
+}
+
+func keyOutqueue(evidence types.Evidence, priority int64) []byte {
+	return _key("%s/%s/%s/%X", baseKeyOutqueue, bE(priority), bE(evidence.Height()), evidence.Hash())
+}
+
+func keyPending(evidence types.Evidence) []byte {
+	return _key("%s/%s/%X", baseKeyPending, bE(evidence.Height()), evidence.Hash())
+}
+
+func _key(fmt_ string, o ...interface{}) []byte {
+	return []byte(fmt.Sprintf(fmt_, o...))
+}
+
+// EvidenceStore is a store of all the evidence we've seen, including
+// evidence that has been committed, evidence that has been verified but not broadcast,
+// and evidence that has been broadcast but not yet committed.
+type EvidenceStore struct {
+	db dbm.DB
+}
+
+func NewEvidenceStore(db dbm.DB) *EvidenceStore {
+	return &EvidenceStore{
+		db: db,
+	}
+}
+
+// PriorityEvidence returns the evidence from the outqueue, sorted by highest priority.
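+// The big-endian padded hex produced by bE() makes lexicographic key order
+// match numeric order, so the DB iterates the outqueue in ascending priority
+// and the slice is reversed below. Illustrative key (hash shortened): evidence
+// with priority 17 at height 2 lives at
+// "evidence-outqueue/0000000000000011/0000000000000002/AB12...".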
+func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) { + // reverse the order so highest priority is first + l := store.ListEvidence(baseKeyOutqueue) + l2 := make([]types.Evidence, len(l)) + for i := range l { + l2[i] = l[len(l)-1-i] + } + return l2 +} + +// PendingEvidence returns all known uncommitted evidence. +func (store *EvidenceStore) PendingEvidence() (evidence []types.Evidence) { + return store.ListEvidence(baseKeyPending) +} + +// ListEvidence lists the evidence for the given prefix key. +// It is wrapped by PriorityEvidence and PendingEvidence for convenience. +func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evidence) { + iter := dbm.IteratePrefix(store.db, []byte(prefixKey)) + for ; iter.Valid(); iter.Next() { + val := iter.Value() + + var ei EvidenceInfo + err := cdc.UnmarshalBinaryBare(val, &ei) + if err != nil { + panic(err) + } + evidence = append(evidence, ei.Evidence) + } + return evidence +} + +// GetEvidence fetches the evidence with the given height and hash. +func (store *EvidenceStore) GetEvidence(height int64, hash []byte) *EvidenceInfo { + key := keyLookupFromHeightAndHash(height, hash) + val := store.db.Get(key) + + if len(val) == 0 { + return nil + } + var ei EvidenceInfo + err := cdc.UnmarshalBinaryBare(val, &ei) + if err != nil { + panic(err) + } + return &ei +} + +// AddNewEvidence adds the given evidence to the database. +// It returns false if the evidence is already stored. +func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int64) bool { + // check if we already have seen it + ei_ := store.GetEvidence(evidence.Height(), evidence.Hash()) + if ei_ != nil && ei_.Evidence != nil { + return false + } + + ei := EvidenceInfo{ + Committed: false, + Priority: priority, + Evidence: evidence, + } + eiBytes := cdc.MustMarshalBinaryBare(ei) + + // add it to the store + key := keyOutqueue(evidence, priority) + store.db.Set(key, eiBytes) + + key = keyPending(evidence) + store.db.Set(key, eiBytes) + + key = keyLookup(evidence) + store.db.SetSync(key, eiBytes) + + return true +} + +// MarkEvidenceAsBroadcasted removes evidence from Outqueue. +func (store *EvidenceStore) MarkEvidenceAsBroadcasted(evidence types.Evidence) { + ei := store.getEvidenceInfo(evidence) + key := keyOutqueue(evidence, ei.Priority) + store.db.Delete(key) +} + +// MarkEvidenceAsCommitted removes evidence from pending and outqueue and sets the state to committed. 
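+// Note the evidence is not deleted outright: it remains in the lookup index
+// with Committed=true, which is how AddNewEvidence keeps rejecting duplicates
+// even after the evidence has been committed.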
+func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
+	// if it's committed, it's been broadcast
+	store.MarkEvidenceAsBroadcasted(evidence)
+
+	pendingKey := keyPending(evidence)
+	store.db.Delete(pendingKey)
+
+	ei := store.getEvidenceInfo(evidence)
+	ei.Committed = true
+
+	lookupKey := keyLookup(evidence)
+	store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei))
+}
+
+//---------------------------------------------------
+// utils
+
+func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInfo {
+	key := keyLookup(evidence)
+	var ei EvidenceInfo
+	b := store.db.Get(key)
+	err := cdc.UnmarshalBinaryBare(b, &ei)
+	if err != nil {
+		panic(err)
+	}
+	return ei
+}
diff --git a/evidence/store_test.go b/evidence/store_test.go
new file mode 100644
index 000000000..30dc1c4d5
--- /dev/null
+++ b/evidence/store_test.go
@@ -0,0 +1,109 @@
+package evidence
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/tendermint/tendermint/types"
+	dbm "github.com/tendermint/tendermint/libs/db"
+)
+
+//-------------------------------------------
+
+func TestStoreAddDuplicate(t *testing.T) {
+	assert := assert.New(t)
+
+	db := dbm.NewMemDB()
+	store := NewEvidenceStore(db)
+
+	priority := int64(10)
+	ev := types.NewMockGoodEvidence(2, 1, []byte("val1"))
+
+	added := store.AddNewEvidence(ev, priority)
+	assert.True(added)
+
+	// can't add twice
+	added = store.AddNewEvidence(ev, priority)
+	assert.False(added)
+}
+
+func TestStoreMark(t *testing.T) {
+	assert := assert.New(t)
+
+	db := dbm.NewMemDB()
+	store := NewEvidenceStore(db)
+
+	// before we do anything, priority/pending are empty
+	priorityEv := store.PriorityEvidence()
+	pendingEv := store.PendingEvidence()
+	assert.Equal(0, len(priorityEv))
+	assert.Equal(0, len(pendingEv))
+
+	priority := int64(10)
+	ev := types.NewMockGoodEvidence(2, 1, []byte("val1"))
+
+	added := store.AddNewEvidence(ev, priority)
+	assert.True(added)
+
+	// get the evidence, verify it, and check that it's uncommitted
+	ei := store.GetEvidence(ev.Height(), ev.Hash())
+	assert.Equal(ev, ei.Evidence)
+	assert.Equal(priority, ei.Priority)
+	assert.False(ei.Committed)
+
+	// new evidence should be returned in priority/pending
+	priorityEv = store.PriorityEvidence()
+	pendingEv = store.PendingEvidence()
+	assert.Equal(1, len(priorityEv))
+	assert.Equal(1, len(pendingEv))
+
+	// priority is now empty
+	store.MarkEvidenceAsBroadcasted(ev)
+	priorityEv = store.PriorityEvidence()
+	pendingEv = store.PendingEvidence()
+	assert.Equal(0, len(priorityEv))
+	assert.Equal(1, len(pendingEv))
+
+	// priority and pending are now empty
+	store.MarkEvidenceAsCommitted(ev)
+	priorityEv = store.PriorityEvidence()
+	pendingEv = store.PendingEvidence()
+	assert.Equal(0, len(priorityEv))
+	assert.Equal(0, len(pendingEv))
+
+	// evidence should show committed
+	ei = store.GetEvidence(ev.Height(), ev.Hash())
+	assert.Equal(ev, ei.Evidence)
+	assert.Equal(priority, ei.Priority)
+	assert.True(ei.Committed)
+}
+
+func TestStorePriority(t *testing.T) {
+	assert := assert.New(t)
+
+	db := dbm.NewMemDB()
+	store := NewEvidenceStore(db)
+
+	// sorted by priority and then height
+	cases := []struct {
+		ev       types.MockGoodEvidence
+		priority int64
+	}{
+		{types.NewMockGoodEvidence(2, 1, []byte("val1")), 17},
+		{types.NewMockGoodEvidence(5, 2, []byte("val2")), 15},
+		{types.NewMockGoodEvidence(10, 2, []byte("val2")), 13},
+		{types.NewMockGoodEvidence(100, 2, []byte("val2")), 11},
+		{types.NewMockGoodEvidence(90, 2, []byte("val2")), 11},
+		{types.NewMockGoodEvidence(80, 2, []byte("val2")), 11},
+	}
+
+	for _, c := range cases {
+		added := store.AddNewEvidence(c.ev, c.priority)
+		assert.True(added)
+	}
+
+	evList := store.PriorityEvidence()
+	for i, ev := range evList {
+		assert.Equal(ev, cases[i].ev)
+	}
+}
diff --git a/evidence/wire.go b/evidence/wire.go
new file mode 100644
index 000000000..fb3a177cc
--- /dev/null
+++ b/evidence/wire.go
@@ -0,0 +1,25 @@
+package evidence
+
+import (
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/types"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	RegisterEvidenceMessages(cdc)
+	crypto.RegisterAmino(cdc)
+	types.RegisterEvidences(cdc)
+	RegisterMockEvidences(cdc) // For testing
+}
+
+//-------------------------------------------
+
+func RegisterMockEvidences(cdc *amino.Codec) {
+	cdc.RegisterConcrete(types.MockGoodEvidence{},
+		"tendermint/MockGoodEvidence", nil)
+	cdc.RegisterConcrete(types.MockBadEvidence{},
+		"tendermint/MockBadEvidence", nil)
+}
diff --git a/libs/.editorconfig b/libs/.editorconfig
new file mode 100644
index 000000000..82f774362
--- /dev/null
+++ b/libs/.editorconfig
@@ -0,0 +1,19 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[Makefile]
+indent_style = tab
+
+[*.sh]
+indent_style = tab
+
+[*.proto]
+indent_style = space
+indent_size = 2
diff --git a/libs/.gitignore b/libs/.gitignore
new file mode 100644
index 000000000..a2ebfde29
--- /dev/null
+++ b/libs/.gitignore
@@ -0,0 +1,5 @@
+*.sw[opqr]
+vendor
+.glide
+
+pubsub/query/fuzz_test/output
diff --git a/libs/CHANGELOG.md b/libs/CHANGELOG.md
new file mode 100644
index 000000000..0f900c57f
--- /dev/null
+++ b/libs/CHANGELOG.md
@@ -0,0 +1,438 @@
+# Changelog
+
+## 0.9.0
+
+*June 24th, 2018*
+
+BREAKING:
+ - [events, pubsub] Removed - moved to github.com/tendermint/tendermint
+ - [merkle] Use 20 bytes of SHA256 instead of RIPEMD160. NOTE: this package is
+   moving to github.com/tendermint/go-crypto!
+ - [common] Remove gogoproto from KVPair types
+ - [common] Error simplification, #220
+
+FEATURES:
+
+ - [db/remotedb] New DB type using an external CLevelDB process via
+   GRPC
+ - [autofile] logjack command for piping stdin to a rotating file
+ - [bech32] New package. NOTE: should move out of here - it's just two small
+   functions
+ - [common] ColoredBytes([]byte) string for printing mixed ascii and bytes
+ - [db] DebugDB uses ColoredBytes()
+
+## 0.8.4
+
+*June 5, 2018*
+
+IMPROVEMENTS:
+
+ - [autofile] Flush on Stop; Close() method to Flush and close file
+
+## 0.8.3
+
+*May 21, 2018*
+
+FEATURES:
+
+ - [common] ASCIITrim()
+
+## 0.8.2 (April 23rd, 2018)
+
+FEATURES:
+
+ - [pubsub] TagMap, NewTagMap
+ - [merkle] SimpleProofsFromMap()
+ - [common] IsASCIIText()
+ - [common] PrefixEndBytes // e.g. increment or nil
+ - [common] BitArray.MarshalJSON/.UnmarshalJSON
+ - [common] BitArray uses 'x' not 'X' for String() and above.
+ - [db] DebugDB shows better colorized output
+
+BUG FIXES:
+
+ - [common] Fix TestParallelAbort nondeterministic failure #201/#202
+ - [db] PrefixDB Iterator/ReverseIterator fixes
+ - [db] DebugDB fixes
+
+## 0.8.1 (April 5th, 2018)
+
+FEATURES:
+
+ - [common] Error.Error() includes cause
+ - [common] IsEmpty() for 0 length
+
+## 0.8.0 (April 4th, 2018)
+
+BREAKING:
+
+ - [merkle] `PutVarint->PutUvarint` in encodeByteSlice
+ - [db] batch.WriteSync()
+ - [common] Refactored and fixed `Parallel` function
+ - [common] Refactored `Rand` functionality
+ - [common] Remove unused `Right/LeftPadString` functions
+ - [common] Remove StackError, introduce Error interface (to replace use of pkg/errors)
+
+FEATURES:
+
+ - [db] NewPrefixDB for a DB with all keys prefixed
+ - [db] NewDebugDB prints everything during operation
+ - [common] SplitAndTrim func
+ - [common] rand.Float64(), rand.Int63n(n), rand.Int31n(n) and global equivalents
+ - [common] HexBytes Format()
+
+BUG FIXES:
+
+ - [pubsub] Fix unsubscribing
+ - [cli] Return config errors
+ - [common] Fix WriteFileAtomic Windows bug
+
+## 0.7.1 (March 22, 2018)
+
+IMPROVEMENTS:
+
+ - glide -> dep
+
+BUG FIXES:
+
+ - [common] Fix panic in NewBitArray for negative bits
+ - [common] Fix and simplify WriteFileAtomic so it cleans up properly
+
+## 0.7.0 (February 20, 2018)
+
+BREAKING:
+
+ - [db] Major API upgrade. See `db/types.go`.
+ - [common] added `Quit() <-chan struct{}` to Service interface.
+   The returned channel is closed when service is stopped.
+ - [common] Remove HTTP functions
+ - [common] Heap.Push takes an `int`, new Heap.PushComparable takes the comparable.
+ - [logger] Removed. Use `log`
+ - [merkle] Major API update - uses cmn.KVPairs.
+ - [cli] WriteDemoConfig -> WriteConfigValues
+ - [all] Remove go-wire dependency!
+
+FEATURES:
+
+ - [db] New FSDB that uses the filesystem directly
+ - [common] HexBytes
+ - [common] KVPair and KI64Pair (protobuf based key-value pair objects)
+
+IMPROVEMENTS:
+
+ - [clist] add WaitChan() to CList, NextWaitChan() and PrevWaitChan()
+   to CElement.
+   These can be used instead of blocking `*Wait()` methods
+   if you need to be able to send a quit signal and not block forever
+ - [common] IsHex handles 0x-prefix
+
+BUG FIXES:
+
+ - [common] BitArray check for nil arguments
+ - [common] Fix memory leak in RepeatTimer
+
+## 0.6.0 (December 29, 2017)
+
+BREAKING:
+ - [cli] remove --root
+ - [pubsub] add String() method to Query interface
+
+IMPROVEMENTS:
+ - [common] use a thread-safe and well-seeded non-crypto rng
+
+BUG FIXES:
+ - [clist] fix misuse of wait group
+ - [common] introduce Ticker interface and logicalTicker for better testing of timers
+
+## 0.5.0 (December 5, 2017)
+
+BREAKING:
+ - [common] replace Service#Start, Service#Stop first return value (bool) with an
+   error (ErrAlreadyStarted, ErrAlreadyStopped)
+ - [common] replace Service#Reset first return value (bool) with an error
+ - [process] removed
+
+FEATURES:
+ - [common] IntInSlice and StringInSlice functions
+ - [pubsub/query] introduce `Condition` struct, expose `Operator`, and add `query.Conditions()`
+
+## 0.4.1 (November 27, 2017)
+
+FEATURES:
+ - [common] `Keys()` method on `CMap`
+
+IMPROVEMENTS:
+ - [log] complex types now encoded as "%+v" by default if `String()` method is undefined (previously resulted in error)
+ - [log] logger logs its own errors
+
+BUG FIXES:
+ - [common] fixed `Kill()` to build on Windows (Windows does not have `syscall.Kill`)
+
+## 0.4.0 (October 26, 2017)
+
+BREAKING:
+ - [common] GoPath is now a function
+ - [db] `DB` and `Iterator` interfaces have new methods to better support iteration
+
+FEATURES:
+ - [autofile] `Read([]byte)` and `Write([]byte)` methods on `Group` to support binary WAL
+ - [common] `Kill()` sends SIGTERM to the current process
+
+IMPROVEMENTS:
+ - comments and linting
+
+BUG FIXES:
+ - [events] fix allocation error prefixing cache with 1000 empty events
+
+## 0.3.2 (October 2, 2017)
+
+BUG FIXES:
+
+- [autofile] fix AutoFile.Sync() to open file if it's been closed
+- [db] fix MemDb.Close() to not empty the database (i.e. it's just a noop)
+
+## 0.3.1 (September 22, 2017)
+
+BUG FIXES:
+
+- [common] fix WriteFileAtomic to not use /tmp, which can be on another device
+
+## 0.3.0 (September 22, 2017)
+
+BREAKING CHANGES:
+
+- [log] logger functions no longer return an error
+- [common] NewBaseService takes the new logger
+- [cli] RunCaptureWithArgs now captures stderr and stdout
+  - +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error)
+  - -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error)
+
+FEATURES:
+
+- [common] various common HTTP functionality
+- [common] Date range parsing from string (e.g. "2015-12-31:2017-12-31")
"2015-12-31:2017-12-31") +- [common] ProtocolAndAddress function +- [pubsub] New package for publish-subscribe with more advanced filtering + +BUG FIXES: + +- [common] fix atomicity of WriteFileAtomic by calling fsync +- [db] fix memDb iteration index out of range +- [autofile] fix Flush by calling fsync + +## 0.2.2 (June 16, 2017) + +FEATURES: + +- [common] IsHex and StripHex for handling `0x` prefixed hex strings +- [log] NewTracingLogger returns a logger that output error traces, ala `github.com/pkg/errors` + +IMPROVEMENTS: + +- [cli] Error handling for tests +- [cli] Support dashes in ENV variables + +BUG FIXES: + +- [flowrate] Fix non-deterministic test failures + +## 0.2.1 (June 2, 2017) + +FEATURES: + +- [cli] Log level parsing moved here from tendermint repo + +## 0.2.0 (May 18, 2017) + +BREAKING CHANGES: + +- [common] NewBaseService takes the new logger + + +FEATURES: + +- [cli] New library to standardize building command line tools +- [log] New logging library + +BUG FIXES: + +- [autofile] Close file before rotating + +## 0.1.0 (May 1, 2017) + +Initial release, combines what were previously independent repos: + +- go-autofile +- go-clist +- go-common +- go-db +- go-events +- go-flowrate +- go-logger +- go-merkle +- go-process + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/libs/README.md b/libs/README.md new file mode 100644 index 000000000..9ea618dbd --- /dev/null +++ b/libs/README.md @@ -0,0 +1,49 @@ +# TMLIBS + +This repo is a home for various small packages. + +## autofile + +Autofile is file access with automatic log rotation. A group of files is maintained and rotation happens +when the leading file gets too big. Provides a reader for reading from the file group. + +## cli + +CLI wraps the `cobra` and `viper` packages and handles some common elements of building a CLI like flags and env vars for the home directory and the logger. + +## clist + +Clist provides a linekd list that is safe for concurrent access by many readers. + +## common + +Common provides a hodgepodge of useful functions. + +## db + +DB provides a database interface and a number of implementions, including ones using an in-memory map, the filesystem directory structure, +an implemention of LevelDB in Go, and the official LevelDB in C. + +## events + +Events is a synchronous PubSub package. + +## flowrate + +Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` method. + +## log + +Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. + +## merkle + +Merkle provides a simple static merkle tree and corresponding proofs. + +## process + +Process is a simple utility for spawning OS processes. + +## pubsub + +PubSub is an asynchronous PubSub package. 
diff --git a/libs/autofile/README.md b/libs/autofile/README.md
new file mode 100644
index 000000000..23799200c
--- /dev/null
+++ b/libs/autofile/README.md
@@ -0,0 +1 @@
+# go-autofile
diff --git a/libs/autofile/autofile.go b/libs/autofile/autofile.go
new file mode 100644
index 000000000..313da6789
--- /dev/null
+++ b/libs/autofile/autofile.go
@@ -0,0 +1,142 @@
+package autofile
+
+import (
+	"os"
+	"sync"
+	"time"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+/* AutoFile usage
+
+// Create/Append to ./autofile_test
+af, err := OpenAutoFile("autofile_test")
+if err != nil {
+	panic(err)
+}
+
+// Stream of writes.
+// During this time, the file may be moved e.g. by logRotate.
+for i := 0; i < 60; i++ {
+	af.Write([]byte(Fmt("LOOP(%v)", i)))
+	time.Sleep(time.Second)
+}
+
+// Close the AutoFile
+err = af.Close()
+if err != nil {
+	panic(err)
+}
+*/
+
+const autoFileOpenDuration = 1000 * time.Millisecond
+
+// Automatically closes and re-opens file for writing.
+// This is useful for using a log file with the logrotate tool.
+type AutoFile struct {
+	ID     string
+	Path   string
+	ticker *time.Ticker
+	mtx    sync.Mutex
+	file   *os.File
+}
+
+func OpenAutoFile(path string) (af *AutoFile, err error) {
+	af = &AutoFile{
+		ID:     cmn.RandStr(12) + ":" + path,
+		Path:   path,
+		ticker: time.NewTicker(autoFileOpenDuration),
+	}
+	if err = af.openFile(); err != nil {
+		return
+	}
+	go af.processTicks()
+	sighupWatchers.addAutoFile(af)
+	return
+}
+
+func (af *AutoFile) Close() error {
+	af.ticker.Stop()
+	err := af.closeFile()
+	sighupWatchers.removeAutoFile(af)
+	return err
+}
+
+func (af *AutoFile) processTicks() {
+	for {
+		_, ok := <-af.ticker.C
+		if !ok {
+			return // Done.
+		}
+		af.closeFile()
+	}
+}
+
+func (af *AutoFile) closeFile() (err error) {
+	af.mtx.Lock()
+	defer af.mtx.Unlock()
+
+	file := af.file
+	if file == nil {
+		return nil
+	}
+	af.file = nil
+	return file.Close()
+}
+
+func (af *AutoFile) Write(b []byte) (n int, err error) {
+	af.mtx.Lock()
+	defer af.mtx.Unlock()
+
+	if af.file == nil {
+		if err = af.openFile(); err != nil {
+			return
+		}
+	}
+
+	n, err = af.file.Write(b)
+	return
+}
+
+func (af *AutoFile) Sync() error {
+	af.mtx.Lock()
+	defer af.mtx.Unlock()
+
+	if af.file == nil {
+		if err := af.openFile(); err != nil {
+			return err
+		}
+	}
+	return af.file.Sync()
+}
+
+func (af *AutoFile) openFile() error {
+	file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
+	if err != nil {
+		return err
+	}
+	af.file = file
+	return nil
+}
+
+func (af *AutoFile) Size() (int64, error) {
+	af.mtx.Lock()
+	defer af.mtx.Unlock()
+
+	if af.file == nil {
+		err := af.openFile()
+		if err != nil {
+			if os.IsNotExist(err) {
+				return 0, nil
+			}
+			return -1, err
+		}
+	}
+	stat, err := af.file.Stat()
+	if err != nil {
+		return -1, err
+	}
+	return stat.Size(), nil
+}
diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go
new file mode 100644
index 000000000..b39fb7cf3
--- /dev/null
+++ b/libs/autofile/autofile_test.go
@@ -0,0 +1,71 @@
+package autofile
+
+import (
+	"os"
+	"sync/atomic"
+	"syscall"
+	"testing"
+	"time"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+func TestSIGHUP(t *testing.T) {
+
+	// First, create an AutoFile writing to a tempfile dir
+	file, name := cmn.Tempfile("sighup_test")
+	if err := file.Close(); err != nil {
+		t.Fatalf("Error creating tempfile: %v", err)
+	}
+	// Here is the actual AutoFile
+	af, err := OpenAutoFile(name)
+	if err != nil {
+		t.Fatalf("Error creating autofile: %v", err)
+	}
+
+	// Write to
the file. + _, err = af.Write([]byte("Line 1\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 2\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + + // Move the file over + err = os.Rename(name, name+"_old") + if err != nil { + t.Fatalf("Error moving autofile: %v", err) + } + + // Send SIGHUP to self. + oldSighupCounter := atomic.LoadInt32(&sighupCounter) + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + + // Wait a bit... signals are not handled synchronously. + for atomic.LoadInt32(&sighupCounter) == oldSighupCounter { + time.Sleep(time.Millisecond * 10) + } + + // Write more to the file. + _, err = af.Write([]byte("Line 3\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 4\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + if err := af.Close(); err != nil { + t.Fatalf("Error closing autofile") + } + + // Both files should exist + if body := cmn.MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { + t.Errorf("Unexpected body %s", body) + } + if body := cmn.MustReadFile(name); string(body) != "Line 3\nLine 4\n" { + t.Errorf("Unexpected body %s", body) + } +} diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go new file mode 100644 index 000000000..17b482bed --- /dev/null +++ b/libs/autofile/cmd/logjack.go @@ -0,0 +1,107 @@ +package main + +import ( + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const Version = "0.0.1" +const readBufferSize = 1024 // 1KB at a time + +// Parse command-line options +func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { + var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) + var chopSizeStr, limitSizeStr string + flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") + flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") + flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). Remove old files.") + flagSet.BoolVar(&version, "version", false, "Version") + flagSet.Parse(os.Args[1:]) + chopSize = parseBytesize(chopSizeStr) + limitSize = parseBytesize(limitSizeStr) + return +} + +func main() { + + // Read options + headPath, chopSize, limitSize, version := parseFlags() + if version { + fmt.Printf("logjack version %v\n", Version) + return + } + + // Open Group + group, err := auto.OpenGroup(headPath) + if err != nil { + fmt.Printf("logjack couldn't create output file %v\n", headPath) + os.Exit(1) + } + group.SetHeadSizeLimit(chopSize) + group.SetTotalSizeLimit(limitSize) + err = group.Start() + if err != nil { + fmt.Printf("logjack couldn't start with file %v\n", headPath) + os.Exit(1) + } + + go func() { + // Forever, read from stdin and write to AutoFile. 
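+		// NOTE: AutoFile closes its descriptor on a ~1s ticker and lazily
+		// re-opens it on the next Write, so an external rotator (or the
+		// SIGHUP watcher) can safely move the head file out from under us.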
+		buf := make([]byte, readBufferSize)
+		for {
+			n, err := os.Stdin.Read(buf)
+			group.Write(buf[:n])
+			group.Flush()
+			if err != nil {
+				group.Stop()
+				if err == io.EOF {
+					os.Exit(0)
+				} else {
+					fmt.Println("logjack errored:", err)
+					os.Exit(1)
+				}
+			}
+		}
+	}()
+
+	// Trap signal
+	cmn.TrapSignal(func() {
+		fmt.Println("logjack shutting down")
+	})
+}
+
+func parseBytesize(chopSize string) int64 {
+	// Handle suffix multiplier
+	var multiplier int64 = 1
+	if strings.HasSuffix(chopSize, "T") {
+		multiplier = 1024 * 1024 * 1024 * 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+	if strings.HasSuffix(chopSize, "G") {
+		multiplier = 1024 * 1024 * 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+	if strings.HasSuffix(chopSize, "M") {
+		multiplier = 1024 * 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+	if strings.HasSuffix(chopSize, "K") {
+		multiplier = 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+
+	// Parse the numeric part
+	chopSizeInt, err := strconv.Atoi(chopSize)
+	if err != nil {
+		panic(err)
+	}
+
+	return int64(chopSizeInt) * multiplier
+}
diff --git a/libs/autofile/group.go b/libs/autofile/group.go
new file mode 100644
index 000000000..b4368ed9e
--- /dev/null
+++ b/libs/autofile/group.go
@@ -0,0 +1,747 @@
+package autofile
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+const (
+	groupCheckDuration    = 5000 * time.Millisecond
+	defaultHeadSizeLimit  = 10 * 1024 * 1024       // 10MB
+	defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB
+	maxFilesToRemove      = 4                      // needs to be greater than 1
+)
+
+/*
+You can open a Group to keep restrictions on an AutoFile, like
+the maximum size of each chunk, and/or the total amount of bytes
+stored in the group.
+
+The first file to be written in the Group.Dir is the head file.
+
+	Dir/
+	- <HeadPath>
+
+Once the Head file reaches the size limit, it will be rotated.
+
+	Dir/
+	- <HeadPath>.000 // First rolled file
+	- <HeadPath>     // New head path, starts empty.
+	                 // The implicit index is 001.
+
+As more files are written, the index numbers grow...
+
+	Dir/
+	- <HeadPath>.000 // First rolled file
+	- <HeadPath>.001 // Second rolled file
+	- ...
+	- <HeadPath>     // New head path
+
+The Group can also be used to binary-search for some line,
+assuming that marker lines are written occasionally.
+*/
+type Group struct {
+	cmn.BaseService
+
+	ID             string
+	Head           *AutoFile // The head AutoFile to write to
+	headBuf        *bufio.Writer
+	Dir            string // Directory that contains .Head
+	ticker         *time.Ticker
+	mtx            sync.Mutex
+	headSizeLimit  int64
+	totalSizeLimit int64
+	minIndex       int // Includes head
+	maxIndex       int // Includes head, where Head will move to
+
+	// TODO: When we start deleting files, we need to start tracking GroupReaders
+	// and their dependencies.
+}
+
+// OpenGroup creates a new Group with head at headPath. It returns an error if
+// it fails to open the head file.
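+// NOTE: the directory containing headPath must already exist; only the head
+// file itself is created on demand.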
+func OpenGroup(headPath string) (g *Group, err error) { + dir := path.Dir(headPath) + head, err := OpenAutoFile(headPath) + if err != nil { + return nil, err + } + + g = &Group{ + ID: "group:" + head.ID, + Head: head, + headBuf: bufio.NewWriterSize(head, 4096*10), + Dir: dir, + ticker: time.NewTicker(groupCheckDuration), + headSizeLimit: defaultHeadSizeLimit, + totalSizeLimit: defaultTotalSizeLimit, + minIndex: 0, + maxIndex: 0, + } + g.BaseService = *cmn.NewBaseService(nil, "Group", g) + + gInfo := g.readGroupInfo() + g.minIndex = gInfo.MinIndex + g.maxIndex = gInfo.MaxIndex + return +} + +// OnStart implements Service by starting the goroutine that checks file and +// group limits. +func (g *Group) OnStart() error { + go g.processTicks() + return nil +} + +// OnStop implements Service by stopping the goroutine described above. +// NOTE: g.Head must be closed separately using Close. +func (g *Group) OnStop() { + g.ticker.Stop() + g.Flush() // flush any uncommitted data +} + +// Close closes the head file. The group must be stopped by this moment. +func (g *Group) Close() { + g.Flush() // flush any uncommitted data + + g.mtx.Lock() + _ = g.Head.closeFile() + g.mtx.Unlock() +} + +// SetHeadSizeLimit allows you to overwrite default head size limit - 10MB. +func (g *Group) SetHeadSizeLimit(limit int64) { + g.mtx.Lock() + g.headSizeLimit = limit + g.mtx.Unlock() +} + +// HeadSizeLimit returns the current head size limit. +func (g *Group) HeadSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headSizeLimit +} + +// SetTotalSizeLimit allows you to overwrite default total size limit of the +// group - 1GB. +func (g *Group) SetTotalSizeLimit(limit int64) { + g.mtx.Lock() + g.totalSizeLimit = limit + g.mtx.Unlock() +} + +// TotalSizeLimit returns total size limit of the group. +func (g *Group) TotalSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.totalSizeLimit +} + +// MaxIndex returns index of the last file in the group. +func (g *Group) MaxIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.maxIndex +} + +// MinIndex returns index of the first file in the group. +func (g *Group) MinIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.minIndex +} + +// Write writes the contents of p into the current head of the group. It +// returns the number of bytes written. If nn < len(p), it also returns an +// error explaining why the write is short. +// NOTE: Writes are buffered so they don't write synchronously +// TODO: Make it halt if space is unavailable +func (g *Group) Write(p []byte) (nn int, err error) { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headBuf.Write(p) +} + +// WriteLine writes line into the current head of the group. It also appends "\n". +// NOTE: Writes are buffered so they don't write synchronously +// TODO: Make it halt if space is unavailable +func (g *Group) WriteLine(line string) error { + g.mtx.Lock() + defer g.mtx.Unlock() + _, err := g.headBuf.Write([]byte(line + "\n")) + return err +} + +// Flush writes any buffered data to the underlying file and commits the +// current content of the file to stable storage. +func (g *Group) Flush() error { + g.mtx.Lock() + defer g.mtx.Unlock() + err := g.headBuf.Flush() + if err == nil { + err = g.Head.Sync() + } + return err +} + +func (g *Group) processTicks() { + for { + _, ok := <-g.ticker.C + if !ok { + return // Done. 
+		}
+		g.checkHeadSizeLimit()
+		g.checkTotalSizeLimit()
+	}
+}
+
+// NOTE: for testing
+func (g *Group) stopTicker() {
+	g.ticker.Stop()
+}
+
+// NOTE: this function is called manually in tests.
+func (g *Group) checkHeadSizeLimit() {
+	limit := g.HeadSizeLimit()
+	if limit == 0 {
+		return
+	}
+	size, err := g.Head.Size()
+	if err != nil {
+		panic(err)
+	}
+	if size >= limit {
+		g.RotateFile()
+	}
+}
+
+func (g *Group) checkTotalSizeLimit() {
+	limit := g.TotalSizeLimit()
+	if limit == 0 {
+		return
+	}
+
+	gInfo := g.readGroupInfo()
+	totalSize := gInfo.TotalSize
+	for i := 0; i < maxFilesToRemove; i++ {
+		index := gInfo.MinIndex + i
+		if totalSize < limit {
+			return
+		}
+		if index == gInfo.MaxIndex {
+			// Special degenerate case, just do nothing.
+			log.Println("WARNING: Group's head " + g.Head.Path + " may grow without bound")
+			return
+		}
+		pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex)
+		fileInfo, err := os.Stat(pathToRemove)
+		if err != nil {
+			log.Println("WARNING: Failed to fetch info for file @" + pathToRemove)
+			continue
+		}
+		err = os.Remove(pathToRemove)
+		if err != nil {
+			log.Println(err)
+			return
+		}
+		totalSize -= fileInfo.Size()
+	}
+}
+
+// RotateFile causes the group to close the current head and assign it some index.
+// Note it does not create a new head.
+func (g *Group) RotateFile() {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+
+	headPath := g.Head.Path
+
+	if err := g.Head.closeFile(); err != nil {
+		panic(err)
+	}
+
+	indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1)
+	if err := os.Rename(headPath, indexPath); err != nil {
+		panic(err)
+	}
+
+	g.maxIndex++
+}
+
+// NewReader returns a new group reader.
+// CONTRACT: Caller must close the returned GroupReader.
+func (g *Group) NewReader(index int) (*GroupReader, error) {
+	r := newGroupReader(g)
+	err := r.SetIndex(index)
+	if err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// Returns -1 if line comes after, 0 if found, 1 if line comes before.
+type SearchFunc func(line string) (int, error)
+
+// Searches for the right file in Group, then returns a GroupReader to start
+// streaming lines.
+// Returns true if an exact match was found, otherwise returns the next greater
+// line that starts with prefix.
+// CONTRACT: Caller must close the returned GroupReader.
+func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) {
+	g.mtx.Lock()
+	minIndex, maxIndex := g.minIndex, g.maxIndex
+	g.mtx.Unlock()
+	// Now minIndex/maxIndex may change meanwhile,
+	// but it shouldn't be a big deal
+	// (maybe we'll want to limit scanUntil though)
+
+	for {
+		curIndex := (minIndex + maxIndex + 1) / 2
+
+		// Base case, when there's only 1 choice left.
+		if minIndex == maxIndex {
+			r, err := g.NewReader(maxIndex)
+			if err != nil {
+				return nil, false, err
+			}
+			match, err := scanUntil(r, prefix, cmp)
+			if err != nil {
+				r.Close()
+				return nil, false, err
+			}
+			return r, match, err
+		}
+
+		// Read starting roughly at the middle file,
+		// until we find a line that has the prefix.
+		r, err := g.NewReader(curIndex)
+		if err != nil {
+			return nil, false, err
+		}
+		foundIndex, line, err := scanNext(r, prefix)
+		r.Close()
+		if err != nil {
+			return nil, false, err
+		}
+
+		// Compare this line to our search query.
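+		// val < 0 means the line we want comes later, so search later files;
+		// val == 0 is an exact hit; val > 0 means we overshot, so search
+		// earlier files.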
+ val, err := cmp(line)
+ if err != nil {
+ return nil, false, err
+ }
+ if val < 0 {
+ // Line will come later
+ minIndex = foundIndex
+ } else if val == 0 {
+ // Stroke of luck, found the line
+ r, err := g.NewReader(foundIndex)
+ if err != nil {
+ return nil, false, err
+ }
+ match, err := scanUntil(r, prefix, cmp)
+ if err != nil {
+ r.Close()
+ return nil, false, err
+ }
+ if !match {
+ panic("Expected match to be true")
+ }
+ return r, true, nil
+ } else {
+ // We passed it
+ maxIndex = curIndex - 1
+ }
+ }
+}
+
+// Scans and returns the first line that starts with 'prefix'.
+// Consumes the line and returns it.
+func scanNext(r *GroupReader, prefix string) (int, string, error) {
+ for {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, "", err
+ }
+ if !strings.HasPrefix(line, prefix) {
+ continue
+ }
+ index := r.CurIndex()
+ return index, line, nil
+ }
+}
+
+// Returns true iff an exact match was found.
+// Pushes the line back, does not consume it.
+func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) {
+ for {
+ line, err := r.ReadLine()
+ if err != nil {
+ return false, err
+ }
+ if !strings.HasPrefix(line, prefix) {
+ continue
+ }
+ val, err := cmp(line)
+ if err != nil {
+ return false, err
+ }
+ if val < 0 {
+ continue
+ } else if val == 0 {
+ r.PushLine(line)
+ return true, nil
+ } else {
+ r.PushLine(line)
+ return false, nil
+ }
+ }
+}
+
+// FindLast searches backwards for the last line in Group with prefix.
+// Scans each file forward until the end to find the last match.
+func (g *Group) FindLast(prefix string) (match string, found bool, err error) {
+ g.mtx.Lock()
+ minIndex, maxIndex := g.minIndex, g.maxIndex
+ g.mtx.Unlock()
+
+ r, err := g.NewReader(maxIndex)
+ if err != nil {
+ return "", false, err
+ }
+ defer r.Close()
+
+ // Open files from the back and read
+GROUP_LOOP:
+ for i := maxIndex; i >= minIndex; i-- {
+ err := r.SetIndex(i)
+ if err != nil {
+ return "", false, err
+ }
+ // Scan each line and test whether the line matches
+ for {
+ line, err := r.ReadLine()
+ if err == io.EOF {
+ if found {
+ return match, found, nil
+ }
+ continue GROUP_LOOP
+ } else if err != nil {
+ return "", false, err
+ }
+ if strings.HasPrefix(line, prefix) {
+ match = line
+ found = true
+ }
+ if r.CurIndex() > i {
+ if found {
+ return match, found, nil
+ }
+ continue GROUP_LOOP
+ }
+ }
+ }
+
+ return
+}
+
+// GroupInfo holds information about the group.
+type GroupInfo struct {
+ MinIndex int // index of the first file in the group, including head
+ MaxIndex int // index of the last file in the group, including head
+ TotalSize int64 // total size of the group
+ HeadSize int64 // size of the head
+}
+
+// Returns info after scanning all files in g.Head's dir.
+func (g *Group) ReadGroupInfo() GroupInfo {
+ g.mtx.Lock()
+ defer g.mtx.Unlock()
+ return g.readGroupInfo()
+}
+
+// Index includes the head.
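+// Numbered files are named "<headPath>.NNN" (see filePathForIndex below); the
+// head itself counts as index MaxIndex, one past the highest numbered file.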
+// CONTRACT: caller should have called g.mtx.Lock +func (g *Group) readGroupInfo() GroupInfo { + groupDir := filepath.Dir(g.Head.Path) + headBase := filepath.Base(g.Head.Path) + var minIndex, maxIndex int = -1, -1 + var totalSize, headSize int64 = 0, 0 + + dir, err := os.Open(groupDir) + if err != nil { + panic(err) + } + defer dir.Close() + fiz, err := dir.Readdir(0) + if err != nil { + panic(err) + } + + // For each file in the directory, filter by pattern + for _, fileInfo := range fiz { + if fileInfo.Name() == headBase { + fileSize := fileInfo.Size() + totalSize += fileSize + headSize = fileSize + continue + } else if strings.HasPrefix(fileInfo.Name(), headBase) { + fileSize := fileInfo.Size() + totalSize += fileSize + indexedFilePattern := regexp.MustCompile(`^.+\.([0-9]{3,})$`) + submatch := indexedFilePattern.FindSubmatch([]byte(fileInfo.Name())) + if len(submatch) != 0 { + // Matches + fileIndex, err := strconv.Atoi(string(submatch[1])) + if err != nil { + panic(err) + } + if maxIndex < fileIndex { + maxIndex = fileIndex + } + if minIndex == -1 || fileIndex < minIndex { + minIndex = fileIndex + } + } + } + } + + // Now account for the head. + if minIndex == -1 { + // If there were no numbered files, + // then the head is index 0. + minIndex, maxIndex = 0, 0 + } else { + // Otherwise, the head file is 1 greater + maxIndex++ + } + return GroupInfo{minIndex, maxIndex, totalSize, headSize} +} + +func filePathForIndex(headPath string, index int, maxIndex int) string { + if index == maxIndex { + return headPath + } + return fmt.Sprintf("%v.%03d", headPath, index) +} + +//-------------------------------------------------------------------------------- + +// GroupReader provides an interface for reading from a Group. +type GroupReader struct { + *Group + mtx sync.Mutex + curIndex int + curFile *os.File + curReader *bufio.Reader + curLine []byte +} + +func newGroupReader(g *Group) *GroupReader { + return &GroupReader{ + Group: g, + curIndex: 0, + curFile: nil, + curReader: nil, + curLine: nil, + } +} + +// Close closes the GroupReader by closing the cursor file. +func (gr *GroupReader) Close() error { + gr.mtx.Lock() + defer gr.mtx.Unlock() + + if gr.curReader != nil { + err := gr.curFile.Close() + gr.curIndex = 0 + gr.curReader = nil + gr.curFile = nil + gr.curLine = nil + return err + } + return nil +} + +// Read implements io.Reader, reading bytes from the current Reader +// incrementing index until enough bytes are read. +func (gr *GroupReader) Read(p []byte) (n int, err error) { + lenP := len(p) + if lenP == 0 { + return 0, errors.New("given empty slice") + } + + gr.mtx.Lock() + defer gr.mtx.Unlock() + + // Open file if not open yet + if gr.curReader == nil { + if err = gr.openFile(gr.curIndex); err != nil { + return 0, err + } + } + + // Iterate over files until enough bytes are read + var nn int + for { + nn, err = gr.curReader.Read(p[n:]) + n += nn + if err == io.EOF { + if n >= lenP { + return n, nil + } + // Open the next file + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + return n, err1 + } + } else if err != nil { + return n, err + } else if nn == 0 { // empty file + return n, err + } + } +} + +// ReadLine reads a line (without delimiter). +// just return io.EOF if no new lines found. 
+func (gr *GroupReader) ReadLine() (string, error) {
+ gr.mtx.Lock()
+ defer gr.mtx.Unlock()
+
+ // From PushLine
+ if gr.curLine != nil {
+ line := string(gr.curLine)
+ gr.curLine = nil
+ return line, nil
+ }
+
+ // Open file if not open yet
+ if gr.curReader == nil {
+ err := gr.openFile(gr.curIndex)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ // Iterate over files until a line is found
+ var linePrefix string
+ for {
+ bytesRead, err := gr.curReader.ReadBytes('\n')
+ if err == io.EOF {
+ // Open the next file
+ if err1 := gr.openFile(gr.curIndex + 1); err1 != nil {
+ return "", err1
+ }
+ if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') {
+ return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil
+ }
+ linePrefix += string(bytesRead)
+ continue
+ } else if err != nil {
+ return "", err
+ }
+ return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil
+ }
+}
+
+// If index > gr.Group.maxIndex, returns io.EOF.
+// CONTRACT: caller should hold gr.mtx
+func (gr *GroupReader) openFile(index int) error {
+
+ // Lock on Group to ensure that head doesn't move in the meantime.
+ gr.Group.mtx.Lock()
+ defer gr.Group.mtx.Unlock()
+
+ if index > gr.Group.maxIndex {
+ return io.EOF
+ }
+
+ curFilePath := filePathForIndex(gr.Head.Path, index, gr.Group.maxIndex)
+ curFile, err := os.Open(curFilePath)
+ if err != nil {
+ return err
+ }
+ curReader := bufio.NewReader(curFile)
+
+ // Update gr.cur*
+ if gr.curFile != nil {
+ gr.curFile.Close() // TODO return error?
+ }
+ gr.curIndex = index
+ gr.curFile = curFile
+ gr.curReader = curReader
+ gr.curLine = nil
+ return nil
+}
+
+// PushLine makes the given line the current one, so the next time somebody
+// calls ReadLine, this line will be returned.
+// Panics if called twice without calling ReadLine.
+func (gr *GroupReader) PushLine(line string) {
+ gr.mtx.Lock()
+ defer gr.mtx.Unlock()
+
+ if gr.curLine == nil {
+ gr.curLine = []byte(line)
+ } else {
+ panic("PushLine failed, already have line")
+ }
+}
+
+// CurIndex returns the cursor's file index.
+func (gr *GroupReader) CurIndex() int {
+ gr.mtx.Lock()
+ defer gr.mtx.Unlock()
+ return gr.curIndex
+}
+
+// SetIndex sets the cursor's file index to index by opening a file at this
+// position.
+func (gr *GroupReader) SetIndex(index int) error {
+ gr.mtx.Lock()
+ defer gr.mtx.Unlock()
+ return gr.openFile(index)
+}
+
+//--------------------------------------------------------------------------------
+
+// MakeSimpleSearchFunc returns a simple SearchFunc that assumes each marker
+// line is of the form <prefix><number>.
+// For example, if prefix is '#HEIGHT:', the markers are expected to be of the form:
+//
+// #HEIGHT:1
+// ...
+// #HEIGHT:2
+// ...
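+//
+// A sketch of how this pairs with Group.Search (the group variable g and the
+// target height 100 are illustrative):
+//
+//	r, found, err := g.Search("#HEIGHT:", MakeSimpleSearchFunc("#HEIGHT:", 100))
+//	if err == nil {
+//		defer r.Close()
+//		line, _ := r.ReadLine() // the marker for height 100 when found is true
+//	}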
+func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { + return func(line string) (int, error) { + if !strings.HasPrefix(line, prefix) { + return -1, errors.New(cmn.Fmt("Marker line did not have prefix: %v", prefix)) + } + i, err := strconv.Atoi(line[len(prefix):]) + if err != nil { + return -1, errors.New(cmn.Fmt("Failed to parse marker line: %v", err.Error())) + } + if target < i { + return 1, nil + } else if target == i { + return 0, nil + } else { + return -1, nil + } + } +} diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go new file mode 100644 index 000000000..72581f9e2 --- /dev/null +++ b/libs/autofile/group_test.go @@ -0,0 +1,438 @@ +package autofile + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +// NOTE: Returned group has ticker stopped +func createTestGroup(t *testing.T, headSizeLimit int64) *Group { + testID := cmn.RandStr(12) + testDir := "_test_" + testID + err := cmn.EnsureDir(testDir, 0700) + require.NoError(t, err, "Error creating dir") + headPath := testDir + "/myfile" + g, err := OpenGroup(headPath) + require.NoError(t, err, "Error opening Group") + g.SetHeadSizeLimit(headSizeLimit) + g.stopTicker() + require.NotEqual(t, nil, g, "Failed to create Group") + return g +} + +func destroyTestGroup(t *testing.T, g *Group) { + g.Close() + err := os.RemoveAll(g.Dir) + require.NoError(t, err, "Error removing test Group directory") +} + +func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { + assert.Equal(t, minIndex, gInfo.MinIndex) + assert.Equal(t, maxIndex, gInfo.MaxIndex) + assert.Equal(t, totalSize, gInfo.TotalSize) + assert.Equal(t, headSize, gInfo.HeadSize) +} + +func TestCheckHeadSizeLimit(t *testing.T) { + g := createTestGroup(t, 1000*1000) + + // At first, there are no files. + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) + + // Write 1000 bytes 999 times. + for i := 0; i < 999; i++ { + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + } + g.Flush() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + + // Even calling checkHeadSizeLimit manually won't rotate it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + + // Write 1000 more bytes. + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + g.Flush() + + // Calling checkHeadSizeLimit this time rolls it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) + + // Write 1000 more bytes. + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + g.Flush() + + // Calling checkHeadSizeLimit does nothing. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000) + + // Write 1000 bytes 999 times. + for i := 0; i < 999; i++ { + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + } + g.Flush() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) + + // Calling checkHeadSizeLimit rolls it again. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) + + // Write 1000 more bytes. 
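+ // (This write goes through g.Head directly, bypassing the group's
+ // buffered writer that Write/WriteLine use.)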
+ _, err = g.Head.Write([]byte(cmn.RandStr(999) + "\n"))
+ require.NoError(t, err, "Error appending to head")
+ g.Flush()
+ assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)
+
+ // Calling checkHeadSizeLimit does nothing.
+ g.checkHeadSizeLimit()
+ assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)
+
+ // Cleanup
+ destroyTestGroup(t, g)
+}
+
+func TestSearch(t *testing.T) {
+ g := createTestGroup(t, 10*1000)
+
+ // Create some files in the group that have several INFO lines in them.
+ // Try to put the INFO lines in various spots.
+ for i := 0; i < 100; i++ {
+ // The random junk at the end ensures that this INFO line
+ // is equally likely to show up at the end.
+ _, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123))))
+ require.NoError(t, err, "Failed to write to head")
+ g.checkHeadSizeLimit()
+ for j := 0; j < 10; j++ {
+ _, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n"))
+ require.NoError(t, err1, "Failed to write to head")
+ g.checkHeadSizeLimit()
+ }
+ }
+
+ // Create a search func that searches for the INFO line with the given number
+ makeSearchFunc := func(target int) SearchFunc {
+ return func(line string) (int, error) {
+ parts := strings.Split(line, " ")
+ if len(parts) != 3 {
+ return -1, errors.New("Line did not have 3 parts")
+ }
+ i, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return -1, errors.New("Failed to parse INFO: " + err.Error())
+ }
+ if target < i {
+ return 1, nil
+ } else if target == i {
+ return 0, nil
+ } else {
+ return -1, nil
+ }
+ }
+ }
+
+ // Now search for each number
+ for i := 0; i < 100; i++ {
+ t.Log("Testing for i", i)
+ gr, match, err := g.Search("INFO", makeSearchFunc(i))
+ require.NoError(t, err, "Failed to search for line")
+ assert.True(t, match, "Expected Search to return exact match")
+ line, err := gr.ReadLine()
+ require.NoError(t, err, "Failed to read line after search")
+ if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) {
+ t.Fatal("Failed to get correct line")
+ }
+ // Make sure we can continue to read from there.
+ cur := i + 1
+ for {
+ line, err := gr.ReadLine()
+ if err == io.EOF {
+ if cur == 99+1 {
+ // OK!
+ break
+ } else {
+ t.Fatal("Got EOF after the wrong INFO #")
+ }
+ } else if err != nil {
+ t.Fatal("Error reading line", err)
+ }
+ if !strings.HasPrefix(line, "INFO ") {
+ continue
+ }
+ if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) {
+ t.Fatalf("Unexpected INFO #. Expected %v got:\n%v", cur, line)
+ }
+ cur++
+ }
+ gr.Close()
+ }
+
+ // Now search for something that is too small.
+ // We should get the first available line.
+ {
+ gr, match, err := g.Search("INFO", makeSearchFunc(-999))
+ require.NoError(t, err, "Failed to search for line")
+ assert.False(t, match, "Expected Search to not return exact match")
+ line, err := gr.ReadLine()
+ require.NoError(t, err, "Failed to read line after search")
+ if !strings.HasPrefix(line, "INFO 0 ") {
+ t.Error("Failed to fetch correct line, which is the earliest INFO")
+ }
+ err = gr.Close()
+ require.NoError(t, err, "Failed to close GroupReader")
+ }
+
+ // Now search for something that is too large.
+ // We should get an EOF error.
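+ // (Every INFO number compares below the target, so the reader runs off
+ // the end of the group and Search surfaces GroupReader's io.EOF.)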
+ { + gr, _, err := g.Search("INFO", makeSearchFunc(999)) + assert.Equal(t, io.EOF, err) + assert.Nil(t, gr) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestRotateFile(t *testing.T) { + g := createTestGroup(t, 0) + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + // Read g.Head.Path+"000" + body1, err := ioutil.ReadFile(g.Head.Path + ".000") + assert.NoError(t, err, "Failed to read first rolled file") + if string(body1) != "Line 1\nLine 2\nLine 3\n" { + t.Errorf("Got unexpected contents: [%v]", string(body1)) + } + + // Read g.Head.Path + body2, err := ioutil.ReadFile(g.Head.Path) + assert.NoError(t, err, "Failed to read first rolled file") + if string(body2) != "Line 4\nLine 5\nLine 6\n" { + t.Errorf("Got unexpected contents: [%v]", string(body2)) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast1(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("# a") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.WriteLine("# b") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast2(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("# a") + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("# b") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast3(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("# a") + g.WriteLine("Line 2") + g.WriteLine("# b") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast4(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.False(t, found) + assert.Empty(t, match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestWrite(t *testing.T) { + g := createTestGroup(t, 0) + + written := []byte("Medusa") + g.Write(written) + g.Flush() + + read := make([]byte, len(written)) + gr, err := g.NewReader(0) + require.NoError(t, err, "failed to create reader") + + _, err = gr.Read(read) + assert.NoError(t, err, "failed to read data") + assert.Equal(t, written, read) + + // Cleanup + destroyTestGroup(t, g) +} + +// test that Read reads the required amount of bytes from all the files in the +// group and returns no error if n == size of the given slice. 
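+//
+// A minimal sketch of the pattern exercised here (buffer size illustrative):
+//
+//	gr, err := g.NewReader(0) // start at the first file in the group
+//	n, err := gr.Read(make([]byte, 1024)) // may cross file boundaries
+//	gr.Close()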
+func TestGroupReaderRead(t *testing.T) {
+ g := createTestGroup(t, 0)
+
+ professor := []byte("Professor Monster")
+ g.Write(professor)
+ g.Flush()
+ g.RotateFile()
+ frankenstein := []byte("Frankenstein's Monster")
+ g.Write(frankenstein)
+ g.Flush()
+
+ totalWrittenLength := len(professor) + len(frankenstein)
+ read := make([]byte, totalWrittenLength)
+ gr, err := g.NewReader(0)
+ require.NoError(t, err, "failed to create reader")
+
+ n, err := gr.Read(read)
+ assert.NoError(t, err, "failed to read data")
+ assert.Equal(t, totalWrittenLength, n, "not enough bytes read")
+ professorPlusFrankenstein := professor
+ professorPlusFrankenstein = append(professorPlusFrankenstein, frankenstein...)
+ assert.Equal(t, professorPlusFrankenstein, read)
+
+ // Cleanup
+ destroyTestGroup(t, g)
+}
+
+// test that Read returns an error if number of bytes read < size of
+// the given slice. Subsequent call should return 0, io.EOF.
+func TestGroupReaderRead2(t *testing.T) {
+ g := createTestGroup(t, 0)
+
+ professor := []byte("Professor Monster")
+ g.Write(professor)
+ g.Flush()
+ g.RotateFile()
+ frankenstein := []byte("Frankenstein's Monster")
+ frankensteinPart := []byte("Frankenstein")
+ g.Write(frankensteinPart) // note writing only a part
+ g.Flush()
+
+ totalLength := len(professor) + len(frankenstein)
+ read := make([]byte, totalLength)
+ gr, err := g.NewReader(0)
+ require.NoError(t, err, "failed to create reader")
+
+ // 1) n < (size of the given slice), io.EOF
+ n, err := gr.Read(read)
+ assert.Equal(t, io.EOF, err)
+ assert.Equal(t, len(professor)+len(frankensteinPart), n, "Read more/less bytes than are in the group")
+
+ // 2) 0, io.EOF
+ n, err = gr.Read([]byte("0"))
+ assert.Equal(t, io.EOF, err)
+ assert.Equal(t, 0, n)
+
+ // Cleanup
+ destroyTestGroup(t, g)
+}
+
+func TestMinIndex(t *testing.T) {
+ g := createTestGroup(t, 0)
+
+ assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning")
+
+ // Cleanup
+ destroyTestGroup(t, g)
+}
+
+func TestMaxIndex(t *testing.T) {
+ g := createTestGroup(t, 0)
+
+ assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning")
+
+ g.WriteLine("Line 1")
+ g.Flush()
+ g.RotateFile()
+
+ assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file")
+
+ // Cleanup
+ destroyTestGroup(t, g)
+}
diff --git a/libs/autofile/sighup_watcher.go b/libs/autofile/sighup_watcher.go
new file mode 100644
index 000000000..56fbd4d86
--- /dev/null
+++ b/libs/autofile/sighup_watcher.go
@@ -0,0 +1,63 @@
+package autofile
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "sync/atomic"
+ "syscall"
+)
+
+func init() {
+ initSighupWatcher()
+}
+
+var sighupWatchers *SighupWatcher
+var sighupCounter int32 // For testing
+
+func initSighupWatcher() {
+ sighupWatchers = newSighupWatcher()
+
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, syscall.SIGHUP)
+
+ go func() {
+ for range c {
+ sighupWatchers.closeAll()
+ atomic.AddInt32(&sighupCounter, 1)
+ }
+ }()
+}
+
+// SighupWatcher watches for SIGHUP events and notifies registered AutoFiles
+type SighupWatcher struct {
+ mtx sync.Mutex
+ autoFiles map[string]*AutoFile
+}
+
+func newSighupWatcher() *SighupWatcher {
+ return &SighupWatcher{
+ autoFiles: make(map[string]*AutoFile, 10),
+ }
+}
+
+func (w *SighupWatcher) addAutoFile(af *AutoFile) {
+ w.mtx.Lock()
+ w.autoFiles[af.ID] = af
+ w.mtx.Unlock()
+}
+
+// If AutoFile isn't registered or was already removed, does nothing.
+func (w *SighupWatcher) removeAutoFile(af *AutoFile) { + w.mtx.Lock() + delete(w.autoFiles, af.ID) + w.mtx.Unlock() +} + +func (w *SighupWatcher) closeAll() { + w.mtx.Lock() + for _, af := range w.autoFiles { + af.closeFile() + } + w.mtx.Unlock() +} diff --git a/libs/bech32/bech32.go b/libs/bech32/bech32.go new file mode 100644 index 000000000..a4db86d5f --- /dev/null +++ b/libs/bech32/bech32.go @@ -0,0 +1,29 @@ +package bech32 + +import ( + "github.com/btcsuite/btcutil/bech32" + "github.com/pkg/errors" +) + +//ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32 +func ConvertAndEncode(hrp string, data []byte) (string, error) { + converted, err := bech32.ConvertBits(data, 8, 5, true) + if err != nil { + return "", errors.Wrap(err, "encoding bech32 failed") + } + return bech32.Encode(hrp, converted) + +} + +//DecodeAndConvert decodes a bech32 encoded string and converts to base64 encoded bytes +func DecodeAndConvert(bech string) (string, []byte, error) { + hrp, data, err := bech32.Decode(bech) + if err != nil { + return "", nil, errors.Wrap(err, "decoding bech32 failed") + } + converted, err := bech32.ConvertBits(data, 5, 8, false) + if err != nil { + return "", nil, errors.Wrap(err, "decoding bech32 failed") + } + return hrp, converted, nil +} diff --git a/libs/bech32/bech32_test.go b/libs/bech32/bech32_test.go new file mode 100644 index 000000000..830942061 --- /dev/null +++ b/libs/bech32/bech32_test.go @@ -0,0 +1,31 @@ +package bech32_test + +import ( + "bytes" + "crypto/sha256" + "testing" + + "github.com/tendermint/tendermint/libs/bech32" +) + +func TestEncodeAndDecode(t *testing.T) { + + sum := sha256.Sum256([]byte("hello world\n")) + + bech, err := bech32.ConvertAndEncode("shasum", sum[:]) + + if err != nil { + t.Error(err) + } + hrp, data, err := bech32.DecodeAndConvert(bech) + + if err != nil { + t.Error(err) + } + if hrp != "shasum" { + t.Error("Invalid hrp") + } + if !bytes.Equal(data, sum[:]) { + t.Error("Invalid decode") + } +} diff --git a/libs/circle.yml b/libs/circle.yml new file mode 100644 index 000000000..390ffb039 --- /dev/null +++ b/libs/circle.yml @@ -0,0 +1,21 @@ +machine: + environment: + GOPATH: "${HOME}/.go_workspace" + PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME" + PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + hosts: + localhost: 127.0.0.1 + +dependencies: + override: + - mkdir -p "$PROJECT_PARENT_PATH" + - ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH" + post: + - go version + +test: + override: + - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh + post: + - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt + - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/libs/cli/flags/log_level.go b/libs/cli/flags/log_level.go new file mode 100644 index 000000000..156106a5a --- /dev/null +++ b/libs/cli/flags/log_level.go @@ -0,0 +1,86 @@ +package flags + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/libs/log" +) + +const ( + defaultLogLevelKey = "*" +) + +// ParseLogLevel parses complex log level - comma-separated +// list of module:level pairs with an optional *:level pair (* means +// all other modules). 
+// +// Example: +// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") +func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) { + if lvl == "" { + return nil, errors.New("Empty log level") + } + + l := lvl + + // prefix simple one word levels (e.g. "info") with "*" + if !strings.Contains(l, ":") { + l = defaultLogLevelKey + ":" + l + } + + options := make([]log.Option, 0) + + isDefaultLogLevelSet := false + var option log.Option + var err error + + list := strings.Split(l, ",") + for _, item := range list { + moduleAndLevel := strings.Split(item, ":") + + if len(moduleAndLevel) != 2 { + return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list) + } + + module := moduleAndLevel[0] + level := moduleAndLevel[1] + + if module == defaultLogLevelKey { + option, err = log.AllowLevel(level) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l)) + } + options = append(options, option) + isDefaultLogLevelSet = true + } else { + switch level { + case "debug": + option = log.AllowDebugWith("module", module) + case "info": + option = log.AllowInfoWith("module", module) + case "error": + option = log.AllowErrorWith("module", module) + case "none": + option = log.AllowNoneWith("module", module) + default: + return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list) + } + options = append(options, option) + + } + } + + // if "*" is not provided, set default global level + if !isDefaultLogLevelSet { + option, err = log.AllowLevel(defaultLogLevelValue) + if err != nil { + return nil, err + } + options = append(options, option) + } + + return log.NewFilter(logger, options...), nil +} diff --git a/libs/cli/flags/log_level_test.go b/libs/cli/flags/log_level_test.go new file mode 100644 index 000000000..1503ec281 --- /dev/null +++ b/libs/cli/flags/log_level_test.go @@ -0,0 +1,94 @@ +package flags_test + +import ( + "bytes" + "strings" + "testing" + + tmflags "github.com/tendermint/tendermint/libs/cli/flags" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + defaultLogLevelValue = "info" +) + +func TestParseLogLevel(t *testing.T) { + var buf bytes.Buffer + jsonLogger := log.NewTMJSONLogger(&buf) + + correctLogLevels := []struct { + lvl string + expectedLogLines []string + }{ + {"mempool:error", []string{ + ``, // if no default is given, assume info + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, // if no default is given, assume info + ``}}, + + {"mempool:error,*:debug", []string{ + `{"_msg":"Kingpin","level":"debug","module":"wire"}`, + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, + + {"*:debug,wire:none", []string{ + ``, + `{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, + } + + for _, c := range correctLogLevels { + logger, err := tmflags.ParseLogLevel(c.lvl, jsonLogger, defaultLogLevelValue) + if err != nil { + t.Fatal(err) + } + + buf.Reset() + + logger.With("module", "wire").Debug("Kingpin") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have 
{ + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "mempool").Info("Kitty Pryde") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "mempool").Error("Mesmero") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "state").Info("Mind") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[3] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[3], have, c.lvl) + } + + buf.Reset() + + logger.Debug("Gideon") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[4] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[4], have, c.lvl) + } + } + + incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"} + for _, lvl := range incorrectLogLevel { + if _, err := tmflags.ParseLogLevel(lvl, jsonLogger, defaultLogLevelValue); err == nil { + t.Fatalf("Expected %s to produce error", lvl) + } + } +} diff --git a/libs/cli/helper.go b/libs/cli/helper.go new file mode 100644 index 000000000..878cf26e5 --- /dev/null +++ b/libs/cli/helper.go @@ -0,0 +1,87 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// WriteConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. +func WriteConfigVals(dir string, vals map[string]string) error { + data := "" + for k, v := range vals { + data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) + } + cfile := filepath.Join(dir, "config.toml") + return ioutil.WriteFile(cfile, []byte(data), 0666) +} + +// RunWithArgs executes the given command with the specified command line args +// and environmental variables set. It returns any error returned from cmd.Execute() +func RunWithArgs(cmd Executable, args []string, env map[string]string) error { + oargs := os.Args + oenv := map[string]string{} + // defer returns the environment back to normal + defer func() { + os.Args = oargs + for k, v := range oenv { + os.Setenv(k, v) + } + }() + + // set the args and env how we want them + os.Args = args + for k, v := range env { + // backup old value if there, to restore at end + oenv[k] = os.Getenv(k) + err := os.Setenv(k, v) + if err != nil { + return err + } + } + + // and finally run the command + return cmd.Execute() +} + +// RunCaptureWithArgs executes the given command with the specified command +// line args and environmental variables set. 
It returns string fields
+// representing output written to stdout and stderr; any error from
+// cmd.Execute() is also returned.
+func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) {
+ oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout and stderr
+ rOut, wOut, _ := os.Pipe()
+ rErr, wErr, _ := os.Pipe()
+ os.Stdout, os.Stderr = wOut, wErr
+ defer func() {
+ os.Stdout, os.Stderr = oldout, olderr // restore the real stdout and stderr
+ }()
+
+ // copy the output in a separate goroutine so printing can't block indefinitely
+ copyStd := func(reader *os.File) *(chan string) {
+ stdC := make(chan string)
+ go func() {
+ var buf bytes.Buffer
+ // io.Copy will end when we call reader.Close() below
+ io.Copy(&buf, reader)
+ stdC <- buf.String()
+ }()
+ return &stdC
+ }
+ outC := copyStd(rOut)
+ errC := copyStd(rErr)
+
+ // now run the command
+ err = RunWithArgs(cmd, args, env)
+
+ // and grab the stdout to return
+ wOut.Close()
+ wErr.Close()
+ stdout = <-*outC
+ stderr = <-*errC
+ return stdout, stderr, err
+}
diff --git a/libs/cli/setup.go b/libs/cli/setup.go
new file mode 100644
index 000000000..06cf1cd1f
--- /dev/null
+++ b/libs/cli/setup.go
@@ -0,0 +1,157 @@
+package cli
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ HomeFlag = "home"
+ TraceFlag = "trace"
+ OutputFlag = "output"
+ EncodingFlag = "encoding"
+)
+
+// Executable is the minimal interface to *cobra.Command, so we can
+// wrap if desired before the test
+type Executable interface {
+ Execute() error
+}
+
+// PrepareBaseCmd is meant for tendermint and other servers
+func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor {
+ cobra.OnInitialize(func() { initEnv(envPrefix) })
+ cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data")
+ cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors")
+ cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE)
+ return Executor{cmd, os.Exit}
+}
+
+// PrepareMainCmd is meant for client side libs that want some more flags
+//
+// This adds --encoding (hex, btc, base64) and --output (text, json) to
+// the command. These only really make sense in interactive commands.
+func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor {
+ cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)")
+ cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)")
+ cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE)
+ return PrepareBaseCmd(cmd, envPrefix, defaultHome)
+}
+
+// initEnv configures viper to read ENV variables with the given prefix, if set.
+func initEnv(prefix string) {
+ copyEnvVars(prefix)
+
+ // env variables with TM prefix (eg.
TM_ROOT) + viper.SetEnvPrefix(prefix) + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) + viper.AutomaticEnv() +} + +// This copies all variables like TMROOT to TM_ROOT, +// so we can support both formats for the user +func copyEnvVars(prefix string) { + prefix = strings.ToUpper(prefix) + ps := prefix + "_" + for _, e := range os.Environ() { + kv := strings.SplitN(e, "=", 2) + if len(kv) == 2 { + k, v := kv[0], kv[1] + if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) { + k2 := strings.Replace(k, prefix, ps, 1) + os.Setenv(k2, v) + } + } + } +} + +// Executor wraps the cobra Command with a nicer Execute method +type Executor struct { + *cobra.Command + Exit func(int) // this is os.Exit by default, override in tests +} + +type ExitCoder interface { + ExitCode() int +} + +// execute adds all child commands to the root command sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func (e Executor) Execute() error { + e.SilenceUsage = true + e.SilenceErrors = true + err := e.Command.Execute() + if err != nil { + if viper.GetBool(TraceFlag) { + fmt.Fprintf(os.Stderr, "ERROR: %+v\n", err) + } else { + fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) + } + + // return error code 1 by default, can override it with a special error type + exitCode := 1 + if ec, ok := err.(ExitCoder); ok { + exitCode = ec.ExitCode() + } + e.Exit(exitCode) + } + return err +} + +type cobraCmdFunc func(cmd *cobra.Command, args []string) error + +// Returns a single function that calls each argument function in sequence +// RunE, PreRunE, PersistentPreRunE, etc. all have this same signature +func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { + return func(cmd *cobra.Command, args []string) error { + for _, f := range fs { + if f != nil { + if err := f(cmd, args); err != nil { + return err + } + } + } + return nil + } +} + +// Bind all flags and read the config into viper +func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { + // cmd.Flags() includes flags from this command and all persistent flags from the parent + if err := viper.BindPFlags(cmd.Flags()); err != nil { + return err + } + + homeDir := viper.GetString(HomeFlag) + viper.Set(HomeFlag, homeDir) + viper.SetConfigName("config") // name of config file (without extension) + viper.AddConfigPath(homeDir) // search root directory + viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config + + // If a config file is found, read it in. 
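+ // (Per Viper's documented precedence, values from this file sit below
+ // the flags and environment variables bound above, so those still win.)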
+ if err := viper.ReadInConfig(); err == nil { + // stderr, so if we redirect output to json file, this doesn't appear + // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) + } else if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + // ignore not found error, return other errors + return err + } + return nil +} + +func validateOutput(cmd *cobra.Command, args []string) error { + // validate output format + output := viper.GetString(OutputFlag) + switch output { + case "text", "json": + default: + return errors.Errorf("Unsupported output format: %s", output) + } + return nil +} diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go new file mode 100644 index 000000000..04209e493 --- /dev/null +++ b/libs/cli/setup_test.go @@ -0,0 +1,237 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSetupEnv(t *testing.T) { + cases := []struct { + args []string + env map[string]string + expected string + }{ + {nil, nil, ""}, + {[]string{"--foobar", "bang!"}, nil, "bang!"}, + // make sure reset is good + {nil, nil, ""}, + // test both variants of the prefix + {nil, map[string]string{"DEMO_FOOBAR": "good"}, "good"}, + {nil, map[string]string{"DEMOFOOBAR": "silly"}, "silly"}, + // and that cli overrides env... + {[]string{"--foobar", "important"}, + map[string]string{"DEMO_FOOBAR": "ignored"}, "important"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo string + demo := &cobra.Command{ + Use: "demo", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("foobar") + return nil + }, + } + demo.Flags().String("foobar", "", "Some test value from config") + cmd := PrepareBaseCmd(demo, "DEMO", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) + } +} + +func tempDir() string { + cdir, err := ioutil.TempDir("", "test-cli") + if err != nil { + panic(err) + } + return cdir +} + +func TestSetupConfig(t *testing.T) { + // we pre-create two config files we can refer to in the rest of + // the test cases. 
+ cval1 := "fubble" + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) + require.Nil(t, err) + + cases := []struct { + args []string + env map[string]string + expected string + expectedTwo string + }{ + {nil, nil, "", ""}, + // setting on the command line + {[]string{"--boo", "haha"}, nil, "haha", ""}, + {[]string{"--two-words", "rocks"}, nil, "", "rocks"}, + {[]string{"--home", conf1}, nil, cval1, ""}, + // test both variants of the prefix + {nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, + {nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RD_HOME": conf1}, cval1, ""}, + {nil, map[string]string{"RDHOME": conf1}, cval1, ""}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo, two string + boo := &cobra.Command{ + Use: "reader", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("boo") + two = viper.GetString("two-words") + return nil + }, + } + boo.Flags().String("boo", "", "Some test value from config") + boo.Flags().String("two-words", "", "Check out env handling -") + cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) + assert.Equal(t, tc.expectedTwo, two, i) + } +} + +type DemoConfig struct { + Name string `mapstructure:"name"` + Age int `mapstructure:"age"` + Unused int `mapstructure:"unused"` +} + +func TestSetupUnmarshal(t *testing.T) { + // we pre-create two config files we can refer to in the rest of + // the test cases. 
+ cval1, cval2 := "someone", "else" + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"name": cval1}) + require.Nil(t, err) + // even with some ignored fields, should be no problem + conf2 := tempDir() + err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) + require.Nil(t, err) + + // unused is not declared on a flag and remains from base + base := DemoConfig{ + Name: "default", + Age: 42, + Unused: -7, + } + c := func(name string, age int) DemoConfig { + r := base + // anything set on the flags as a default is used over + // the default config object + r.Name = "from-flag" + if name != "" { + r.Name = name + } + if age != 0 { + r.Age = age + } + return r + } + + cases := []struct { + args []string + env map[string]string + expected DemoConfig + }{ + {nil, nil, c("", 0)}, + // setting on the command line + {[]string{"--name", "haha"}, nil, c("haha", 0)}, + {[]string{"--home", conf1}, nil, c(cval1, 0)}, + // test both variants of the prefix + {nil, map[string]string{"MR_AGE": "56"}, c("", 56)}, + {nil, map[string]string{"MR_HOME": conf1}, c(cval1, 0)}, + {[]string{"--age", "17"}, map[string]string{"MRHOME": conf2}, c(cval2, 17)}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + cfg := base + marsh := &cobra.Command{ + Use: "marsh", + RunE: func(cmd *cobra.Command, args []string) error { + return viper.Unmarshal(&cfg) + }, + } + marsh.Flags().String("name", "from-flag", "Some test value from config") + // if we want a flag to use the proper default, then copy it + // from the default config here + marsh.Flags().Int("age", base.Age, "Some test value from config") + cmd := PrepareBaseCmd(marsh, "MR", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, cfg, i) + } +} + +func TestSetupTrace(t *testing.T) { + cases := []struct { + args []string + env map[string]string + long bool + expected string + }{ + {nil, nil, false, "Trace flag = false"}, + {[]string{"--trace"}, nil, true, "Trace flag = true"}, + {[]string{"--no-such-flag"}, nil, false, "unknown flag: --no-such-flag"}, + {nil, map[string]string{"DBG_TRACE": "true"}, true, "Trace flag = true"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + trace := &cobra.Command{ + Use: "trace", + RunE: func(cmd *cobra.Command, args []string) error { + return errors.Errorf("Trace flag = %t", viper.GetBool(TraceFlag)) + }, + } + cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) + require.NotNil(t, err, i) + require.Equal(t, "", stdout, i) + require.NotEqual(t, "", stderr, i) + msg := strings.Split(stderr, "\n") + desired := fmt.Sprintf("ERROR: %s", tc.expected) + assert.Equal(t, desired, msg[0], i) + if tc.long && assert.True(t, len(msg) > 2, i) { + // the next line starts the stack trace... 
+ assert.Contains(t, msg[1], "TestSetupTrace", i) + assert.Contains(t, msg[2], "setup_test.go", i) + } + } +} diff --git a/libs/clist/clist.go b/libs/clist/clist.go new file mode 100644 index 000000000..ccb1f5777 --- /dev/null +++ b/libs/clist/clist.go @@ -0,0 +1,384 @@ +package clist + +/* + +The purpose of CList is to provide a goroutine-safe linked-list. +This list can be traversed concurrently by any number of goroutines. +However, removed CElements cannot be added back. +NOTE: Not all methods of container/list are (yet) implemented. +NOTE: Removed elements need to DetachPrev or DetachNext consistently +to ensure garbage collection of removed elements. + +*/ + +import ( + "sync" +) + +/* + +CElement is an element of a linked-list +Traversal from a CElement is goroutine-safe. + +We can't avoid using WaitGroups or for-loops given the documentation +spec without re-implementing the primitives that already exist in +golang/sync. Notice that WaitGroup allows many go-routines to be +simultaneously released, which is what we want. Mutex doesn't do +this. RWMutex does this, but it's clumsy to use in the way that a +WaitGroup would be used -- and we'd end up having two RWMutex's for +prev/next each, which is doubly confusing. + +sync.Cond would be sort-of useful, but we don't need a write-lock in +the for-loop. Use sync.Cond when you need serial access to the +"condition". In our case our condition is if `next != nil || removed`, +and there's no reason to serialize that condition for goroutines +waiting on NextWait() (since it's just a read operation). + +*/ +type CElement struct { + mtx sync.RWMutex + prev *CElement + prevWg *sync.WaitGroup + prevWaitCh chan struct{} + next *CElement + nextWg *sync.WaitGroup + nextWaitCh chan struct{} + removed bool + + Value interface{} // immutable +} + +// Blocking implementation of Next(). +// May return nil iff CElement was tail and got removed. +func (e *CElement) NextWait() *CElement { + for { + e.mtx.RLock() + next := e.next + nextWg := e.nextWg + removed := e.removed + e.mtx.RUnlock() + + if next != nil || removed { + return next + } + + nextWg.Wait() + // e.next doesn't necessarily exist here. + // That's why we need to continue a for-loop. + } +} + +// Blocking implementation of Prev(). +// May return nil iff CElement was head and got removed. +func (e *CElement) PrevWait() *CElement { + for { + e.mtx.RLock() + prev := e.prev + prevWg := e.prevWg + removed := e.removed + e.mtx.RUnlock() + + if prev != nil || removed { + return prev + } + + prevWg.Wait() + } +} + +// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) PrevWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prevWaitCh +} + +// NextWaitChan can be used to wait until Next becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) NextWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.nextWaitCh +} + +// Nonblocking, may return nil if at the end. +func (e *CElement) Next() *CElement { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.next +} + +// Nonblocking, may return nil if at the end. 
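+//
+// A sketch of a full backward traversal, assuming no concurrent removals
+// (process is illustrative):
+//
+//	for el := l.Back(); el != nil; el = el.Prev() {
+//		process(el.Value)
+//	}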
+func (e *CElement) Prev() *CElement { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prev +} + +func (e *CElement) Removed() bool { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.removed +} + +func (e *CElement) DetachNext() { + if !e.Removed() { + panic("DetachNext() must be called after Remove(e)") + } + e.mtx.Lock() + defer e.mtx.Unlock() + + e.next = nil +} + +func (e *CElement) DetachPrev() { + if !e.Removed() { + panic("DetachPrev() must be called after Remove(e)") + } + e.mtx.Lock() + defer e.mtx.Unlock() + + e.prev = nil +} + +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on nextWg. +func (e *CElement) SetNext(newNext *CElement) { + e.mtx.Lock() + defer e.mtx.Unlock() + + oldNext := e.next + e.next = newNext + if oldNext != nil && newNext == nil { + // See https://golang.org/pkg/sync/: + // + // If a WaitGroup is reused to wait for several independent sets of + // events, new Add calls must happen after all previous Wait calls have + // returned. + e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + e.nextWaitCh = make(chan struct{}) + } + if oldNext == nil && newNext != nil { + e.nextWg.Done() + close(e.nextWaitCh) + } +} + +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on prevWg +func (e *CElement) SetPrev(newPrev *CElement) { + e.mtx.Lock() + defer e.mtx.Unlock() + + oldPrev := e.prev + e.prev = newPrev + if oldPrev != nil && newPrev == nil { + e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWaitCh = make(chan struct{}) + } + if oldPrev == nil && newPrev != nil { + e.prevWg.Done() + close(e.prevWaitCh) + } +} + +func (e *CElement) SetRemoved() { + e.mtx.Lock() + defer e.mtx.Unlock() + + e.removed = true + + // This wakes up anyone waiting in either direction. + if e.prev == nil { + e.prevWg.Done() + close(e.prevWaitCh) + } + if e.next == nil { + e.nextWg.Done() + close(e.nextWaitCh) + } +} + +//-------------------------------------------------------------------------------- + +// CList represents a linked list. +// The zero value for CList is an empty list ready to use. +// Operations are goroutine-safe. +type CList struct { + mtx sync.RWMutex + wg *sync.WaitGroup + waitCh chan struct{} + head *CElement // first element + tail *CElement // last element + len int // list length +} + +func (l *CList) Init() *CList { + l.mtx.Lock() + defer l.mtx.Unlock() + + l.wg = waitGroup1() + l.waitCh = make(chan struct{}) + l.head = nil + l.tail = nil + l.len = 0 + return l +} + +func New() *CList { return new(CList).Init() } + +func (l *CList) Len() int { + l.mtx.RLock() + defer l.mtx.RUnlock() + + return l.len +} + +func (l *CList) Front() *CElement { + l.mtx.RLock() + defer l.mtx.RUnlock() + + return l.head +} + +func (l *CList) FrontWait() *CElement { + // Loop until the head is non-nil else wait and try again + for { + l.mtx.RLock() + head := l.head + wg := l.wg + l.mtx.RUnlock() + + if head != nil { + return head + } + wg.Wait() + // NOTE: If you think l.head exists here, think harder. + } +} + +func (l *CList) Back() *CElement { + l.mtx.RLock() + defer l.mtx.RUnlock() + + return l.tail +} + +func (l *CList) BackWait() *CElement { + for { + l.mtx.RLock() + tail := l.tail + wg := l.wg + l.mtx.RUnlock() + + if tail != nil { + return tail + } + wg.Wait() + // l.tail doesn't necessarily exist here. + // That's why we need to continue a for-loop. + } +} + +// WaitChan can be used to wait until Front or Back becomes not nil. Once it +// does, channel will be closed. 
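+//
+// A sketch of waiting for the list to become non-empty without parking a
+// goroutine in FrontWait (quit is illustrative):
+//
+//	select {
+//	case <-l.WaitChan():
+//		if el := l.Front(); el != nil {
+//			// consume el.Value
+//		}
+//	case <-quit:
+//		return
+//	}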
+func (l *CList) WaitChan() <-chan struct{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + return l.waitCh +} + +func (l *CList) PushBack(v interface{}) *CElement { + l.mtx.Lock() + defer l.mtx.Unlock() + + // Construct a new element + e := &CElement{ + prev: nil, + prevWg: waitGroup1(), + prevWaitCh: make(chan struct{}), + next: nil, + nextWg: waitGroup1(), + nextWaitCh: make(chan struct{}), + removed: false, + Value: v, + } + + // Release waiters on FrontWait/BackWait maybe + if l.len == 0 { + l.wg.Done() + close(l.waitCh) + } + l.len++ + + // Modify the tail + if l.tail == nil { + l.head = e + l.tail = e + } else { + e.SetPrev(l.tail) // We must init e first. + l.tail.SetNext(e) // This will make e accessible. + l.tail = e // Update the list. + } + + return e +} + +// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. +// NOTE: As per the contract of CList, removed elements cannot be added back. +func (l *CList) Remove(e *CElement) interface{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + prev := e.Prev() + next := e.Next() + + if l.head == nil || l.tail == nil { + panic("Remove(e) on empty CList") + } + if prev == nil && l.head != e { + panic("Remove(e) with false head") + } + if next == nil && l.tail != e { + panic("Remove(e) with false tail") + } + + // If we're removing the only item, make CList FrontWait/BackWait wait. + if l.len == 1 { + l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.waitCh = make(chan struct{}) + } + + // Update l.len + l.len-- + + // Connect next/prev and set head/tail + if prev == nil { + l.head = next + } else { + prev.SetNext(next) + } + if next == nil { + l.tail = prev + } else { + next.SetPrev(prev) + } + + // Set .Done() on e, otherwise waiters will wait forever. + e.SetRemoved() + + return e.Value +} + +func waitGroup1() (wg *sync.WaitGroup) { + wg = &sync.WaitGroup{} + wg.Add(1) + return +} diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go new file mode 100644 index 000000000..6171f1a39 --- /dev/null +++ b/libs/clist/clist_test.go @@ -0,0 +1,293 @@ +package clist + +import ( + "fmt" + "math/rand" + "runtime" + "sync/atomic" + "testing" + "time" +) + +func TestSmall(t *testing.T) { + l := New() + el1 := l.PushBack(1) + el2 := l.PushBack(2) + el3 := l.PushBack(3) + if l.Len() != 3 { + t.Error("Expected len 3, got ", l.Len()) + } + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r1 := l.Remove(el1) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r2 := l.Remove(el2) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r3 := l.Remove(el3) + + if r1 != 1 { + t.Error("Expected 1, got ", r1) + } + if r2 != 2 { + t.Error("Expected 2, got ", r2) + } + if r3 != 3 { + t.Error("Expected 3, got ", r3) + } + if l.Len() != 0 { + t.Error("Expected len 0, got ", l.Len()) + } + +} + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +// nolint: megacheck +func _TestGCFifo(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := new(uint64) + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. 
+ type value struct { + Int int + } + done := make(chan struct{}) + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + atomic.AddUint64(gcCount, 1) + }) + } + + for el := l.Front(); el != nil; { + l.Remove(el) + //oldEl := el + el = el.Next() + //oldEl.DetachPrev() + //oldEl.DetachNext() + } + + runtime.GC() + time.Sleep(time.Second * 3) + runtime.GC() + time.Sleep(time.Second * 3) + _ = done + + if *gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + *gcCount) + } +} + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +// nolint: megacheck +func _TestGCRandom(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := 0 + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. + type value struct { + Int int + } + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + gcCount++ + }) + } + + els := make([]*CElement, 0, numElements) + for el := l.Front(); el != nil; el = el.Next() { + els = append(els, el) + } + + for _, i := range rand.Perm(numElements) { + el := els[i] + l.Remove(el) + _ = el.Next() + } + + runtime.GC() + time.Sleep(time.Second * 3) + + if gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + gcCount) + } +} + +func TestScanRightDeleteRandom(t *testing.T) { + + const numElements = 10000 + const numTimes = 1000 + const numScanners = 10 + + l := New() + stop := make(chan struct{}) + + els := make([]*CElement, numElements) + for i := 0; i < numElements; i++ { + el := l.PushBack(i) + els[i] = el + } + + // Launch scanner routines that will rapidly iterate over elements. + for i := 0; i < numScanners; i++ { + go func(scannerID int) { + var el *CElement + restartCounter := 0 + counter := 0 + FOR_LOOP: + for { + select { + case <-stop: + fmt.Println("stopped") + break FOR_LOOP + default: + } + if el == nil { + el = l.FrontWait() + restartCounter++ + } + el = el.Next() + counter++ + } + fmt.Printf("Scanner %v restartCounter: %v counter: %v\n", scannerID, restartCounter, counter) + }(i) + } + + // Remove an element, push back an element. + for i := 0; i < numTimes; i++ { + // Pick an element to remove + rmElIdx := rand.Intn(len(els)) + rmEl := els[rmElIdx] + + // Remove it + l.Remove(rmEl) + //fmt.Print(".") + + // Insert a new element + newEl := l.PushBack(-1*i - 1) + els[rmElIdx] = newEl + + if i%100000 == 0 { + fmt.Printf("Pushed %vK elements so far...\n", i/1000) + } + + } + + // Stop scanners + close(stop) + time.Sleep(time.Second * 1) + + // And remove all the elements. 
+ for el := l.Front(); el != nil; el = el.Next() { + l.Remove(el) + } + if l.Len() != 0 { + t.Fatal("Failed to remove all elements from CList") + } +} + +func TestWaitChan(t *testing.T) { + l := New() + ch := l.WaitChan() + + // 1) add one element to an empty list + go l.PushBack(1) + <-ch + + // 2) and remove it + el := l.Front() + v := l.Remove(el) + if v != 1 { + t.Fatal("where is 1 coming from?") + } + + // 3) test iterating forward and waiting for Next (NextWaitChan and Next) + el = l.PushBack(0) + + done := make(chan struct{}) + pushed := 0 + go func() { + for i := 1; i < 100; i++ { + l.PushBack(i) + pushed++ + time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond) + } + close(done) + }() + + next := el + seen := 0 +FOR_LOOP: + for { + select { + case <-next.NextWaitChan(): + next = next.Next() + seen++ + if next == nil { + continue + } + case <-done: + break FOR_LOOP + case <-time.After(10 * time.Second): + t.Fatal("max execution time") + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } + + // 4) test iterating backwards (PrevWaitChan and Prev) + prev := next + seen = 0 +FOR_LOOP2: + for { + select { + case <-prev.PrevWaitChan(): + prev = prev.Prev() + seen++ + if prev == nil { + t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") + } + case <-time.After(5 * time.Second): + break FOR_LOOP2 + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } +} diff --git a/libs/common/LICENSE b/libs/common/LICENSE new file mode 100644 index 000000000..8a142a71b --- /dev/null +++ b/libs/common/LICENSE @@ -0,0 +1,193 @@ +Tendermint Go-Common +Copyright (C) 2015 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/libs/common/async.go b/libs/common/async.go new file mode 100644 index 000000000..e3293ab4c --- /dev/null +++ b/libs/common/async.go @@ -0,0 +1,175 @@ +package common + +import ( + "sync/atomic" +) + +//---------------------------------------- +// Task + +// val: the value returned after task execution. +// err: the error returned during task completion. +// abort: tells Parallel to return, whether or not all tasks have completed. +type Task func(i int) (val interface{}, err error, abort bool) + +type TaskResult struct { + Value interface{} + Error error +} + +type TaskResultCh <-chan TaskResult + +type taskResultOK struct { + TaskResult + OK bool +} + +type TaskResultSet struct { + chz []TaskResultCh + results []taskResultOK +} + +func newTaskResultSet(chz []TaskResultCh) *TaskResultSet { + return &TaskResultSet{ + chz: chz, + results: make([]taskResultOK, len(chz)), + } +} + +func (trs *TaskResultSet) Channels() []TaskResultCh { + return trs.chz +} + +func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) { + if len(trs.results) <= index { + return TaskResult{}, false + } + resultOK := trs.results[index] + return resultOK.TaskResult, resultOK.OK +} + +// NOTE: Not concurrency safe. +// Writes results to trs.results without waiting for all tasks to complete. +func (trs *TaskResultSet) Reap() *TaskResultSet { + for i := 0; i < len(trs.results); i++ { + var trch = trs.chz[i] + select { + case result, ok := <-trch: + if ok { + // Write result. + trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, + } + } else { + // We already wrote it. 
+			}
+		default:
+			// Do nothing.
+		}
+	}
+	return trs
+}
+
+// NOTE: Not concurrency safe.
+// Like Reap() but waits until all tasks have returned or panic'd.
+func (trs *TaskResultSet) Wait() *TaskResultSet {
+	for i := 0; i < len(trs.results); i++ {
+		var trch = trs.chz[i]
+		result, ok := <-trch
+		if ok {
+			// Write result.
+			trs.results[i] = taskResultOK{
+				TaskResult: result,
+				OK:         true,
+			}
+		} else {
+			// We already wrote it.
+		}
+	}
+	return trs
+}
+
+// Returns the firstmost (by task index) value as
+// discovered by all previous Reap() calls.
+func (trs *TaskResultSet) FirstValue() interface{} {
+	for _, result := range trs.results {
+		if result.Value != nil {
+			return result.Value
+		}
+	}
+	return nil
+}
+
+// Returns the firstmost (by task index) error as
+// discovered by all previous Reap() calls.
+func (trs *TaskResultSet) FirstError() error {
+	for _, result := range trs.results {
+		if result.Error != nil {
+			return result.Error
+		}
+	}
+	return nil
+}
+
+//----------------------------------------
+// Parallel
+
+// Run tasks in parallel, with ability to abort early.
+// Returns ok=false iff any of the tasks panicked or returned abort=true.
+// NOTE: Do not implement quit features here. Instead, provide convenient
+// concurrent quit-like primitives, passed implicitly via Task closures. (e.g.
+// it's not Parallel's concern how you quit/abort your tasks).
+func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) {
+	var taskResultChz = make([]TaskResultCh, len(tasks)) // To return.
+	var taskDoneCh = make(chan bool, len(tasks))         // A "wait group" channel, early abort if any true received.
+	var numPanics = new(int32)                           // Keep track of panics to set ok=false later.
+	ok = true                                            // We will set it to false iff any tasks panic'd or returned abort.
+
+	// Start all tasks in parallel in separate goroutines.
+	// When the task is complete, it will appear in the
+	// respective taskResultCh (associated by task index).
+	for i, task := range tasks {
+		var taskResultCh = make(chan TaskResult, 1) // Capacity for 1 result.
+		taskResultChz[i] = taskResultCh
+		go func(i int, task Task, taskResultCh chan TaskResult) {
+			// Recovery
+			defer func() {
+				if pnk := recover(); pnk != nil {
+					atomic.AddInt32(numPanics, 1)
+					// Send panic to taskResultCh.
+					taskResultCh <- TaskResult{nil, ErrorWrap(pnk, "Panic in task")}
+					// Closing taskResultCh lets trs.Wait() work.
+					close(taskResultCh)
+					// Mark the task as done, without aborting.
+					taskDoneCh <- false
+				}
+			}()
+			// Run the task.
+			var val, err, abort = task(i)
+			// Send val/err to taskResultCh.
+			// NOTE: Below this line, nothing must panic.
+			taskResultCh <- TaskResult{val, err}
+			// Closing taskResultCh lets trs.Wait() work.
+			close(taskResultCh)
+			// Mark the task as done, propagating abort.
+			taskDoneCh <- abort
+		}(i, task, taskResultCh)
+	}
+
+	// Wait until all tasks are done, or until abort.
+	// DONE_LOOP:
+	for i := 0; i < len(tasks); i++ {
+		abort := <-taskDoneCh
+		if abort {
+			ok = false
+			break
+		}
+	}
+
+	// Ok is also false if there were any panics.
+	// We must do this check here (after DONE_LOOP).
+	ok = ok && (atomic.LoadInt32(numPanics) == 0)
+
+	return newTaskResultSet(taskResultChz).Reap(), ok
+}
diff --git a/libs/common/async_test.go b/libs/common/async_test.go
new file mode 100644
index 000000000..f565b4bd3
--- /dev/null
+++ b/libs/common/async_test.go
@@ -0,0 +1,156 @@
+package common
+
+import (
+	"errors"
+	"fmt"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParallel(t *testing.T) {
+
+	// Create tasks.
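+	// 100k trivial tasks. Parallel passes each closure its own task index,
+	// which is what makes the positional checks below possible.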
+ var counter = new(int32) + var tasks = make([]Task, 100*1000) + for i := 0; i < len(tasks); i++ { + tasks[i] = func(i int) (res interface{}, err error, abort bool) { + atomic.AddInt32(counter, 1) + return -1 * i, nil, false + } + } + + // Run in parallel. + var trs, ok = Parallel(tasks...) + assert.True(t, ok) + + // Verify. + assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") + var failedTasks int + for i := 0; i < len(tasks); i++ { + taskResult, ok := trs.LatestResult(i) + if !ok { + assert.Fail(t, "Task #%v did not complete.", i) + failedTasks++ + } else if taskResult.Error != nil { + assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) + failedTasks++ + } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { + assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) + failedTasks++ + } else { + // Good! + } + } + assert.Equal(t, failedTasks, 0, "No task should have failed") + assert.Nil(t, trs.FirstError(), "There should be no errors") + assert.Equal(t, 0, trs.FirstValue(), "First value should be 0") +} + +func TestParallelAbort(t *testing.T) { + + var flow1 = make(chan struct{}, 1) + var flow2 = make(chan struct{}, 1) + var flow3 = make(chan struct{}, 1) // Cap must be > 0 to prevent blocking. + var flow4 = make(chan struct{}, 1) + + // Create tasks. + var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 0) + flow1 <- struct{}{} + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 1) + flow2 <- <-flow1 + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 2) + flow3 <- <-flow2 + return 2, nil, true + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 3) + <-flow4 + return 3, nil, false + }, + } + + // Run in parallel. + var taskResultSet, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we aborted task #2.") + + // Verify task #3. + // Initially taskResultSet.chz[3] sends nothing since flow4 didn't send. + waitTimeout(t, taskResultSet.chz[3], "Task #3") + + // Now let the last task (#3) complete after abort. + flow4 <- <-flow3 + + // Wait until all tasks have returned or panic'd. + taskResultSet.Wait() + + // Verify task #0, #1, #2. + checkResult(t, taskResultSet, 0, 0, nil, nil) + checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) + checkResult(t, taskResultSet, 2, 2, nil, nil) + checkResult(t, taskResultSet, 3, 3, nil, nil) +} + +func TestParallelRecover(t *testing.T) { + + // Create tasks. + var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + panic(2) + }, + } + + // Run in parallel. + var taskResultSet, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we panic'd in task #2.") + + // Verify task #0, #1, #2. 
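+	// The panic value from task #2 was wrapped via ErrorWrap, so
+	// checkResult recovers it with taskResult.Error.(Error).Data().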
+	checkResult(t, taskResultSet, 0, 0, nil, nil)
+	checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil)
+	checkResult(t, taskResultSet, 2, nil, nil, 2)
+}
+
+// Wait for result
+func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val interface{}, err error, pnk interface{}) {
+	taskResult, ok := taskResultSet.LatestResult(index)
+	taskName := fmt.Sprintf("Task #%v", index)
+	assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName)
+	assert.Equal(t, val, taskResult.Value, taskName)
+	if err != nil {
+		assert.Equal(t, err, taskResult.Error, taskName)
+	} else if pnk != nil {
+		assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName)
+	} else {
+		assert.Nil(t, taskResult.Error, taskName)
+	}
+}
+
+// Wait for timeout (no result)
+func waitTimeout(t *testing.T, taskResultCh TaskResultCh, taskName string) {
+	select {
+	case _, ok := <-taskResultCh:
+		if !ok {
+			assert.Fail(t, "TaskResultCh unexpectedly closed (%v)", taskName)
+		} else {
+			assert.Fail(t, "TaskResultCh unexpectedly returned for %v", taskName)
+		}
+	case <-time.After(1 * time.Second): // TODO use deterministic time?
+		// Good!
+	}
+}
diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go
new file mode 100644
index 000000000..0290921a6
--- /dev/null
+++ b/libs/common/bit_array.go
@@ -0,0 +1,378 @@
+package common
+
+import (
+	"encoding/binary"
+	"fmt"
+	"regexp"
+	"strings"
+	"sync"
+)
+
+type BitArray struct {
+	mtx   sync.Mutex
+	Bits  int      `json:"bits"`  // NOTE: persisted via reflect, must be exported
+	Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported
+}
+
+// There is no BitArray whose Size is 0. Use nil instead.
+func NewBitArray(bits int) *BitArray {
+	if bits <= 0 {
+		return nil
+	}
+	return &BitArray{
+		Bits:  bits,
+		Elems: make([]uint64, (bits+63)/64),
+	}
+}
+
+func (bA *BitArray) Size() int {
+	if bA == nil {
+		return 0
+	}
+	return bA.Bits
+}
+
+// NOTE: behavior is undefined if i >= bA.Bits
+func (bA *BitArray) GetIndex(i int) bool {
+	if bA == nil {
+		return false
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.getIndex(i)
+}
+
+func (bA *BitArray) getIndex(i int) bool {
+	if i >= bA.Bits {
+		return false
+	}
+	return bA.Elems[i/64]&(uint64(1)<<uint(i%64)) > 0
+}
+
+// NOTE: behavior is undefined if i >= bA.Bits
+func (bA *BitArray) SetIndex(i int, v bool) bool {
+	if bA == nil {
+		return false
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.setIndex(i, v)
+}
+
+func (bA *BitArray) setIndex(i int, v bool) bool {
+	if i >= bA.Bits {
+		return false
+	}
+	if v {
+		bA.Elems[i/64] |= (uint64(1) << uint(i%64))
+	} else {
+		bA.Elems[i/64] &= ^(uint64(1) << uint(i%64))
+	}
+	return true
+}
+
+func (bA *BitArray) Copy() *BitArray {
+	if bA == nil {
+		return nil
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.copy()
+}
+
+func (bA *BitArray) copy() *BitArray {
+	c := make([]uint64, len(bA.Elems))
+	copy(c, bA.Elems)
+	return &BitArray{
+		Bits:  bA.Bits,
+		Elems: c,
+	}
+}
+
+func (bA *BitArray) copyBits(bits int) *BitArray {
+	c := make([]uint64, (bits+63)/64)
+	copy(c, bA.Elems)
+	return &BitArray{
+		Bits:  bits,
+		Elems: c,
+	}
+}
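Bit `i` of a `BitArray` lives in word `Elems[i/64]` at bit offset `i%64`, and every accessor above reduces to masking that word; the same convention shows up again in `Bytes()` and the tests below. A minimal, self-contained sketch of this word-packed indexing (hypothetical helper names, no nil-checks or locking):

```go
package main

import "fmt"

// getBit reports whether bit i is set in a word-packed bit set.
func getBit(elems []uint64, i int) bool {
	return elems[i/64]&(uint64(1)<<uint(i%64)) > 0
}

// setBit sets bit i to 1.
func setBit(elems []uint64, i int) {
	elems[i/64] |= uint64(1) << uint(i%64)
}

func main() {
	elems := make([]uint64, 2) // room for 128 bits
	setBit(elems, 70)          // stored in elems[1] at offset 6
	fmt.Println(getBit(elems, 70), getBit(elems, 69)) // true false
}
```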
+// Returns a BitArray of larger bit size.
+func (bA *BitArray) Or(o *BitArray) *BitArray {
+	if bA == nil && o == nil {
+		return nil
+	}
+	if bA == nil && o != nil {
+		return o.Copy()
+	}
+	if o == nil {
+		return bA.Copy()
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	c := bA.copyBits(MaxInt(bA.Bits, o.Bits))
+	for i := 0; i < len(o.Elems); i++ {
+		c.Elems[i] |= o.Elems[i]
+	}
+	return c
+}
+
+// Returns a BitArray of smaller bit size.
+func (bA *BitArray) And(o *BitArray) *BitArray {
+	if bA == nil || o == nil {
+		return nil
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.and(o)
+}
+
+func (bA *BitArray) and(o *BitArray) *BitArray {
+	c := bA.copyBits(MinInt(bA.Bits, o.Bits))
+	for i := 0; i < len(c.Elems); i++ {
+		c.Elems[i] &= o.Elems[i]
+	}
+	return c
+}
+
+func (bA *BitArray) Not() *BitArray {
+	if bA == nil {
+		return nil // Degenerate
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	c := bA.copy()
+	for i := 0; i < len(c.Elems); i++ {
+		c.Elems[i] = ^c.Elems[i]
+	}
+	return c
+}
+
+func (bA *BitArray) Sub(o *BitArray) *BitArray {
+	if bA == nil || o == nil {
+		// TODO: Decide if we should do 1's complement here?
+		return nil
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	if bA.Bits > o.Bits {
+		c := bA.copy()
+		// Clear the bits set in o's full words.
+		for i := 0; i < len(o.Elems)-1; i++ {
+			c.Elems[i] &= ^o.Elems[i]
+		}
+		i := len(o.Elems) - 1
+		if i >= 0 {
+			for idx := i * 64; idx < o.Bits; idx++ {
+				// NOTE: each individual GetIndex() call to o is safe.
+				c.setIndex(idx, c.getIndex(idx) && !o.GetIndex(idx))
+			}
+		}
+		return c
+	}
+	return bA.and(o.Not()) // Note degenerate case where o == nil
+}
+
+func (bA *BitArray) IsEmpty() bool {
+	if bA == nil {
+		return true // should this be opposite?
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	for _, e := range bA.Elems {
+		if e > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (bA *BitArray) IsFull() bool {
+	if bA == nil {
+		return true
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	// Check all elements except the last
+	for _, elem := range bA.Elems[:len(bA.Elems)-1] {
+		if (^elem) != 0 {
+			return false
+		}
+	}
+
+	// Check that the last element has (lastElemBits) 1's
+	lastElemBits := (bA.Bits+63)%64 + 1
+	lastElem := bA.Elems[len(bA.Elems)-1]
+	return (lastElem+1)&((uint64(1)<<uint(lastElemBits))-1) == 0
+}
+
+// PickRandom returns the index of a randomly chosen set bit, if any.
+func (bA *BitArray) PickRandom() (int, bool) {
+	if bA == nil {
+		return 0, false
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	length := len(bA.Elems)
+	if length == 0 {
+		return 0, false
+	}
+	randElemStart := RandIntn(length)
+	for i := 0; i < length; i++ {
+		elemIdx := ((i + randElemStart) % length)
+		if elemIdx < length-1 {
+			if bA.Elems[elemIdx] > 0 {
+				randBitStart := RandIntn(64)
+				for j := 0; j < 64; j++ {
+					bitIdx := ((j + randBitStart) % 64)
+					if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 {
+						return 64*elemIdx + bitIdx, true
+					}
+				}
+				PanicSanity("should not happen")
+			}
+		} else {
+			// Special case for last elem, to ignore straggler bits
+			elemBits := bA.Bits % 64
+			if elemBits == 0 {
+				elemBits = 64
+			}
+			randBitStart := RandIntn(elemBits)
+			for j := 0; j < elemBits; j++ {
+				bitIdx := ((j + randBitStart) % elemBits)
+				if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 {
+					return 64*elemIdx + bitIdx, true
+				}
+			}
+		}
+	}
+	return 0, false
+}
+
+// String returns a string representation of BitArray: BA{<length>:<bits>},
+// where <bits> is a sequence of 'x' (1) and '_' (0).
+// The <bits> includes spaces and newlines to help people.
+// For a simple sequence of 'x' and '_' characters with no spaces or newlines,
+// see the MarshalJSON() method.
+// Example: "BA{3:_x_}" or "nil-BitArray" for nil.
+func (bA *BitArray) String() string { + return bA.StringIndented("") +} + +func (bA *BitArray) StringIndented(indent string) string { + if bA == nil { + return "nil-BitArray" + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.stringIndented(indent) +} + +func (bA *BitArray) stringIndented(indent string) string { + lines := []string{} + bits := "" + for i := 0; i < bA.Bits; i++ { + if bA.getIndex(i) { + bits += "x" + } else { + bits += "_" + } + if i%100 == 99 { + lines = append(lines, bits) + bits = "" + } + if i%10 == 9 { + bits += indent + } + if i%50 == 49 { + bits += indent + } + } + if len(bits) > 0 { + lines = append(lines, bits) + } + return fmt.Sprintf("BA{%v:%v}", bA.Bits, strings.Join(lines, indent)) +} + +func (bA *BitArray) Bytes() []byte { + bA.mtx.Lock() + defer bA.mtx.Unlock() + + numBytes := (bA.Bits + 7) / 8 + bytes := make([]byte, numBytes) + for i := 0; i < len(bA.Elems); i++ { + elemBytes := [8]byte{} + binary.LittleEndian.PutUint64(elemBytes[:], bA.Elems[i]) + copy(bytes[i*8:], elemBytes[:]) + } + return bytes +} + +// NOTE: other bitarray o is not locked when reading, +// so if necessary, caller must copy or lock o prior to calling Update. +// If bA is nil, does nothing. +func (bA *BitArray) Update(o *BitArray) { + if bA == nil || o == nil { + return + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + + copy(bA.Elems, o.Elems) +} + +// MarshalJSON implements json.Marshaler interface by marshaling bit array +// using a custom format: a string of '-' or 'x' where 'x' denotes the 1 bit. +func (bA *BitArray) MarshalJSON() ([]byte, error) { + if bA == nil { + return []byte("null"), nil + } + + bA.mtx.Lock() + defer bA.mtx.Unlock() + + bits := `"` + for i := 0; i < bA.Bits; i++ { + if bA.getIndex(i) { + bits += `x` + } else { + bits += `_` + } + } + bits += `"` + return []byte(bits), nil +} + +var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`) + +// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom +// JSON description. +func (bA *BitArray) UnmarshalJSON(bz []byte) error { + b := string(bz) + if b == "null" { + // This is required e.g. for encoding/json when decoding + // into a pointer with pre-allocated BitArray. + bA.Bits = 0 + bA.Elems = nil + return nil + } + + // Validate 'b'. + match := bitArrayJSONRegexp.FindStringSubmatch(b) + if match == nil { + return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) + } + bits := match[1] + + // Construct new BitArray and copy over. 
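+	// The regexp above guarantees bits contains only 'x' and '_', so
+	// SetIndex is only ever called with well-formed input, and *bA is
+	// replaced in a single assignment at the end.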
+	numBits := len(bits)
+	bA2 := NewBitArray(numBits)
+	for i := 0; i < numBits; i++ {
+		if bits[i] == 'x' {
+			bA2.SetIndex(i, true)
+		}
+	}
+	*bA = *bA2
+	return nil
+}
diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go
new file mode 100644
index 000000000..c697ba5de
--- /dev/null
+++ b/libs/common/bit_array_test.go
@@ -0,0 +1,267 @@
+package common
+
+import (
+	"bytes"
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func randBitArray(bits int) (*BitArray, []byte) {
+	src := RandBytes((bits + 7) / 8)
+	bA := NewBitArray(bits)
+	for i := 0; i < len(src); i++ {
+		for j := 0; j < 8; j++ {
+			if i*8+j >= bits {
+				return bA, src
+			}
+			setBit := src[i]&(1<<uint(j)) > 0
+			bA.SetIndex(i*8+j, setBit)
+		}
+	}
+	return bA, src
+}
+
+func TestAnd(t *testing.T) {
+
+	bA1, _ := randBitArray(51)
+	bA2, _ := randBitArray(31)
+	bA3 := bA1.And(bA2)
+
+	var bNil *BitArray
+	require.Equal(t, bNil.And(bA1), (*BitArray)(nil))
+	require.Equal(t, bA1.And(nil), (*BitArray)(nil))
+	require.Equal(t, bNil.And(nil), (*BitArray)(nil))
+
+	if bA3.Bits != 31 {
+		t.Error("Expected min bits", bA3.Bits)
+	}
+	if len(bA3.Elems) != len(bA2.Elems) {
+		t.Error("Expected min elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i) && bA2.GetIndex(i)
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
+		}
+	}
+}
+
+func TestOr(t *testing.T) {
+
+	bA1, _ := randBitArray(51)
+	bA2, _ := randBitArray(31)
+	bA3 := bA1.Or(bA2)
+
+	bNil := (*BitArray)(nil)
+	require.Equal(t, bNil.Or(bA1), bA1)
+	require.Equal(t, bA1.Or(nil), bA1)
+	require.Equal(t, bNil.Or(nil), (*BitArray)(nil))
+
+	if bA3.Bits != 51 {
+		t.Error("Expected max bits")
+	}
+	if len(bA3.Elems) != len(bA1.Elems) {
+		t.Error("Expected max elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i) || bA2.GetIndex(i)
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
+		}
+	}
+}
+
+func TestSub1(t *testing.T) {
+
+	bA1, _ := randBitArray(31)
+	bA2, _ := randBitArray(51)
+	bA3 := bA1.Sub(bA2)
+
+	bNil := (*BitArray)(nil)
+	require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil))
+	require.Equal(t, bA1.Sub(nil), (*BitArray)(nil))
+	require.Equal(t, bNil.Sub(nil), (*BitArray)(nil))
+
+	if bA3.Bits != bA1.Bits {
+		t.Error("Expected bA1 bits")
+	}
+	if len(bA3.Elems) != len(bA1.Elems) {
+		t.Error("Expected bA1 elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i)
+		if bA2.GetIndex(i) {
+			expected = false
+		}
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
+		}
+	}
+}
+
+func TestSub2(t *testing.T) {
+
+	bA1, _ := randBitArray(51)
+	bA2, _ := randBitArray(31)
+	bA3 := bA1.Sub(bA2)
+
+	bNil := (*BitArray)(nil)
+	require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil))
+	require.Equal(t, bA1.Sub(nil), (*BitArray)(nil))
+	require.Equal(t, bNil.Sub(nil), (*BitArray)(nil))
+
+	if bA3.Bits != bA1.Bits {
+		t.Error("Expected bA1 bits")
+	}
+	if len(bA3.Elems) != len(bA1.Elems) {
+		t.Error("Expected bA1 elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i)
+		if i < bA2.Bits && bA2.GetIndex(i) {
+			expected = false
+		}
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3")
+		}
+	}
+}
+
+func TestPickRandom(t *testing.T) {
+	for idx := 0; idx < 123; idx++ {
+		bA1 := NewBitArray(123)
+		bA1.SetIndex(idx,
true) + index, ok := bA1.PickRandom() + if !ok { + t.Fatal("Expected to pick element but got none") + } + if index != idx { + t.Fatalf("Expected to pick element at %v but got wrong index", idx) + } + } +} + +func TestBytes(t *testing.T) { + bA := NewBitArray(4) + bA.SetIndex(0, true) + check := func(bA *BitArray, bz []byte) { + if !bytes.Equal(bA.Bytes(), bz) { + panic(Fmt("Expected %X but got %X", bz, bA.Bytes())) + } + } + check(bA, []byte{0x01}) + bA.SetIndex(3, true) + check(bA, []byte{0x09}) + + bA = NewBitArray(9) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + + bA = NewBitArray(16) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + bA.SetIndex(9, true) + check(bA, []byte{0x80, 0x03}) +} + +func TestEmptyFull(t *testing.T) { + ns := []int{47, 123} + for _, n := range ns { + bA := NewBitArray(n) + if !bA.IsEmpty() { + t.Fatal("Expected bit array to be empty") + } + for i := 0; i < n; i++ { + bA.SetIndex(i, true) + } + if !bA.IsFull() { + t.Fatal("Expected bit array to be full") + } + } +} + +func TestUpdateNeverPanics(t *testing.T) { + newRandBitArray := func(n int) *BitArray { + ba, _ := randBitArray(n) + return ba + } + pairs := []struct { + a, b *BitArray + }{ + {nil, nil}, + {newRandBitArray(10), newRandBitArray(12)}, + {newRandBitArray(23), newRandBitArray(23)}, + {newRandBitArray(37), nil}, + {nil, NewBitArray(10)}, + } + + for _, pair := range pairs { + a, b := pair.a, pair.b + a.Update(b) + b.Update(a) + } +} + +func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { + bitList := []int{-127, -128, -1 << 31} + for _, bits := range bitList { + _ = NewBitArray(bits) + } +} + +func TestJSONMarshalUnmarshal(t *testing.T) { + + bA1 := NewBitArray(0) + + bA2 := NewBitArray(1) + + bA3 := NewBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + testCases := []struct { + bA *BitArray + marshalledBA string + }{ + {nil, `null`}, + {bA1, `null`}, + {bA2, `"_"`}, + {bA3, `"x"`}, + {bA4, `"xx___"`}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz, err := json.Marshal(tc.bA) + require.NoError(t, err) + + assert.Equal(t, tc.marshalledBA, string(bz)) + + var unmarshalledBA *BitArray + err = json.Unmarshal(bz, &unmarshalledBA) + require.NoError(t, err) + + if tc.bA == nil { + require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Bits, unmarshalledBA.Bits) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} diff --git a/libs/common/bytes.go b/libs/common/bytes.go new file mode 100644 index 000000000..711720aa7 --- /dev/null +++ b/libs/common/bytes.go @@ -0,0 +1,62 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of HexBytes is to enable HEX-encoding for json/encoding. +type HexBytes []byte + +// Marshal needed for protobuf compatibility +func (bz HexBytes) Marshal() ([]byte, error) { + return bz, nil +} + +// Unmarshal needed for protobuf compatibility +func (bz *HexBytes) Unmarshal(data []byte) error { + *bz = data + return nil +} + +// This is the point of Bytes. 
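+// HexBytes marshals to an upper-case hex JSON string (e.g. "616263" for
+// []byte("abc")), where a plain []byte would marshal to base64 ("YWJj").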
+func (bz HexBytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(bz)) + jbz := make([]byte, len(s)+2) + jbz[0] = '"' + copy(jbz[1:], []byte(s)) + jbz[len(jbz)-1] = '"' + return jbz, nil +} + +// This is the point of Bytes. +func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("Invalid hex string: %s", data) + } + bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) + if err != nil { + return err + } + *bz = bz2 + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (bz HexBytes) Bytes() []byte { + return bz +} + +func (bz HexBytes) String() string { + return strings.ToUpper(hex.EncodeToString(bz)) +} + +func (bz HexBytes) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + s.Write([]byte(fmt.Sprintf("%p", bz))) + default: + s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) + } +} diff --git a/libs/common/bytes_test.go b/libs/common/bytes_test.go new file mode 100644 index 000000000..9e11988f2 --- /dev/null +++ b/libs/common/bytes_test.go @@ -0,0 +1,65 @@ +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + bz := []byte("hello world") + dataB := HexBytes(bz) + bz2, err := dataB.Marshal() + assert.Nil(t, err) + assert.Equal(t, bz, bz2) + + var dataB2 HexBytes + err = (&dataB2).Unmarshal(bz) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) +} + +// Test that the hex encoding works. +func TestJSONMarshal(t *testing.T) { + + type TestStruct struct { + B1 []byte + B2 HexBytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, + {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. + ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, HexBytes(tc.input)) + }) + } +} diff --git a/libs/common/byteslice.go b/libs/common/byteslice.go new file mode 100644 index 000000000..57b3a8a2b --- /dev/null +++ b/libs/common/byteslice.go @@ -0,0 +1,73 @@ +package common + +import ( + "bytes" +) + +// Fingerprint returns the first 6 bytes of a byte slice. +// If the slice is less than 6 bytes, the fingerprint +// contains trailing zeroes. 
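+// e.g. Fingerprint([]byte{0xDE, 0xAD}) returns []byte{0xDE, 0xAD, 0x00, 0x00, 0x00, 0x00}.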
+func Fingerprint(slice []byte) []byte { + fingerprint := make([]byte, 6) + copy(fingerprint, slice) + return fingerprint +} + +func IsZeros(slice []byte) bool { + for _, byt := range slice { + if byt != byte(0) { + return false + } + } + return true +} + +func RightPadBytes(slice []byte, l int) []byte { + if l < len(slice) { + return slice + } + padded := make([]byte, l) + copy(padded[0:len(slice)], slice) + return padded +} + +func LeftPadBytes(slice []byte, l int) []byte { + if l < len(slice) { + return slice + } + padded := make([]byte, l) + copy(padded[l-len(slice):], slice) + return padded +} + +func TrimmedString(b []byte) string { + trimSet := string([]byte{0}) + return string(bytes.TrimLeft(b, trimSet)) + +} + +// PrefixEndBytes returns the end byteslice for a noninclusive range +// that would include all byte slices for which the input is the prefix +func PrefixEndBytes(prefix []byte) []byte { + if prefix == nil { + return nil + } + + end := make([]byte, len(prefix)) + copy(end, prefix) + finished := false + + for !finished { + if end[len(end)-1] != byte(255) { + end[len(end)-1]++ + finished = true + } else { + end = end[:len(end)-1] + if len(end) == 0 { + end = nil + finished = true + } + } + } + return end +} diff --git a/libs/common/byteslice_test.go b/libs/common/byteslice_test.go new file mode 100644 index 000000000..98085d125 --- /dev/null +++ b/libs/common/byteslice_test.go @@ -0,0 +1,28 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPrefixEndBytes(t *testing.T) { + assert := assert.New(t) + + var testCases = []struct { + prefix []byte + expected []byte + }{ + {[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}}, + {[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}}, + {[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}}, + {[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}}, + {[]byte{byte(255), byte(255), byte(255)}, nil}, + {nil, nil}, + } + + for _, test := range testCases { + end := PrefixEndBytes(test.prefix) + assert.Equal(test.expected, end) + } +} diff --git a/libs/common/cmap.go b/libs/common/cmap.go new file mode 100644 index 000000000..c65c27d4c --- /dev/null +++ b/libs/common/cmap.go @@ -0,0 +1,73 @@ +package common + +import "sync" + +// CMap is a goroutine-safe map +type CMap struct { + m map[string]interface{} + l sync.Mutex +} + +func NewCMap() *CMap { + return &CMap{ + m: make(map[string]interface{}), + } +} + +func (cm *CMap) Set(key string, value interface{}) { + cm.l.Lock() + defer cm.l.Unlock() + cm.m[key] = value +} + +func (cm *CMap) Get(key string) interface{} { + cm.l.Lock() + defer cm.l.Unlock() + return cm.m[key] +} + +func (cm *CMap) Has(key string) bool { + cm.l.Lock() + defer cm.l.Unlock() + _, ok := cm.m[key] + return ok +} + +func (cm *CMap) Delete(key string) { + cm.l.Lock() + defer cm.l.Unlock() + delete(cm.m, key) +} + +func (cm *CMap) Size() int { + cm.l.Lock() + defer cm.l.Unlock() + return len(cm.m) +} + +func (cm *CMap) Clear() { + cm.l.Lock() + defer cm.l.Unlock() + cm.m = make(map[string]interface{}) +} + +func (cm *CMap) Keys() []string { + cm.l.Lock() + defer cm.l.Unlock() + + keys := []string{} + for k := range cm.m { + keys = append(keys, k) + } + return keys +} + +func (cm *CMap) Values() []interface{} { + cm.l.Lock() + defer cm.l.Unlock() + items := []interface{}{} + for _, v := range cm.m { + items = append(items, v) + } + return items +} diff --git 
a/libs/common/cmap_test.go b/libs/common/cmap_test.go new file mode 100644 index 000000000..c665a7f3e --- /dev/null +++ b/libs/common/cmap_test.go @@ -0,0 +1,53 @@ +package common + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIterateKeysWithValues(t *testing.T) { + cmap := NewCMap() + + for i := 1; i <= 10; i++ { + cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) + } + + // Testing size + assert.Equal(t, 10, cmap.Size()) + assert.Equal(t, 10, len(cmap.Keys())) + assert.Equal(t, 10, len(cmap.Values())) + + // Iterating Keys, checking for matching Value + for _, key := range cmap.Keys() { + val := strings.Replace(key, "key", "value", -1) + assert.Equal(t, val, cmap.Get(key)) + } + + // Test if all keys are within []Keys() + keys := cmap.Keys() + for i := 1; i <= 10; i++ { + assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") + } + + // Delete 1 Key + cmap.Delete("key1") + + assert.NotEqual(t, len(keys), len(cmap.Keys()), "[]keys and []Keys() should not be equal, they are copies, one item was removed") +} + +func TestContains(t *testing.T) { + cmap := NewCMap() + + cmap.Set("key1", "value1") + + // Test for known values + assert.True(t, cmap.Has("key1")) + assert.Equal(t, "value1", cmap.Get("key1")) + + // Test for unknown values + assert.False(t, cmap.Has("key2")) + assert.Nil(t, cmap.Get("key2")) +} diff --git a/libs/common/colors.go b/libs/common/colors.go new file mode 100644 index 000000000..049ce7a50 --- /dev/null +++ b/libs/common/colors.go @@ -0,0 +1,95 @@ +package common + +import ( + "fmt" + "strings" +) + +const ( + ANSIReset = "\x1b[0m" + ANSIBright = "\x1b[1m" + ANSIDim = "\x1b[2m" + ANSIUnderscore = "\x1b[4m" + ANSIBlink = "\x1b[5m" + ANSIReverse = "\x1b[7m" + ANSIHidden = "\x1b[8m" + + ANSIFgBlack = "\x1b[30m" + ANSIFgRed = "\x1b[31m" + ANSIFgGreen = "\x1b[32m" + ANSIFgYellow = "\x1b[33m" + ANSIFgBlue = "\x1b[34m" + ANSIFgMagenta = "\x1b[35m" + ANSIFgCyan = "\x1b[36m" + ANSIFgWhite = "\x1b[37m" + + ANSIBgBlack = "\x1b[40m" + ANSIBgRed = "\x1b[41m" + ANSIBgGreen = "\x1b[42m" + ANSIBgYellow = "\x1b[43m" + ANSIBgBlue = "\x1b[44m" + ANSIBgMagenta = "\x1b[45m" + ANSIBgCyan = "\x1b[46m" + ANSIBgWhite = "\x1b[47m" +) + +// color the string s with color 'color' +// unless s is already colored +func treat(s string, color string) string { + if len(s) > 2 && s[:2] == "\x1b[" { + return s + } + return color + s + ANSIReset +} + +func treatAll(color string, args ...interface{}) string { + var parts []string + for _, arg := range args { + parts = append(parts, treat(fmt.Sprintf("%v", arg), color)) + } + return strings.Join(parts, "") +} + +func Black(args ...interface{}) string { + return treatAll(ANSIFgBlack, args...) +} + +func Red(args ...interface{}) string { + return treatAll(ANSIFgRed, args...) +} + +func Green(args ...interface{}) string { + return treatAll(ANSIFgGreen, args...) +} + +func Yellow(args ...interface{}) string { + return treatAll(ANSIFgYellow, args...) +} + +func Blue(args ...interface{}) string { + return treatAll(ANSIFgBlue, args...) +} + +func Magenta(args ...interface{}) string { + return treatAll(ANSIFgMagenta, args...) +} + +func Cyan(args ...interface{}) string { + return treatAll(ANSIFgCyan, args...) +} + +func White(args ...interface{}) string { + return treatAll(ANSIFgWhite, args...) 
+} + +func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string { + s := "" + for _, b := range data { + if 0x21 <= b && b < 0x7F { + s += textColor(string(b)) + } else { + s += bytesColor(Fmt("%02X", b)) + } + } + return s +} diff --git a/libs/common/date.go b/libs/common/date.go new file mode 100644 index 000000000..e017a4b41 --- /dev/null +++ b/libs/common/date.go @@ -0,0 +1,43 @@ +package common + +import ( + "strings" + "time" + + "github.com/pkg/errors" +) + +// TimeLayout helps to parse a date string of the format YYYY-MM-DD +// Intended to be used with the following function: +// time.Parse(TimeLayout, date) +var TimeLayout = "2006-01-02" //this represents YYYY-MM-DD + +// ParseDateRange parses a date range string of the format start:end +// where the start and end date are of the format YYYY-MM-DD. +// The parsed dates are time.Time and will return the zero time for +// unbounded dates, ex: +// unbounded start: :2000-12-31 +// unbounded end: 2000-12-31: +func ParseDateRange(dateRange string) (startDate, endDate time.Time, err error) { + dates := strings.Split(dateRange, ":") + if len(dates) != 2 { + err = errors.New("bad date range, must be in format date:date") + return + } + parseDate := func(date string) (out time.Time, err error) { + if len(date) == 0 { + return + } + out, err = time.Parse(TimeLayout, date) + return + } + startDate, err = parseDate(dates[0]) + if err != nil { + return + } + endDate, err = parseDate(dates[1]) + if err != nil { + return + } + return +} diff --git a/libs/common/date_test.go b/libs/common/date_test.go new file mode 100644 index 000000000..2c0632477 --- /dev/null +++ b/libs/common/date_test.go @@ -0,0 +1,46 @@ +package common + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, time.UTC) + date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC) + zero time.Time +) + +func TestParseDateRange(t *testing.T) { + assert := assert.New(t) + + var testDates = []struct { + dateStr string + start time.Time + end time.Time + errNil bool + }{ + {"2015-12-31:2016-12-31", date, date2, true}, + {"2015-12-31:", date, zero, true}, + {":2016-12-31", zero, date2, true}, + {"2016-12-31", zero, zero, false}, + {"2016-31-12:", zero, zero, false}, + {":2016-31-12", zero, zero, false}, + } + + for _, test := range testDates { + start, end, err := ParseDateRange(test.dateStr) + if test.errNil { + assert.Nil(err) + testPtr := func(want, have time.Time) { + assert.True(have.Equal(want)) + } + testPtr(test.start, start) + testPtr(test.end, end) + } else { + assert.NotNil(err) + } + } +} diff --git a/libs/common/errors.go b/libs/common/errors.go new file mode 100644 index 000000000..5c31b8968 --- /dev/null +++ b/libs/common/errors.go @@ -0,0 +1,246 @@ +package common + +import ( + "fmt" + "runtime" +) + +//---------------------------------------- +// Convenience method. + +func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { + if causeCmnError, ok := cause.(*cmnError); ok { + msg := Fmt(format, args...) + return causeCmnError.Stacktrace().Trace(1, msg) + } else if cause == nil { + return newCmnError(FmtError{format, args}).Stacktrace() + } else { + // NOTE: causeCmnError is a typed nil here. + msg := Fmt(format, args...) 
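+		// cause is some non-nil value that isn't a *cmnError: wrap it so
+		// callers can still recover it via Data() and switch on its type.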
+		return newCmnError(cause).Stacktrace().Trace(1, msg)
+	}
+}
+
+//----------------------------------------
+// Error & cmnError
+
+/*
+
+Usage with arbitrary error data:
+
+```go
+	// Error construction
+	type MyError struct{}
+	var err1 error = NewErrorWithData(MyError{})
+	...
+	// Wrapping
+	var err2 error = ErrorWrap(err1, "another message")
+	if (err1 != err2) { panic("should be the same") }
+	...
+	// Error handling
+	switch err2.Data().(type) {
+	case MyError: ...
+	default: ...
+	}
+```
+*/
+type Error interface {
+	Error() string
+	Stacktrace() Error
+	Trace(offset int, format string, args ...interface{}) Error
+	Data() interface{}
+}
+
+// New Error with formatted message.
+// The Error's Data will be a FmtError type.
+func NewError(format string, args ...interface{}) Error {
+	err := FmtError{format, args}
+	return newCmnError(err)
+}
+
+// New Error with specified data.
+func NewErrorWithData(data interface{}) Error {
+	return newCmnError(data)
+}
+
+type cmnError struct {
+	data       interface{}    // associated data
+	msgtraces  []msgtraceItem // all messages traced
+	stacktrace []uintptr      // first stack trace
+}
+
+var _ Error = &cmnError{}
+
+// NOTE: do not expose.
+func newCmnError(data interface{}) *cmnError {
+	return &cmnError{
+		data:       data,
+		msgtraces:  nil,
+		stacktrace: nil,
+	}
+}
+
+// Implements error.
+func (err *cmnError) Error() string {
+	return fmt.Sprintf("%v", err)
+}
+
+// Captures a stacktrace if one was not already captured.
+func (err *cmnError) Stacktrace() Error {
+	if err.stacktrace == nil {
+		var offset = 3
+		var depth = 32
+		err.stacktrace = captureStacktrace(offset, depth)
+	}
+	return err
+}
+
+// Add tracing information with msg.
+// Set offset=0 unless wrapped with some function, then offset > 0.
+func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error {
+	msg := Fmt(format, args...)
+	return err.doTrace(msg, offset)
+}
+
+// Return the "data" of this error.
+// Data could be used for error handling/switching,
+// or for holding general error/debug information.
+func (err *cmnError) Data() interface{} {
+	return err.data
+}
+
+func (err *cmnError) doTrace(msg string, n int) Error {
+	pc, _, _, _ := runtime.Caller(n + 2) // +1 for doTrace(). +1 for the caller.
+	// Include file & line number & msg.
+	// Do not include the whole stack trace.
+	err.msgtraces = append(err.msgtraces, msgtraceItem{
+		pc:  pc,
+		msg: msg,
+	})
+	return err
+}
+
+func (err *cmnError) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'p':
+		s.Write([]byte(fmt.Sprintf("%p", &err)))
+	default:
+		if s.Flag('#') {
+			s.Write([]byte("--= Error =--\n"))
+			// Write data.
+			s.Write([]byte(fmt.Sprintf("Data: %#v\n", err.data)))
+			// Write msg trace items.
+			s.Write([]byte(fmt.Sprintf("Msg Traces:\n")))
+			for i, msgtrace := range err.msgtraces {
+				s.Write([]byte(fmt.Sprintf(" %4d %s\n", i, msgtrace.String())))
+			}
+			// Write stack trace.
+			if err.stacktrace != nil {
+				s.Write([]byte(fmt.Sprintf("Stack Trace:\n")))
+				for i, pc := range err.stacktrace {
+					fnc := runtime.FuncForPC(pc)
+					file, line := fnc.FileLine(pc)
+					s.Write([]byte(fmt.Sprintf(" %4d %s:%d\n", i, file, line)))
+				}
+			}
+			s.Write([]byte("--= /Error =--\n"))
+		} else {
+			// Write msg.
+			s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc?
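+			// Plain %v prints only Error{data}; %#v (handled above) dumps the
+			// data, message traces, and captured stack trace.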
+ } + } +} + +//---------------------------------------- +// stacktrace & msgtraceItem + +func captureStacktrace(offset int, depth int) []uintptr { + var pcs = make([]uintptr, depth) + n := runtime.Callers(offset, pcs) + return pcs[0:n] +} + +type msgtraceItem struct { + pc uintptr + msg string +} + +func (mti msgtraceItem) String() string { + fnc := runtime.FuncForPC(mti.pc) + file, line := fnc.FileLine(mti.pc) + return fmt.Sprintf("%s:%d - %s", + file, line, + mti.msg, + ) +} + +//---------------------------------------- +// fmt error + +/* + +FmtError is the data type for NewError() (e.g. NewError().Data().(FmtError)) +Theoretically it could be used to switch on the format string. + +```go + // Error construction + var err1 error = NewError("invalid username %v", "BOB") + var err2 error = NewError("another kind of error") + ... + // Error handling + switch err1.Data().(cmn.FmtError).Format() { + case "invalid username %v": ... + case "another kind of error": ... + default: ... + } +``` +*/ +type FmtError struct { + format string + args []interface{} +} + +func (fe FmtError) Error() string { + return fmt.Sprintf(fe.format, fe.args...) +} + +func (fe FmtError) String() string { + return fmt.Sprintf("FmtError{format:%v,args:%v}", + fe.format, fe.args) +} + +func (fe FmtError) Format() string { + return fe.format +} + +//---------------------------------------- +// Panic wrappers +// XXX DEPRECATED + +// A panic resulting from a sanity check means there is a programmer error +// and some guarantee is not satisfied. +// XXX DEPRECATED +func PanicSanity(v interface{}) { + panic(Fmt("Panicked on a Sanity Check: %v", v)) +} + +// A panic here means something has gone horribly wrong, in the form of data corruption or +// failure of the operating system. In a correct/healthy system, these should never fire. +// If they do, it's indicative of a much more serious problem. +// XXX DEPRECATED +func PanicCrisis(v interface{}) { + panic(Fmt("Panicked on a Crisis: %v", v)) +} + +// Indicates a failure of consensus. Someone was malicious or something has +// gone horribly wrong. 
These should really boot us into an "emergency-recover" mode +// XXX DEPRECATED +func PanicConsensus(v interface{}) { + panic(Fmt("Panicked on a Consensus Failure: %v", v)) +} + +// For those times when we're not sure if we should panic +// XXX DEPRECATED +func PanicQ(v interface{}) { + panic(Fmt("Panicked questionably: %v", v)) +} diff --git a/libs/common/errors_test.go b/libs/common/errors_test.go new file mode 100644 index 000000000..52c78a765 --- /dev/null +++ b/libs/common/errors_test.go @@ -0,0 +1,101 @@ +package common + +import ( + fmt "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorPanic(t *testing.T) { + type pnk struct { + msg string + } + + capturePanic := func() (err Error) { + defer func() { + if r := recover(); r != nil { + err = ErrorWrap(r, "This is the message in ErrorWrap(r, message).") + } + }() + panic(pnk{"something"}) + } + + var err = capturePanic() + + assert.Equal(t, pnk{"something"}, err.Data()) + assert.Equal(t, "Error{{something}}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).") + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapSomething(t *testing.T) { + + var err = ErrorWrap("something", "formatter%v%v", 0, 1) + + assert.Equal(t, "something", err.Data()) + assert.Equal(t, "Error{something}", fmt.Sprintf("%v", err)) + assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapNothing(t *testing.T) { + + var err = ErrorWrap(nil, "formatter%v%v", 0, 1) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorNewError(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace") +} + +func TestErrorNewErrorWithStacktrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1).Stacktrace() + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorNewErrorWithTrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + err.Trace(0, "trace %v", 1) + err.Trace(0, "trace %v", 2) + err.Trace(0, "trace %v", 3) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + dump := fmt.Sprintf("%#v", err) + assert.NotContains(t, dump, "Stack Trace") + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 1`, dump) + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 2`, dump) + assert.Regexp(t, 
`common/errors_test\.go:[0-9]+ - trace 3`, dump) +} + +func TestErrorWrapError(t *testing.T) { + var err1 error = NewError("my message") + var err2 error = ErrorWrap(err1, "another message") + assert.Equal(t, err1, err2) +} diff --git a/libs/common/heap.go b/libs/common/heap.go new file mode 100644 index 000000000..b3bcb9db8 --- /dev/null +++ b/libs/common/heap.go @@ -0,0 +1,125 @@ +package common + +import ( + "bytes" + "container/heap" +) + +/* + Example usage: + + ``` + h := NewHeap() + + h.Push("msg1", 1) + h.Push("msg3", 3) + h.Push("msg2", 2) + + fmt.Println(h.Pop()) // msg1 + fmt.Println(h.Pop()) // msg2 + fmt.Println(h.Pop()) // msg3 + ``` +*/ +type Heap struct { + pq priorityQueue +} + +func NewHeap() *Heap { + return &Heap{pq: make([]*pqItem, 0)} +} + +func (h *Heap) Len() int64 { + return int64(len(h.pq)) +} + +func (h *Heap) Push(value interface{}, priority int) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) +} + +func (h *Heap) PushBytes(value interface{}, priority []byte) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)}) +} + +func (h *Heap) PushComparable(value interface{}, priority Comparable) { + heap.Push(&h.pq, &pqItem{value: value, priority: priority}) +} + +func (h *Heap) Peek() interface{} { + if len(h.pq) == 0 { + return nil + } + return h.pq[0].value +} + +func (h *Heap) Update(value interface{}, priority Comparable) { + h.pq.Update(h.pq[0], value, priority) +} + +func (h *Heap) Pop() interface{} { + item := heap.Pop(&h.pq).(*pqItem) + return item.value +} + +//----------------------------------------------------------------------------- +// From: http://golang.org/pkg/container/heap/#example__priorityQueue + +type pqItem struct { + value interface{} + priority Comparable + index int +} + +type priorityQueue []*pqItem + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + return pq[i].priority.Less(pq[j].priority) +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *priorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*pqItem) + item.index = n + *pq = append(*pq, item) +} + +func (pq *priorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) { + item.value = value + item.priority = priority + heap.Fix(pq, item.index) +} + +//-------------------------------------------------------------------------------- +// Comparable + +type Comparable interface { + Less(o interface{}) bool +} + +type cmpInt int + +func (i cmpInt) Less(o interface{}) bool { + return int(i) < int(o.(cmpInt)) +} + +type cmpBytes []byte + +func (bz cmpBytes) Less(o interface{}) bool { + return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0 +} diff --git a/libs/common/int.go b/libs/common/int.go new file mode 100644 index 000000000..a8a5f1e00 --- /dev/null +++ b/libs/common/int.go @@ -0,0 +1,65 @@ +package common + +import ( + "encoding/binary" + "sort" +) + +// Sort for []uint64 + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Uint64Slice) Sort() { sort.Sort(p) } + +func SearchUint64s(a []uint64, x uint64) int { + return sort.Search(len(a), func(i int) bool { 
return a[i] >= x }) +} + +func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } + +//-------------------------------------------------------------------------------- + +func PutUint64LE(dest []byte, i uint64) { + binary.LittleEndian.PutUint64(dest, i) +} + +func GetUint64LE(src []byte) uint64 { + return binary.LittleEndian.Uint64(src) +} + +func PutUint64BE(dest []byte, i uint64) { + binary.BigEndian.PutUint64(dest, i) +} + +func GetUint64BE(src []byte) uint64 { + return binary.BigEndian.Uint64(src) +} + +func PutInt64LE(dest []byte, i int64) { + binary.LittleEndian.PutUint64(dest, uint64(i)) +} + +func GetInt64LE(src []byte) int64 { + return int64(binary.LittleEndian.Uint64(src)) +} + +func PutInt64BE(dest []byte, i int64) { + binary.BigEndian.PutUint64(dest, uint64(i)) +} + +func GetInt64BE(src []byte) int64 { + return int64(binary.BigEndian.Uint64(src)) +} + +// IntInSlice returns true if a is found in the list. +func IntInSlice(a int, list []int) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/libs/common/int_test.go b/libs/common/int_test.go new file mode 100644 index 000000000..1ecc7844c --- /dev/null +++ b/libs/common/int_test.go @@ -0,0 +1,14 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIntInSlice(t *testing.T) { + assert.True(t, IntInSlice(1, []int{1, 2, 3})) + assert.False(t, IntInSlice(4, []int{1, 2, 3})) + assert.True(t, IntInSlice(0, []int{0})) + assert.False(t, IntInSlice(0, []int{})) +} diff --git a/libs/common/io.go b/libs/common/io.go new file mode 100644 index 000000000..fa0443e09 --- /dev/null +++ b/libs/common/io.go @@ -0,0 +1,74 @@ +package common + +import ( + "bytes" + "errors" + "io" +) + +type PrefixedReader struct { + Prefix []byte + reader io.Reader +} + +func NewPrefixedReader(prefix []byte, reader io.Reader) *PrefixedReader { + return &PrefixedReader{prefix, reader} +} + +func (pr *PrefixedReader) Read(p []byte) (n int, err error) { + if len(pr.Prefix) > 0 { + read := copy(p, pr.Prefix) + pr.Prefix = pr.Prefix[read:] + return read, nil + } + return pr.reader.Read(p) +} + +// NOTE: Not goroutine safe +type BufferCloser struct { + bytes.Buffer + Closed bool +} + +func NewBufferCloser(buf []byte) *BufferCloser { + return &BufferCloser{ + *bytes.NewBuffer(buf), + false, + } +} + +func (bc *BufferCloser) Close() error { + if bc.Closed { + return errors.New("BufferCloser already closed") + } + bc.Closed = true + return nil +} + +func (bc *BufferCloser) Write(p []byte) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.Write(p) +} + +func (bc *BufferCloser) WriteByte(c byte) error { + if bc.Closed { + return errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteByte(c) +} + +func (bc *BufferCloser) WriteRune(r rune) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteRune(r) +} + +func (bc *BufferCloser) WriteString(s string) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteString(s) +} diff --git a/libs/common/kvpair.go b/libs/common/kvpair.go new file mode 100644 index 000000000..54c3a58c0 --- /dev/null +++ b/libs/common/kvpair.go @@ -0,0 +1,67 @@ +package common + +import ( + "bytes" + "sort" +) + +//---------------------------------------- +// KVPair + +/* +Defined in types.proto + +type KVPair 
struct { + Key []byte + Value []byte +} +*/ + +type KVPairs []KVPair + +// Sorting +func (kvs KVPairs) Len() int { return len(kvs) } +func (kvs KVPairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0 + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KVPairs) Sort() { sort.Sort(kvs) } + +//---------------------------------------- +// KI64Pair + +/* +Defined in types.proto +type KI64Pair struct { + Key []byte + Value int64 +} +*/ + +type KI64Pairs []KI64Pair + +// Sorting +func (kvs KI64Pairs) Len() int { return len(kvs) } +func (kvs KI64Pairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return kvs[i].Value < kvs[j].Value + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KI64Pairs) Sort() { sort.Sort(kvs) } diff --git a/libs/common/math.go b/libs/common/math.go new file mode 100644 index 000000000..b037d1a71 --- /dev/null +++ b/libs/common/math.go @@ -0,0 +1,157 @@ +package common + +func MaxInt8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +func MaxUint8(a, b uint8) uint8 { + if a > b { + return a + } + return b +} + +func MaxInt16(a, b int16) int16 { + if a > b { + return a + } + return b +} + +func MaxUint16(a, b uint16) uint16 { + if a > b { + return a + } + return b +} + +func MaxInt32(a, b int32) int32 { + if a > b { + return a + } + return b +} + +func MaxUint32(a, b uint32) uint32 { + if a > b { + return a + } + return b +} + +func MaxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func MaxUint64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +func MaxUint(a, b uint) uint { + if a > b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func MinInt8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func MinUint8(a, b uint8) uint8 { + if a < b { + return a + } + return b +} + +func MinInt16(a, b int16) int16 { + if a < b { + return a + } + return b +} + +func MinUint16(a, b uint16) uint16 { + if a < b { + return a + } + return b +} + +func MinInt32(a, b int32) int32 { + if a < b { + return a + } + return b +} + +func MinUint32(a, b uint32) uint32 { + if a < b { + return a + } + return b +} + +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func MinUint64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +func MinUint(a, b uint) uint { + if a < b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func ExpUint64(a, b uint64) uint64 { + accum := uint64(1) + for b > 0 { + if b&1 == 1 { + accum *= a + } + a *= a + b >>= 1 + } + return accum +} diff --git a/libs/common/net.go b/libs/common/net.go new file mode 100644 index 000000000..bdbe38f79 --- /dev/null +++ b/libs/common/net.go @@ -0,0 +1,26 @@ +package common + +import ( + "net" + "strings" +) + +// Connect dials the given address and returns a net.Conn. The protoAddr argument should be prefixed with the protocol, +// eg. 
"tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" +func Connect(protoAddr string) (net.Conn, error) { + proto, address := ProtocolAndAddress(protoAddr) + conn, err := net.Dial(proto, address) + return conn, err +} + +// ProtocolAndAddress splits an address into the protocol and address components. +// For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". +// If the address has no protocol prefix, the default is "tcp". +func ProtocolAndAddress(listenAddr string) (string, string) { + protocol, address := "tcp", listenAddr + parts := strings.SplitN(address, "://", 2) + if len(parts) == 2 { + protocol, address = parts[0], parts[1] + } + return protocol, address +} diff --git a/libs/common/net_test.go b/libs/common/net_test.go new file mode 100644 index 000000000..38d2ae82d --- /dev/null +++ b/libs/common/net_test.go @@ -0,0 +1,38 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestProtocolAndAddress(t *testing.T) { + + cases := []struct { + fullAddr string + proto string + addr string + }{ + { + "tcp://mydomain:80", + "tcp", + "mydomain:80", + }, + { + "mydomain:80", + "tcp", + "mydomain:80", + }, + { + "unix://mydomain:80", + "unix", + "mydomain:80", + }, + } + + for _, c := range cases { + proto, addr := ProtocolAndAddress(c.fullAddr) + assert.Equal(t, proto, c.proto) + assert.Equal(t, addr, c.addr) + } +} diff --git a/libs/common/nil.go b/libs/common/nil.go new file mode 100644 index 000000000..31f75f008 --- /dev/null +++ b/libs/common/nil.go @@ -0,0 +1,29 @@ +package common + +import "reflect" + +// Go lacks a simple and safe way to see if something is a typed nil. +// See: +// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 +// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion +// - https://github.com/golang/go/issues/21538 +func IsTypedNil(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +// Returns true if it has zero length. +func IsEmpty(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return rv.Len() == 0 + default: + return false + } +} diff --git a/libs/common/os.go b/libs/common/os.go new file mode 100644 index 000000000..00f4da57b --- /dev/null +++ b/libs/common/os.go @@ -0,0 +1,195 @@ +package common + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "syscall" +) + +var gopath string + +// GoPath returns GOPATH env variable value. If it is not set, this function +// will try to call `go env GOPATH` subcommand. +func GoPath() string { + if gopath != "" { + return gopath + } + + path := os.Getenv("GOPATH") + if len(path) == 0 { + goCmd := exec.Command("go", "env", "GOPATH") + out, err := goCmd.Output() + if err != nil { + panic(fmt.Sprintf("failed to determine gopath: %v", err)) + } + path = string(out) + } + gopath = path + return path +} + +// TrapSignal catches the SIGTERM and executes cb function. After that it exits +// with code 1. +func TrapSignal(cb func()) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + for sig := range c { + fmt.Printf("captured %v, exiting...\n", sig) + if cb != nil { + cb() + } + os.Exit(1) + } + }() + select {} +} + +// Kill the running process by sending itself SIGTERM. 
+func Kill() error {
+	p, err := os.FindProcess(os.Getpid())
+	if err != nil {
+		return err
+	}
+	return p.Signal(syscall.SIGTERM)
+}
+
+// Exit prints s to stdout and exits with code 1.
+func Exit(s string) {
+	fmt.Println(s)
+	os.Exit(1)
+}
+
+func EnsureDir(dir string, mode os.FileMode) error {
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		err := os.MkdirAll(dir, mode)
+		if err != nil {
+			return fmt.Errorf("Could not create directory %v. %v", dir, err)
+		}
+	}
+	return nil
+}
+
+func IsDirEmpty(name string) (bool, error) {
+	f, err := os.Open(name)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return true, err
+		}
+		// Otherwise perhaps a permission
+		// error or some other error.
+		return false, err
+	}
+	defer f.Close()
+
+	_, err = f.Readdirnames(1) // Or f.Readdir(1)
+	if err == io.EOF {
+		return true, nil
+	}
+	return false, err // Either not empty or error, suits both cases
+}
+
+func FileExists(filePath string) bool {
+	_, err := os.Stat(filePath)
+	return !os.IsNotExist(err)
+}
+
+func ReadFile(filePath string) ([]byte, error) {
+	return ioutil.ReadFile(filePath)
+}
+
+func MustReadFile(filePath string) []byte {
+	fileBytes, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		Exit(Fmt("MustReadFile failed: %v", err))
+		return nil
+	}
+	return fileBytes
+}
+
+func WriteFile(filePath string, contents []byte, mode os.FileMode) error {
+	return ioutil.WriteFile(filePath, contents, mode)
+}
+
+func MustWriteFile(filePath string, contents []byte, mode os.FileMode) {
+	err := WriteFile(filePath, contents, mode)
+	if err != nil {
+		Exit(Fmt("MustWriteFile failed: %v", err))
+	}
+}
+
+// WriteFileAtomic creates a temporary file with data and the perm given and
+// swaps it atomically with filename if successful.
+func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
+	var (
+		dir      = filepath.Dir(filename)
+		tempFile = filepath.Join(dir, "write-file-atomic-"+RandStr(32))
+		// Overwrite in case it does exist, create in case it doesn't, and force
+		// kernel flush, which still leaves the potential of lingering disk cache.
+		flag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC
+	)
+
+	f, err := os.OpenFile(tempFile, flag, perm)
+	if err != nil {
+		return err
+	}
+	// Clean up in any case. Defer stacking order is last-in-first-out.
+	defer os.Remove(f.Name())
+	defer f.Close()
+
+	if n, err := f.Write(data); err != nil {
+		return err
+	} else if n < len(data) {
+		return io.ErrShortWrite
+	}
+	// Close the file before renaming it, otherwise it will cause "The process
+	// cannot access the file because it is being used by another process." on Windows.
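+	// The explicit Close below also causes the deferred Close above to return
+	// a harmless "already closed" error, which is deliberately ignored.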
+ f.Close() + + return os.Rename(f.Name(), filename) +} + +//-------------------------------------------------------------------------------- + +func Tempfile(prefix string) (*os.File, string) { + file, err := ioutil.TempFile("", prefix) + if err != nil { + PanicCrisis(err) + } + return file, file.Name() +} + +func Tempdir(prefix string) (*os.File, string) { + tempDir := os.TempDir() + "/" + prefix + RandStr(12) + err := EnsureDir(tempDir, 0700) + if err != nil { + panic(Fmt("Error creating temp dir: %v", err)) + } + dir, err := os.Open(tempDir) + if err != nil { + panic(Fmt("Error opening temp dir: %v", err)) + } + return dir, tempDir +} + +//-------------------------------------------------------------------------------- + +func Prompt(prompt string, defaultValue string) (string, error) { + fmt.Print(prompt) + reader := bufio.NewReader(os.Stdin) + line, err := reader.ReadString('\n') + if err != nil { + return defaultValue, err + } + line = strings.TrimSpace(line) + if line == "" { + return defaultValue, nil + } + return line, nil +} diff --git a/libs/common/os_test.go b/libs/common/os_test.go new file mode 100644 index 000000000..973d68901 --- /dev/null +++ b/libs/common/os_test.go @@ -0,0 +1,91 @@ +package common + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + "testing" + "time" +) + +func TestWriteFileAtomic(t *testing.T) { + var ( + seed = rand.New(rand.NewSource(time.Now().UnixNano())) + data = []byte(RandStr(seed.Intn(2048))) + old = RandBytes(seed.Intn(2048)) + perm os.FileMode = 0600 + ) + + f, err := ioutil.TempFile("/tmp", "write-atomic-test-") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + + if err = ioutil.WriteFile(f.Name(), old, 0664); err != nil { + t.Fatal(err) + } + + if err = WriteFileAtomic(f.Name(), data, perm); err != nil { + t.Fatal(err) + } + + rData, err := ioutil.ReadFile(f.Name()) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(data, rData) { + t.Fatalf("data mismatch: %v != %v", data, rData) + } + + stat, err := os.Stat(f.Name()) + if err != nil { + t.Fatal(err) + } + + if have, want := stat.Mode().Perm(), perm; have != want { + t.Errorf("have %v, want %v", have, want) + } +} + +func TestGoPath(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + err := os.Setenv("GOPATH", "~/testgopath") + if err != nil { + t.Fatal(err) + } + path = GoPath() + if path != "~/testgopath" { + t.Fatalf("should get GOPATH env var value, got %v", path) + } + os.Unsetenv("GOPATH") + + path = GoPath() + if path != "~/testgopath" { + t.Fatalf("subsequent calls should return the same value, got %v", path) + } +} + +func TestGoPathWithoutEnvVar(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + os.Unsetenv("GOPATH") + // reset cache + gopath = "" + + path = GoPath() + if path == "" || path == "~/testgopath" { + t.Fatalf("should get nonempty result of calling go env GOPATH, got %v", path) + } +} diff --git a/libs/common/random.go b/libs/common/random.go new file mode 100644 index 000000000..389a32fc2 --- /dev/null +++ b/libs/common/random.go @@ -0,0 +1,357 @@ +package common + +import ( + crand "crypto/rand" + mrand "math/rand" + "sync" + "time" +) + +const ( + strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters +) + +// pseudo random number generator. 
+// seeded with OS randomness (crand)
+
+type Rand struct {
+	sync.Mutex
+	rand *mrand.Rand
+}
+
+var grand *Rand
+
+func init() {
+	grand = NewRand()
+	grand.init()
+}
+
+func NewRand() *Rand {
+	rand := &Rand{}
+	rand.init()
+	return rand
+}
+
+func (r *Rand) init() {
+	bz := cRandBytes(8)
+	var seed uint64
+	for i := 0; i < 8; i++ {
+		// Shift before OR-ing in the next byte so that no byte of the
+		// OS-provided seed material is shifted out and lost.
+		seed <<= 8
+		seed |= uint64(bz[i])
+	}
+	r.reset(int64(seed))
+}
+
+func (r *Rand) reset(seed int64) {
+	r.rand = mrand.New(mrand.NewSource(seed))
+}
+
+//----------------------------------------
+// Global functions
+
+func Seed(seed int64) {
+	grand.Seed(seed)
+}
+
+func RandStr(length int) string {
+	return grand.Str(length)
+}
+
+func RandUint16() uint16 {
+	return grand.Uint16()
+}
+
+func RandUint32() uint32 {
+	return grand.Uint32()
+}
+
+func RandUint64() uint64 {
+	return grand.Uint64()
+}
+
+func RandUint() uint {
+	return grand.Uint()
+}
+
+func RandInt16() int16 {
+	return grand.Int16()
+}
+
+func RandInt32() int32 {
+	return grand.Int32()
+}
+
+func RandInt64() int64 {
+	return grand.Int64()
+}
+
+func RandInt() int {
+	return grand.Int()
+}
+
+func RandInt31() int32 {
+	return grand.Int31()
+}
+
+func RandInt31n(n int32) int32 {
+	return grand.Int31n(n)
+}
+
+func RandInt63() int64 {
+	return grand.Int63()
+}
+
+func RandInt63n(n int64) int64 {
+	return grand.Int63n(n)
+}
+
+func RandUint16Exp() uint16 {
+	return grand.Uint16Exp()
+}
+
+func RandUint32Exp() uint32 {
+	return grand.Uint32Exp()
+}
+
+func RandUint64Exp() uint64 {
+	return grand.Uint64Exp()
+}
+
+func RandFloat32() float32 {
+	return grand.Float32()
+}
+
+func RandFloat64() float64 {
+	return grand.Float64()
+}
+
+func RandTime() time.Time {
+	return grand.Time()
+}
+
+func RandBytes(n int) []byte {
+	return grand.Bytes(n)
+}
+
+func RandIntn(n int) int {
+	return grand.Intn(n)
+}
+
+func RandPerm(n int) []int {
+	return grand.Perm(n)
+}
+
+//----------------------------------------
+// Rand methods
+
+func (r *Rand) Seed(seed int64) {
+	r.Lock()
+	r.reset(seed)
+	r.Unlock()
+}
+
+// Str constructs an alphanumeric string of the given length.
+// It is not safe for cryptographic usage.
+func (r *Rand) Str(length int) string {
+	if length <= 0 {
+		return "" // avoid spinning forever in the loop below
+	}
+	chars := []byte{}
+MAIN_LOOP:
+	for {
+		val := r.Int63()
+		for i := 0; i < 10; i++ {
+			v := int(val & 0x3f) // rightmost 6 bits
+			if v >= 62 {         // only 62 characters in strChars
+				val >>= 6
+				continue
+			} else {
+				chars = append(chars, strChars[v])
+				if len(chars) == length {
+					break MAIN_LOOP
+				}
+				val >>= 6
+			}
+		}
+	}
+
+	return string(chars)
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Uint16() uint16 {
+	return uint16(r.Uint32() & (1<<16 - 1))
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Uint32() uint32 {
+	r.Lock()
+	u32 := r.rand.Uint32()
+	r.Unlock()
+	return u32
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Uint64() uint64 {
+	return uint64(r.Uint32())<<32 + uint64(r.Uint32())
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Uint() uint {
+	r.Lock()
+	i := r.rand.Int()
+	r.Unlock()
+	return uint(i)
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int16() int16 {
+	return int16(r.Uint32() & (1<<16 - 1))
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int32() int32 {
+	return int32(r.Uint32())
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int64() int64 {
+	return int64(r.Uint64())
+}
+
+// It is not safe for cryptographic usage.
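+// Int returns a uniformly distributed, non-negative pseudo-random int.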
+func (r *Rand) Int() int {
+	r.Lock()
+	i := r.rand.Int()
+	r.Unlock()
+	return i
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int31() int32 {
+	r.Lock()
+	i31 := r.rand.Int31()
+	r.Unlock()
+	return i31
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int31n(n int32) int32 {
+	r.Lock()
+	i31n := r.rand.Int31n(n)
+	r.Unlock()
+	return i31n
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int63() int64 {
+	r.Lock()
+	i63 := r.rand.Int63()
+	r.Unlock()
+	return i63
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int63n(n int64) int64 {
+	r.Lock()
+	i63n := r.rand.Int63n(n)
+	r.Unlock()
+	return i63n
+}
+
+// Distributed pseudo-exponentially to test for various cases.
+// It is not safe for cryptographic usage.
+func (r *Rand) Uint16Exp() uint16 {
+	bits := r.Uint32() % 16
+	if bits == 0 {
+		return 0
+	}
+	n := uint16(1 << (bits - 1))
+	n += uint16(r.Int31()) & ((1 << (bits - 1)) - 1)
+	return n
+}
+
+// Distributed pseudo-exponentially to test for various cases.
+// It is not safe for cryptographic usage.
+func (r *Rand) Uint32Exp() uint32 {
+	bits := r.Uint32() % 32
+	if bits == 0 {
+		return 0
+	}
+	n := uint32(1 << (bits - 1))
+	n += uint32(r.Int31()) & ((1 << (bits - 1)) - 1)
+	return n
+}
+
+// Distributed pseudo-exponentially to test for various cases.
+// It is not safe for cryptographic usage.
+func (r *Rand) Uint64Exp() uint64 {
+	bits := r.Uint32() % 64
+	if bits == 0 {
+		return 0
+	}
+	n := uint64(1 << (bits - 1))
+	n += uint64(r.Int63()) & ((1 << (bits - 1)) - 1)
+	return n
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Float32() float32 {
+	r.Lock()
+	f32 := r.rand.Float32()
+	r.Unlock()
+	return f32
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Float64() float64 {
+	r.Lock()
+	f64 := r.rand.Float64()
+	r.Unlock()
+	return f64
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Time() time.Time {
+	return time.Unix(int64(r.Uint64Exp()), 0)
+}
+
+// Bytes returns n pseudo-random bytes from the internal PRNG. The OS's entropy
+// source (crypto/rand) is only used for seeding, not on each call.
+// It is not safe for cryptographic usage.
+func (r *Rand) Bytes(n int) []byte {
+	// cRandBytes isn't guaranteed to be fast so instead
+	// use random bytes generated from the internal PRNG
+	bs := make([]byte, n)
+	for i := 0; i < len(bs); i++ {
+		bs[i] = byte(r.Int() & 0xFF)
+	}
+	return bs
+}
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0, n).
+// It panics if n <= 0.
+// It is not safe for cryptographic usage.
+func (r *Rand) Intn(n int) int {
+	r.Lock()
+	i := r.rand.Intn(n)
+	r.Unlock()
+	return i
+}
+
+// Perm returns a pseudo-random permutation of n integers in [0, n).
+// It is not safe for cryptographic usage.
+func (r *Rand) Perm(n int) []int {
+	r.Lock()
+	perm := r.rand.Perm(n)
+	r.Unlock()
+	return perm
+}
+
+// NOTE: This relies on the OS's random number generator.
+// For real security, we should salt that with some seed.
+// See github.com/tendermint/go-crypto for a more secure reader.
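+// cRandBytes panics (via PanicCrisis) if reading from the OS entropy source fails.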
+func cRandBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + PanicCrisis(err) + } + return b +} diff --git a/libs/common/random_test.go b/libs/common/random_test.go new file mode 100644 index 000000000..b58b4a13a --- /dev/null +++ b/libs/common/random_test.go @@ -0,0 +1,121 @@ +package common + +import ( + "bytes" + "encoding/json" + "fmt" + mrand "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestRandStr(t *testing.T) { + l := 243 + s := RandStr(l) + assert.Equal(t, l, len(s)) +} + +func TestRandBytes(t *testing.T) { + l := 243 + b := RandBytes(l) + assert.Equal(t, l, len(b)) +} + +func TestRandIntn(t *testing.T) { + n := 243 + for i := 0; i < 100; i++ { + x := RandIntn(n) + assert.True(t, x < n) + } +} + +// Test to make sure that we never call math.rand(). +// We do this by ensuring that outputs are deterministic. +func TestDeterminism(t *testing.T) { + var firstOutput string + + // Set math/rand's seed for the sake of debugging this test. + // (It isn't strictly necessary). + mrand.Seed(1) + + for i := 0; i < 100; i++ { + output := testThemAll() + if i == 0 { + firstOutput = output + } else { + if firstOutput != output { + t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", + i, firstOutput, output) + } + } + } +} + +func testThemAll() string { + + // Such determinism. + grand.reset(1) + + // Use it. + out := new(bytes.Buffer) + perm := RandPerm(10) + blob, _ := json.Marshal(perm) + fmt.Fprintf(out, "perm: %s\n", blob) + fmt.Fprintf(out, "randInt: %d\n", RandInt()) + fmt.Fprintf(out, "randUint: %d\n", RandUint()) + fmt.Fprintf(out, "randIntn: %d\n", RandIntn(97)) + fmt.Fprintf(out, "randInt31: %d\n", RandInt31()) + fmt.Fprintf(out, "randInt32: %d\n", RandInt32()) + fmt.Fprintf(out, "randInt63: %d\n", RandInt63()) + fmt.Fprintf(out, "randInt64: %d\n", RandInt64()) + fmt.Fprintf(out, "randUint32: %d\n", RandUint32()) + fmt.Fprintf(out, "randUint64: %d\n", RandUint64()) + fmt.Fprintf(out, "randUint16Exp: %d\n", RandUint16Exp()) + fmt.Fprintf(out, "randUint32Exp: %d\n", RandUint32Exp()) + fmt.Fprintf(out, "randUint64Exp: %d\n", RandUint64Exp()) + return out.String() +} + +func TestRngConcurrencySafety(t *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + _ = RandUint64() + <-time.After(time.Millisecond * time.Duration(RandIntn(100))) + _ = RandPerm(3) + }() + } + wg.Wait() +} + +func BenchmarkRandBytes10B(b *testing.B) { + benchmarkRandBytes(b, 10) +} +func BenchmarkRandBytes100B(b *testing.B) { + benchmarkRandBytes(b, 100) +} +func BenchmarkRandBytes1KiB(b *testing.B) { + benchmarkRandBytes(b, 1024) +} +func BenchmarkRandBytes10KiB(b *testing.B) { + benchmarkRandBytes(b, 10*1024) +} +func BenchmarkRandBytes100KiB(b *testing.B) { + benchmarkRandBytes(b, 100*1024) +} +func BenchmarkRandBytes1MiB(b *testing.B) { + benchmarkRandBytes(b, 1024*1024) +} + +func benchmarkRandBytes(b *testing.B, n int) { + for i := 0; i < b.N; i++ { + _ = RandBytes(n) + } + b.ReportAllocs() +} diff --git a/libs/common/repeat_timer.go b/libs/common/repeat_timer.go new file mode 100644 index 000000000..5d049738d --- /dev/null +++ b/libs/common/repeat_timer.go @@ -0,0 +1,232 @@ +package common + +import ( + "sync" + "time" +) + +// Used by RepeatTimer the first time, +// and every time it's Reset() after Stop(). +type TickerMaker func(dur time.Duration) Ticker + +// Ticker is a basic ticker interface. 
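+//
+// A minimal usage sketch (with the package's default, time.Ticker-backed maker):
+//
+//	t := defaultTickerMaker(time.Second)
+//	<-t.Chan() // fires roughly every second
+//	t.Stop()   // per the contract below, never stop a stopped Ticker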
+type Ticker interface { + + // Never changes, never closes. + Chan() <-chan time.Time + + // Stopping a stopped Ticker will panic. + Stop() +} + +//---------------------------------------- +// defaultTicker + +var _ Ticker = (*defaultTicker)(nil) + +type defaultTicker time.Ticker + +func defaultTickerMaker(dur time.Duration) Ticker { + ticker := time.NewTicker(dur) + return (*defaultTicker)(ticker) +} + +// Implements Ticker +func (t *defaultTicker) Chan() <-chan time.Time { + return t.C +} + +// Implements Ticker +func (t *defaultTicker) Stop() { + ((*time.Ticker)(t)).Stop() +} + +//---------------------------------------- +// LogicalTickerMaker + +// Construct a TickerMaker that always uses `source`. +// It's useful for simulating a deterministic clock. +func NewLogicalTickerMaker(source chan time.Time) TickerMaker { + return func(dur time.Duration) Ticker { + return newLogicalTicker(source, dur) + } +} + +type logicalTicker struct { + source <-chan time.Time + ch chan time.Time + quit chan struct{} +} + +func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker { + lt := &logicalTicker{ + source: source, + ch: make(chan time.Time), + quit: make(chan struct{}), + } + go lt.fireRoutine(interval) + return lt +} + +// We need a goroutine to read times from t.source +// and fire on t.Chan() when `interval` has passed. +func (t *logicalTicker) fireRoutine(interval time.Duration) { + source := t.source + + // Init `lasttime` + lasttime := time.Time{} + select { + case lasttime = <-source: + case <-t.quit: + return + } + // Init `lasttime` end + + for { + select { + case newtime := <-source: + elapsed := newtime.Sub(lasttime) + if interval <= elapsed { + // Block for determinism until the ticker is stopped. + select { + case t.ch <- newtime: + case <-t.quit: + return + } + // Reset timeleft. + // Don't try to "catch up" by sending more. + // "Ticker adjusts the intervals or drops ticks to make up for + // slow receivers" - https://golang.org/pkg/time/#Ticker + lasttime = newtime + } + case <-t.quit: + return // done + } + } +} + +// Implements Ticker +func (t *logicalTicker) Chan() <-chan time.Time { + return t.ch // immutable +} + +// Implements Ticker +func (t *logicalTicker) Stop() { + close(t.quit) // it *should* panic when stopped twice. +} + +//--------------------------------------------------------------------- + +/* + RepeatTimer repeatedly sends a struct{}{} to `.Chan()` after each `dur` + period. (It's good for keeping connections alive.) + A RepeatTimer must be stopped, or it will keep a goroutine alive. +*/ +type RepeatTimer struct { + name string + ch chan time.Time + tm TickerMaker + + mtx sync.Mutex + dur time.Duration + ticker Ticker + quit chan struct{} +} + +// NewRepeatTimer returns a RepeatTimer with a defaultTicker. +func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { + return NewRepeatTimerWithTickerMaker(name, dur, defaultTickerMaker) +} + +// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker +// maker. +func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMaker) *RepeatTimer { + var t = &RepeatTimer{ + name: name, + ch: make(chan time.Time), + tm: tm, + dur: dur, + ticker: nil, + quit: nil, + } + t.reset() + return t +} + +// receive ticks on ch, send out on t.ch +func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { + for { + select { + case tick := <-ch: + select { + case t.ch <- tick: + case <-quit: + return + } + case <-quit: // NOTE: `t.quit` races. 
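+			// Reading the quit parameter instead of t.quit is deliberate:
+			// reset() replaces t.quit under the mutex while this goroutine
+			// is still running, so touching t.quit here would be a data race.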
+ return + } + } +} + +func (t *RepeatTimer) Chan() <-chan time.Time { + return t.ch +} + +func (t *RepeatTimer) Stop() { + t.mtx.Lock() + defer t.mtx.Unlock() + + t.stop() +} + +// Wait the duration again before firing. +func (t *RepeatTimer) Reset() { + t.mtx.Lock() + defer t.mtx.Unlock() + + t.reset() +} + +//---------------------------------------- +// Misc. + +// CONTRACT: (non-constructor) caller should hold t.mtx. +func (t *RepeatTimer) reset() { + if t.ticker != nil { + t.stop() + } + t.ticker = t.tm(t.dur) + t.quit = make(chan struct{}) + go t.fireRoutine(t.ticker.Chan(), t.quit) +} + +// CONTRACT: caller should hold t.mtx. +func (t *RepeatTimer) stop() { + if t.ticker == nil { + /* + Similar to the case of closing channels twice: + https://groups.google.com/forum/#!topic/golang-nuts/rhxMiNmRAPk + Stopping a RepeatTimer twice implies that you do + not know whether you are done or not. + If you're calling stop on a stopped RepeatTimer, + you probably have race conditions. + */ + panic("Tried to stop a stopped RepeatTimer") + } + t.ticker.Stop() + t.ticker = nil + /* + From https://golang.org/pkg/time/#Ticker: + "Stop the ticker to release associated resources" + "After Stop, no more ticks will be sent" + So we shouldn't have to do the below. + + select { + case <-t.ch: + // read off channel if there's anything there + default: + } + */ + close(t.quit) +} diff --git a/libs/common/repeat_timer_test.go b/libs/common/repeat_timer_test.go new file mode 100644 index 000000000..b81720c85 --- /dev/null +++ b/libs/common/repeat_timer_test.go @@ -0,0 +1,137 @@ +package common + +import ( + "math/rand" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" +) + +func TestDefaultTicker(t *testing.T) { + ticker := defaultTickerMaker(time.Millisecond * 10) + <-ticker.Chan() + ticker.Stop() +} + +func TestRepeatTimer(t *testing.T) { + + ch := make(chan time.Time, 100) + mtx := new(sync.Mutex) + + // tick() fires from start to end + // (exclusive) in milliseconds with incr. + // It locks on mtx, so subsequent calls + // run in series. + tick := func(startMs, endMs, incrMs time.Duration) { + mtx.Lock() + go func() { + for tMs := startMs; tMs < endMs; tMs += incrMs { + lt := time.Time{} + lt = lt.Add(tMs * time.Millisecond) + ch <- lt + } + mtx.Unlock() + }() + } + + // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". + tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { + + // Check against timesMs. + for _, timeMs := range timesMs { + tyme := <-rt.Chan() + sinceMs := tyme.Sub(time.Time{}) / time.Millisecond + assert.Equal(t, timeMs, int64(sinceMs)) + } + + // TODO detect number of running + // goroutines to ensure that + // no other times will fire. + // See https://github.com/tendermint/tendermint/libs/issues/120. + time.Sleep(time.Millisecond * 100) + done := true + select { + case <-rt.Chan(): + done = false + default: + } + assert.True(t, done) + } + + tm := NewLogicalTickerMaker(ch) + rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) + + /* NOTE: Useful for debugging deadlocks... 
+ go func() { + time.Sleep(time.Second * 3) + trace := make([]byte, 102400) + count := runtime.Stack(trace, true) + fmt.Printf("Stack of %d bytes: %s\n", count, trace) + }() + */ + + tick(0, 1000, 10) + tock(t, rt, []int64{}) + tick(1000, 2000, 10) + tock(t, rt, []int64{1000}) + tick(2005, 5000, 10) + tock(t, rt, []int64{2005, 3005, 4005}) + tick(5001, 5999, 1) + // Read 5005 instead of 5001 because + // it's 1 second greater than 4005. + tock(t, rt, []int64{5005}) + tick(6000, 7005, 1) + tock(t, rt, []int64{6005}) + tick(7033, 8032, 1) + tock(t, rt, []int64{7033}) + + // After a reset, nothing happens + // until two ticks are received. + rt.Reset() + tock(t, rt, []int64{}) + tick(8040, 8041, 1) + tock(t, rt, []int64{}) + tick(9555, 9556, 1) + tock(t, rt, []int64{9555}) + + // After a stop, nothing more is sent. + rt.Stop() + tock(t, rt, []int64{}) + + // Another stop panics. + assert.Panics(t, func() { rt.Stop() }) +} + +func TestRepeatTimerReset(t *testing.T) { + // check that we are not leaking any go-routines + defer leaktest.Check(t)() + + timer := NewRepeatTimer("test", 20*time.Millisecond) + defer timer.Stop() + + // test we don't receive tick before duration ms. + select { + case <-timer.Chan(): + t.Fatal("did not expect to receive tick") + default: + } + + timer.Reset() + + // test we receive tick after Reset is called + select { + case <-timer.Chan(): + // all good + case <-time.After(40 * time.Millisecond): + t.Fatal("expected to receive tick after reset") + } + + // just random calls + for i := 0; i < 100; i++ { + time.Sleep(time.Duration(rand.Intn(40)) * time.Millisecond) + timer.Reset() + } +} diff --git a/libs/common/service.go b/libs/common/service.go new file mode 100644 index 000000000..b6f166e77 --- /dev/null +++ b/libs/common/service.go @@ -0,0 +1,205 @@ +package common + +import ( + "errors" + "fmt" + "sync/atomic" + + "github.com/tendermint/tendermint/libs/log" +) + +var ( + ErrAlreadyStarted = errors.New("already started") + ErrAlreadyStopped = errors.New("already stopped") +) + +// Service defines a service that can be started, stopped, and reset. +type Service interface { + // Start the service. + // If it's already started or stopped, will return an error. + // If OnStart() returns an error, it's returned by Start() + Start() error + OnStart() error + + // Stop the service. + // If it's already stopped, will return an error. + // OnStop must never error. + Stop() error + OnStop() + + // Reset the service. + // Panics by default - must be overwritten to enable reset. + Reset() error + OnReset() error + + // Return true if the service is running + IsRunning() bool + + // Quit returns a channel, which is closed once service is stopped. + Quit() <-chan struct{} + + // String representation of the service + String() string + + // SetLogger sets a logger. + SetLogger(log.Logger) +} + +/* +Classical-inheritance-style service declarations. Services can be started, then +stopped, then optionally restarted. + +Users can override the OnStart/OnStop methods. In the absence of errors, these +methods are guaranteed to be called at most once. If OnStart returns an error, +service won't be marked as started, so the user can call Start again. + +A call to Reset will panic, unless OnReset is overwritten, allowing +OnStart/OnStop to be called again. + +The caller must ensure that Start and Stop are not called concurrently. + +It is ok to call Stop without calling Start first. 
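+
+Note that Start on an already-stopped service returns ErrAlreadyStopped; call
+Reset first if the service needs to run again.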
+
+Typical usage:
+
+	type FooService struct {
+		BaseService
+		// private fields
+	}
+
+	func NewFooService() *FooService {
+		fs := &FooService{
+			// init
+		}
+		fs.BaseService = *NewBaseService(logger, "FooService", fs)
+		return fs
+	}
+
+	func (fs *FooService) OnStart() error {
+		fs.BaseService.OnStart() // Always call the overridden method.
+		// initialize private fields
+		// start subroutines, etc.
+		return nil
+	}
+
+	func (fs *FooService) OnStop() {
+		fs.BaseService.OnStop() // Always call the overridden method.
+		// close/destroy private fields
+		// stop subroutines, etc.
+	}
+*/
+type BaseService struct {
+	Logger  log.Logger
+	name    string
+	started uint32 // atomic
+	stopped uint32 // atomic
+	quit    chan struct{}
+
+	// The "subclass" of BaseService
+	impl Service
+}
+
+// NewBaseService creates a new BaseService.
+func NewBaseService(logger log.Logger, name string, impl Service) *BaseService {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	return &BaseService{
+		Logger: logger,
+		name:   name,
+		quit:   make(chan struct{}),
+		impl:   impl,
+	}
+}
+
+// SetLogger implements Service by setting a logger.
+func (bs *BaseService) SetLogger(l log.Logger) {
+	bs.Logger = l
+}
+
+// Start implements Service by calling OnStart (if defined). An error will be
+// returned if the service is already running or stopped. To restart a stopped
+// service, call Reset first.
+func (bs *BaseService) Start() error {
+	if atomic.CompareAndSwapUint32(&bs.started, 0, 1) {
+		if atomic.LoadUint32(&bs.stopped) == 1 {
+			bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl)
+			return ErrAlreadyStopped
+		}
+		bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl)
+		err := bs.impl.OnStart()
+		if err != nil {
+			// revert flag
+			atomic.StoreUint32(&bs.started, 0)
+			return err
+		}
+		return nil
+	}
+	bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl)
+	return ErrAlreadyStarted
+}
+
+// OnStart implements Service by doing nothing.
+// NOTE: Do not put anything in here,
+// that way users don't need to call BaseService.OnStart()
+func (bs *BaseService) OnStart() error { return nil }
+
+// Stop implements Service by calling OnStop (if defined) and closing quit
+// channel. An error will be returned if the service is already stopped.
+func (bs *BaseService) Stop() error {
+	if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) {
+		bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl)
+		bs.impl.OnStop()
+		close(bs.quit)
+		return nil
+	}
+	bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl)
+	return ErrAlreadyStopped
+}
+
+// OnStop implements Service by doing nothing.
+// NOTE: Do not put anything in here,
+// that way users don't need to call BaseService.OnStop()
+func (bs *BaseService) OnStop() {}
+
+// Reset implements Service by calling OnReset callback (if defined). An error
+// will be returned if the service is running.
+func (bs *BaseService) Reset() error {
+	if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) {
+		bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl)
+		return fmt.Errorf("can't reset running %s", bs.name)
+	}
+
+	// whether or not we've started, we can reset
+	atomic.CompareAndSwapUint32(&bs.started, 1, 0)
+
+	bs.quit = make(chan struct{})
+	return bs.impl.OnReset()
+}
+
+// OnReset implements Service by panicking.
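+// Embedders that support Reset must override it; a minimal sketch for the
+// hypothetical FooService above:
+//
+//	func (fs *FooService) OnReset() error {
+//		// re-initialize private fields here
+//		return nil
+//	}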
+func (bs *BaseService) OnReset() error {
+	PanicSanity("The service cannot be reset")
+	return nil
+}
+
+// IsRunning implements Service by returning true or false depending on the
+// service's state.
+func (bs *BaseService) IsRunning() bool {
+	return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0
+}
+
+// Wait blocks until the service is stopped.
+func (bs *BaseService) Wait() {
+	<-bs.quit
+}
+
+// String implements Service by returning a string representation of the service.
+func (bs *BaseService) String() string {
+	return bs.name
+}
+
+// Quit implements Service by returning a quit channel.
+func (bs *BaseService) Quit() <-chan struct{} {
+	return bs.quit
+}
diff --git a/libs/common/service_test.go b/libs/common/service_test.go
new file mode 100644
index 000000000..ef360a648
--- /dev/null
+++ b/libs/common/service_test.go
@@ -0,0 +1,54 @@
+package common
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+type testService struct {
+	BaseService
+}
+
+func (testService) OnReset() error {
+	return nil
+}
+
+func TestBaseServiceWait(t *testing.T) {
+	ts := &testService{}
+	ts.BaseService = *NewBaseService(nil, "TestService", ts)
+	ts.Start()
+
+	waitFinished := make(chan struct{})
+	go func() {
+		ts.Wait()
+		waitFinished <- struct{}{}
+	}()
+
+	go ts.Stop()
+
+	select {
+	case <-waitFinished:
+		// all good
+	case <-time.After(100 * time.Millisecond):
+		t.Fatal("expected Wait() to finish within 100 ms.")
+	}
+}
+
+func TestBaseServiceReset(t *testing.T) {
+	ts := &testService{}
+	ts.BaseService = *NewBaseService(nil, "TestService", ts)
+	ts.Start()
+
+	err := ts.Reset()
+	require.Error(t, err, "expected can't-reset-running-service error")
+
+	ts.Stop()
+
+	err = ts.Reset()
+	require.NoError(t, err)
+
+	err = ts.Start()
+	require.NoError(t, err)
+}
diff --git a/libs/common/string.go b/libs/common/string.go
new file mode 100644
index 000000000..fac1be6c9
--- /dev/null
+++ b/libs/common/string.go
@@ -0,0 +1,89 @@
+package common
+
+import (
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// Like fmt.Sprintf, but skips formatting if args are empty.
+var Fmt = func(format string, a ...interface{}) string {
+	if len(a) == 0 {
+		return format
+	}
+	return fmt.Sprintf(format, a...)
+}
+
+// IsHex returns true for non-empty hex-string prefixed with "0x"
+func IsHex(s string) bool {
+	if len(s) > 2 && strings.EqualFold(s[:2], "0x") {
+		_, err := hex.DecodeString(s[2:])
+		return err == nil
+	}
+	return false
+}
+
+// StripHex returns hex string without leading "0x"
+func StripHex(s string) string {
+	if IsHex(s) {
+		return s[2:]
+	}
+	return s
+}
+
+// StringInSlice returns true if a is found in the list.
+func StringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+// SplitAndTrim slices s into all subslices separated by sep and returns a
+// slice of the string s with all leading and trailing Unicode code points
+// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
+// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
+// -1.
+func SplitAndTrim(s, sep, cutset string) []string {
+	if s == "" {
+		return []string{}
+	}
+
+	spl := strings.Split(s, sep)
+	for i := 0; i < len(spl); i++ {
+		spl[i] = strings.Trim(spl[i], cutset)
+	}
+	return spl
+}
+
+// Returns true if s is a non-empty string of printable, non-tab ASCII characters.
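+// For example, IsASCIIText("hello, world") is true, while IsASCIIText(""),
+// IsASCIIText("\t"), and IsASCIIText("héllo") are all false.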
+func IsASCIIText(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	for _, b := range []byte(s) {
+		if b < 32 || b > 126 {
+			return false
+		}
+	}
+	return true
+}
+
+// ASCIITrim removes all space characters (0x20) from s.
+// NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics.
+func ASCIITrim(s string) string {
+	r := make([]byte, 0, len(s))
+	for _, b := range []byte(s) {
+		if b == 32 {
+			continue // skip space
+		} else if 32 < b && b <= 126 {
+			r = append(r, b)
+		} else {
+			panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b))
+		}
+	}
+	return string(r)
+}
diff --git a/libs/common/string_test.go b/libs/common/string_test.go
new file mode 100644
index 000000000..5d1b68feb
--- /dev/null
+++ b/libs/common/string_test.go
@@ -0,0 +1,74 @@
+package common
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestStringInSlice(t *testing.T) {
+	assert.True(t, StringInSlice("a", []string{"a", "b", "c"}))
+	assert.False(t, StringInSlice("d", []string{"a", "b", "c"}))
+	assert.True(t, StringInSlice("", []string{""}))
+	assert.False(t, StringInSlice("", []string{}))
+}
+
+func TestIsHex(t *testing.T) {
+	notHex := []string{
+		"", "   ", "a", "x", "0", "0x", "0X", "0x ", "0X ", "0X a",
+		"0xf ", "0x f", "0xp", "0x-",
+		"0xf", "0XBED", "0xF", "0xbed", // Odd lengths
+	}
+	for _, v := range notHex {
+		assert.False(t, IsHex(v), "%q is not hex", v)
+	}
+	hex := []string{
+		"0x00", "0x0a", "0x0F", "0xFFFFFF", "0Xdeadbeef", "0x0BED",
+		"0X12", "0X0A",
+	}
+	for _, v := range hex {
+		assert.True(t, IsHex(v), "%q is hex", v)
+	}
+}
+
+func TestSplitAndTrim(t *testing.T) {
+	testCases := []struct {
+		s        string
+		sep      string
+		cutset   string
+		expected []string
+	}{
+		{"a,b,c", ",", " ", []string{"a", "b", "c"}},
+		{" a , b , c ", ",", " ", []string{"a", "b", "c"}},
+		{" a, b, c ", ",", " ", []string{"a", "b", "c"}},
+		{" , ", ",", " ", []string{"", ""}},
+		{"   ", ",", " ", []string{""}},
+	}
+
+	for _, tc := range testCases {
+		assert.Equal(t, tc.expected, SplitAndTrim(tc.s, tc.sep, tc.cutset), "%s", tc.s)
+	}
+}
+
+func TestIsASCIIText(t *testing.T) {
+	notASCIIText := []string{
+		"", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t",
+	}
+	for _, v := range notASCIIText {
+		assert.False(t, IsASCIIText(v), "%q is not ascii-text", v)
+	}
+	asciiText := []string{
+		" ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123",
+	}
+	for _, v := range asciiText {
+		assert.True(t, IsASCIIText(v), "%q is ascii-text", v)
+	}
+}
+
+func TestASCIITrim(t *testing.T) {
+	assert.Equal(t, ASCIITrim(" "), "")
+	assert.Equal(t, ASCIITrim(" a"), "a")
+	assert.Equal(t, ASCIITrim("a "), "a")
+	assert.Equal(t, ASCIITrim(" a "), "a")
+	assert.Panics(t, func() { ASCIITrim("\xC2\xA2") })
+}
diff --git a/libs/common/throttle_timer.go b/libs/common/throttle_timer.go
new file mode 100644
index 000000000..38ef4e9a3
--- /dev/null
+++ b/libs/common/throttle_timer.go
@@ -0,0 +1,75 @@
+package common
+
+import (
+	"sync"
+	"time"
+)
+
+/*
+ThrottleTimer fires an event at most "dur" after each .Set() call.
+If a short burst of .Set() calls happens, ThrottleTimer fires once.
+If a long continuous burst of .Set() calls happens, ThrottleTimer fires
+at most once every "dur".
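+
+Unset cancels a pending fire. Stop additionally closes the internal quit
+channel, so call it at most once per timer (Stop on a nil ThrottleTimer is
+a safe no-op).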
+*/ +type ThrottleTimer struct { + Name string + Ch chan struct{} + quit chan struct{} + dur time.Duration + + mtx sync.Mutex + timer *time.Timer + isSet bool +} + +func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { + var ch = make(chan struct{}) + var quit = make(chan struct{}) + var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} + t.mtx.Lock() + t.timer = time.AfterFunc(dur, t.fireRoutine) + t.mtx.Unlock() + t.timer.Stop() + return t +} + +func (t *ThrottleTimer) fireRoutine() { + t.mtx.Lock() + defer t.mtx.Unlock() + select { + case t.Ch <- struct{}{}: + t.isSet = false + case <-t.quit: + // do nothing + default: + t.timer.Reset(t.dur) + } +} + +func (t *ThrottleTimer) Set() { + t.mtx.Lock() + defer t.mtx.Unlock() + if !t.isSet { + t.isSet = true + t.timer.Reset(t.dur) + } +} + +func (t *ThrottleTimer) Unset() { + t.mtx.Lock() + defer t.mtx.Unlock() + t.isSet = false + t.timer.Stop() +} + +// For ease of .Stop()'ing services before .Start()'ing them, +// we ignore .Stop()'s on nil ThrottleTimers +func (t *ThrottleTimer) Stop() bool { + if t == nil { + return false + } + close(t.quit) + t.mtx.Lock() + defer t.mtx.Unlock() + return t.timer.Stop() +} diff --git a/libs/common/throttle_timer_test.go b/libs/common/throttle_timer_test.go new file mode 100644 index 000000000..00f5abdec --- /dev/null +++ b/libs/common/throttle_timer_test.go @@ -0,0 +1,78 @@ +package common + +import ( + "sync" + "testing" + "time" + + // make govet noshadow happy... + asrt "github.com/stretchr/testify/assert" +) + +type thCounter struct { + input chan struct{} + mtx sync.Mutex + count int +} + +func (c *thCounter) Increment() { + c.mtx.Lock() + c.count++ + c.mtx.Unlock() +} + +func (c *thCounter) Count() int { + c.mtx.Lock() + val := c.count + c.mtx.Unlock() + return val +} + +// Read should run in a go-routine and +// updates count by one every time a packet comes in +func (c *thCounter) Read() { + for range c.input { + c.Increment() + } +} + +func TestThrottle(test *testing.T) { + assert := asrt.New(test) + + ms := 50 + delay := time.Duration(ms) * time.Millisecond + longwait := time.Duration(2) * delay + t := NewThrottleTimer("foo", delay) + + // start at 0 + c := &thCounter{input: t.Ch} + assert.Equal(0, c.Count()) + go c.Read() + + // waiting does nothing + time.Sleep(longwait) + assert.Equal(0, c.Count()) + + // send one event adds one + t.Set() + time.Sleep(longwait) + assert.Equal(1, c.Count()) + + // send a burst adds one + for i := 0; i < 5; i++ { + t.Set() + } + time.Sleep(longwait) + assert.Equal(2, c.Count()) + + // send 12, over 2 delay sections, adds 3 + short := time.Duration(ms/5) * time.Millisecond + for i := 0; i < 13; i++ { + t.Set() + time.Sleep(short) + } + time.Sleep(longwait) + assert.Equal(5, c.Count()) + + close(t.Ch) +} diff --git a/libs/common/types.pb.go b/libs/common/types.pb.go new file mode 100644 index 000000000..f6645602a --- /dev/null +++ b/libs/common/types.pb.go @@ -0,0 +1,98 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: common/types.proto + +/* +Package common is a generated protocol buffer package. + +It is generated from these files: + common/types.proto + +It has these top-level messages: + KVPair + KI64Pair +*/ +//nolint: gas +package common + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Define these here for compatibility but use tmlibs/common.KVPair. +type KVPair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *KVPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KVPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. +type KI64Pair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` +} + +func (m *KI64Pair) Reset() { *m = KI64Pair{} } +func (m *KI64Pair) String() string { return proto.CompactTextString(m) } +func (*KI64Pair) ProtoMessage() {} +func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *KI64Pair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KI64Pair) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterType((*KVPair)(nil), "common.KVPair") + proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") +} + +func init() { proto.RegisterFile("common/types.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 107 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd, + 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, + 0x83, 0x88, 0x29, 0x19, 0x70, 0xb1, 0x79, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, 0x09, 0x70, 0x31, + 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, + 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x31, 0x08, 0x47, 0xc9, 0x88, 0x8b, 0xc3, 0xdb, + 0xd3, 0xcc, 0x84, 0x18, 0x3d, 0xcc, 0x50, 0x3d, 0x49, 0x6c, 0x60, 0x4b, 0x8d, 0x01, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xd8, 0xf1, 0xc3, 0x8c, 0x8a, 0x00, 0x00, 0x00, +} diff --git a/libs/common/types.proto b/libs/common/types.proto new file mode 100644 index 000000000..8406fcfdd --- /dev/null +++ b/libs/common/types.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package common; + +//---------------------------------------- +// Abstract types + +// Define these here for compatibility but use tmlibs/common.KVPair. +message KVPair { + bytes key = 1; + bytes value = 2; +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. 
+message KI64Pair { + bytes key = 1; + int64 value = 2; +} diff --git a/libs/common/word.go b/libs/common/word.go new file mode 100644 index 000000000..a5b841f55 --- /dev/null +++ b/libs/common/word.go @@ -0,0 +1,90 @@ +package common + +import ( + "bytes" + "sort" +) + +var ( + Zero256 = Word256{0} + One256 = Word256{1} +) + +type Word256 [32]byte + +func (w Word256) String() string { return string(w[:]) } +func (w Word256) TrimmedString() string { return TrimmedString(w.Bytes()) } +func (w Word256) Copy() Word256 { return w } +func (w Word256) Bytes() []byte { return w[:] } // copied. +func (w Word256) Prefix(n int) []byte { return w[:n] } +func (w Word256) Postfix(n int) []byte { return w[32-n:] } +func (w Word256) IsZero() bool { + accum := byte(0) + for _, byt := range w { + accum |= byt + } + return accum == 0 +} +func (w Word256) Compare(other Word256) int { + return bytes.Compare(w[:], other[:]) +} + +func Uint64ToWord256(i uint64) Word256 { + buf := [8]byte{} + PutUint64BE(buf[:], i) + return LeftPadWord256(buf[:]) +} + +func Int64ToWord256(i int64) Word256 { + buf := [8]byte{} + PutInt64BE(buf[:], i) + return LeftPadWord256(buf[:]) +} + +func RightPadWord256(bz []byte) (word Word256) { + copy(word[:], bz) + return +} + +func LeftPadWord256(bz []byte) (word Word256) { + copy(word[32-len(bz):], bz) + return +} + +func Uint64FromWord256(word Word256) uint64 { + buf := word.Postfix(8) + return GetUint64BE(buf) +} + +func Int64FromWord256(word Word256) int64 { + buf := word.Postfix(8) + return GetInt64BE(buf) +} + +//------------------------------------- + +type Tuple256 struct { + First Word256 + Second Word256 +} + +func (tuple Tuple256) Compare(other Tuple256) int { + firstCompare := tuple.First.Compare(other.First) + if firstCompare == 0 { + return tuple.Second.Compare(other.Second) + } + return firstCompare +} + +func Tuple256Split(t Tuple256) (Word256, Word256) { + return t.First, t.Second +} + +type Tuple256Slice []Tuple256 + +func (p Tuple256Slice) Len() int { return len(p) } +func (p Tuple256Slice) Less(i, j int) bool { + return p[i].Compare(p[j]) < 0 +} +func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Tuple256Slice) Sort() { sort.Sort(p) } diff --git a/libs/db/LICENSE.md b/libs/db/LICENSE.md new file mode 100644 index 000000000..ab8da59d8 --- /dev/null +++ b/libs/db/LICENSE.md @@ -0,0 +1,3 @@ +Tendermint Go-DB Copyright (C) 2015 All in Bits, Inc + +Released under the Apache2.0 license diff --git a/libs/db/README.md b/libs/db/README.md new file mode 100644 index 000000000..ca5ab33f9 --- /dev/null +++ b/libs/db/README.md @@ -0,0 +1 @@ +TODO: syndtr/goleveldb should be replaced with actual LevelDB instance diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go new file mode 100644 index 000000000..493ed83f9 --- /dev/null +++ b/libs/db/backend_test.go @@ -0,0 +1,215 @@ +package db + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func cleanupDBDir(dir, name string) { + os.RemoveAll(filepath.Join(dir, name) + ".db") +} + +func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { + // Default + dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) + defer dir.Close() + db := NewDB("testdb", backend, dirname) + + // A nonexistent key should return nil, even if the key is empty + require.Nil(t, db.Get([]byte(""))) + + // A nonexistent key should return nil, even if the key is 
nil + require.Nil(t, db.Get(nil)) + + // A nonexistent key should return nil. + key := []byte("abc") + require.Nil(t, db.Get(key)) + + // Set empty value. + db.Set(key, []byte("")) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) + + // Set nil value. + db.Set(key, nil) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) + + // Delete. + db.Delete(key) + require.Nil(t, db.Get(key)) +} + +func TestBackendsGetSetDelete(t *testing.T) { + for dbType := range backends { + testBackendGetSetDelete(t, dbType) + } +} + +func withDB(t *testing.T, creator dbCreator, fn func(DB)) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db, err := creator(name, "") + defer cleanupDBDir("", name) + assert.Nil(t, err) + fn(db) + db.Close() +} + +func TestBackendsNilKeys(t *testing.T) { + + // Test all backends. + for dbType, creator := range backends { + withDB(t, creator, func(db DB) { + t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) { + + // Nil keys are treated as the empty key for most operations. + expect := func(key, value []byte) { + if len(key) == 0 { // nil or empty + assert.Equal(t, db.Get(nil), db.Get([]byte(""))) + assert.Equal(t, db.Has(nil), db.Has([]byte(""))) + } + assert.Equal(t, db.Get(key), value) + assert.Equal(t, db.Has(key), value != nil) + } + + // Not set + expect(nil, nil) + + // Set nil value + db.Set(nil, nil) + expect(nil, []byte("")) + + // Set empty value + db.Set(nil, []byte("")) + expect(nil, []byte("")) + + // Set nil, Delete nil + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set nil, Delete empty + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.Delete([]byte("")) + expect(nil, nil) + + // Set empty, Delete nil + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set empty, Delete empty + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete([]byte("")) + expect(nil, nil) + + // SetSync nil, DeleteSync nil + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync nil, DeleteSync empty + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + + // SetSync empty, DeleteSync nil + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync empty, DeleteSync empty + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + }) + }) + } +} + +func TestGoLevelDBBackend(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, GoLevelDBBackend, "") + defer cleanupDBDir("", name) + + _, ok := db.(*GoLevelDB) + assert.True(t, ok) +} + +func TestDBIterator(t *testing.T) { + for dbType := range backends { + t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { + testDBIterator(t, dbType) + }) + } +} + +func testDBIterator(t *testing.T, backend DBBackendType) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, backend, "") + defer cleanupDBDir("", name) + + for i := 0; i < 10; i++ { + if i != 6 { // but skip 6. 
+ db.Set(int642Bytes(int64(i)), nil) + } + } + + verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") + verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") + + verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0") + verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64(nil), "reverse iterator 10") + + verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") + verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") + verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10") + verifyIterator(t, db.ReverseIterator(int642Bytes(9), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9") + verifyIterator(t, db.ReverseIterator(int642Bytes(8), nil), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8") + + verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6") + verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7") + verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8") + verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7") + verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8") + verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8") + + verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(4)), []int64{5}, "reverse iterator from 5 to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(4)), []int64{5}, "reverse iterator from 6 to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(4)), []int64{7, 5}, "reverse iterator from 7 to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(5)), []int64(nil), "reverse iterator from 6 to 5") + verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(5)), []int64{7}, "reverse iterator from 7 to 5") + verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(6)), []int64{7}, "reverse iterator from 7 to 6") + + verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1") + verifyIterator(t, db.ReverseIterator(int642Bytes(9), int642Bytes(8)), []int64{9}, "reverse iterator from 9 to 8") + + verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4") + verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2") + verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64{4, 3}, "reverse iterator from 4 to 2") + verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64(nil), "reverse iterator from 2 to 4") + +} + +func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) { + var list []int64 + for itr.Valid() { + list = append(list, bytes2Int64(itr.Key())) + itr.Next() + } + assert.Equal(t, expected, list, msg) +} diff --git a/libs/db/c_level_db.go b/libs/db/c_level_db.go new file mode 100644 index 000000000..307461261 --- /dev/null +++ b/libs/db/c_level_db.go @@ -0,0 +1,312 @@ +// +build gcc + +package db + +import ( + "bytes" + 
"fmt" + "path/filepath" + + "github.com/jmhodges/levigo" +) + +func init() { + dbCreator := func(name string, dir string) (DB, error) { + return NewCLevelDB(name, dir) + } + registerDBCreator(LevelDBBackend, dbCreator, true) + registerDBCreator(CLevelDBBackend, dbCreator, false) +} + +var _ DB = (*CLevelDB)(nil) + +type CLevelDB struct { + db *levigo.DB + ro *levigo.ReadOptions + wo *levigo.WriteOptions + woSync *levigo.WriteOptions +} + +func NewCLevelDB(name string, dir string) (*CLevelDB, error) { + dbPath := filepath.Join(dir, name+".db") + + opts := levigo.NewOptions() + opts.SetCache(levigo.NewLRUCache(1 << 30)) + opts.SetCreateIfMissing(true) + db, err := levigo.Open(dbPath, opts) + if err != nil { + return nil, err + } + ro := levigo.NewReadOptions() + wo := levigo.NewWriteOptions() + woSync := levigo.NewWriteOptions() + woSync.SetSync(true) + database := &CLevelDB{ + db: db, + ro: ro, + wo: wo, + woSync: woSync, + } + return database, nil +} + +// Implements DB. +func (db *CLevelDB) Get(key []byte) []byte { + key = nonNilBytes(key) + res, err := db.db.Get(db.ro, key) + if err != nil { + panic(err) + } + return res +} + +// Implements DB. +func (db *CLevelDB) Has(key []byte) bool { + return db.Get(key) != nil +} + +// Implements DB. +func (db *CLevelDB) Set(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(db.wo, key, value) + if err != nil { + panic(err) + } +} + +// Implements DB. +func (db *CLevelDB) SetSync(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(db.woSync, key, value) + if err != nil { + panic(err) + } +} + +// Implements DB. +func (db *CLevelDB) Delete(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(db.wo, key) + if err != nil { + panic(err) + } +} + +// Implements DB. +func (db *CLevelDB) DeleteSync(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(db.woSync, key) + if err != nil { + panic(err) + } +} + +func (db *CLevelDB) DB() *levigo.DB { + return db.db +} + +// Implements DB. +func (db *CLevelDB) Close() { + db.db.Close() + db.ro.Close() + db.wo.Close() + db.woSync.Close() +} + +// Implements DB. +func (db *CLevelDB) Print() { + itr := db.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} + +// Implements DB. +func (db *CLevelDB) Stats() map[string]string { + // TODO: Find the available properties for the C LevelDB implementation + keys := []string{} + + stats := make(map[string]string) + for _, key := range keys { + str := db.db.PropertyValue(key) + stats[key] = str + } + return stats +} + +//---------------------------------------- +// Batch + +// Implements DB. +func (db *CLevelDB) NewBatch() Batch { + batch := levigo.NewWriteBatch() + return &cLevelDBBatch{db, batch} +} + +type cLevelDBBatch struct { + db *CLevelDB + batch *levigo.WriteBatch +} + +// Implements Batch. +func (mBatch *cLevelDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +// Implements Batch. +func (mBatch *cLevelDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +// Implements Batch. +func (mBatch *cLevelDBBatch) Write() { + err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) + if err != nil { + panic(err) + } +} + +// Implements Batch. 
+func (mBatch *cLevelDBBatch) WriteSync() { + err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch) + if err != nil { + panic(err) + } +} + +//---------------------------------------- +// Iterator +// NOTE This is almost identical to db/go_level_db.Iterator +// Before creating a third version, refactor. + +func (db *CLevelDB) Iterator(start, end []byte) Iterator { + itr := db.db.NewIterator(db.ro) + return newCLevelDBIterator(itr, start, end, false) +} + +func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { + itr := db.db.NewIterator(db.ro) + return newCLevelDBIterator(itr, start, end, true) +} + +var _ Iterator = (*cLevelDBIterator)(nil) + +type cLevelDBIterator struct { + source *levigo.Iterator + start, end []byte + isReverse bool + isInvalid bool +} + +func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { + if isReverse { + if start == nil { + source.SeekToLast() + } else { + source.Seek(start) + if source.Valid() { + soakey := source.Key() // start or after key + if bytes.Compare(start, soakey) < 0 { + source.Prev() + } + } else { + source.SeekToLast() + } + } + } else { + if start == nil { + source.SeekToFirst() + } else { + source.Seek(start) + } + } + return &cLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, + } +} + +func (itr cLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +func (itr cLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { + return false + } + + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false + } + + // If key is end or past it, invalid. + var end = itr.end + var key = itr.source.Key() + if itr.isReverse { + if end != nil && bytes.Compare(key, end) <= 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } + } + + // It's valid. 
+ return true +} + +func (itr cLevelDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Key() +} + +func (itr cLevelDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Value() +} + +func (itr cLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } +} + +func (itr cLevelDBIterator) Close() { + itr.source.Close() +} + +func (itr cLevelDBIterator) assertNoError() { + if err := itr.source.GetError(); err != nil { + panic(err) + } +} + +func (itr cLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("cLevelDBIterator is invalid") + } +} diff --git a/libs/db/c_level_db_test.go b/libs/db/c_level_db_test.go new file mode 100644 index 000000000..2d30500dd --- /dev/null +++ b/libs/db/c_level_db_test.go @@ -0,0 +1,96 @@ +// +build gcc + +package db + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func BenchmarkRandomReadsWrites2(b *testing.B) { + b.StopTimer() + + numItems := int64(1000000) + internal := map[int64]int64{} + for i := 0; i < int(numItems); i++ { + internal[int64(i)] = int64(0) + } + db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + if err != nil { + b.Fatal(err.Error()) + return + } + + fmt.Println("ok, starting") + b.StartTimer() + + for i := 0; i < b.N; i++ { + // Write something + { + idx := (int64(cmn.RandInt()) % numItems) + internal[idx] += 1 + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := int642Bytes(int64(val)) + //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) + db.Set( + idxBytes, + valBytes, + ) + } + // Read something + { + idx := (int64(cmn.RandInt()) % numItems) + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := db.Get(idxBytes) + //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) + if val == 0 { + if !bytes.Equal(valBytes, nil) { + b.Errorf("Expected %v for %v, got %X", + nil, idx, valBytes) + break + } + } else { + if len(valBytes) != 8 { + b.Errorf("Expected length 8 for %v, got %X", + idx, valBytes) + break + } + valGot := bytes2Int64(valBytes) + if val != valGot { + b.Errorf("Expected %v for %v, got %v", + val, idx, valGot) + break + } + } + } + } + + db.Close() +} + +/* +func int642Bytes(i int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(i)) + return buf +} + +func bytes2Int64(buf []byte) int64 { + return int64(binary.BigEndian.Uint64(buf)) +} +*/ + +func TestCLevelDBBackend(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackend, "") + defer cleanupDBDir("", name) + + _, ok := db.(*CLevelDB) + assert.True(t, ok) +} diff --git a/libs/db/common_test.go b/libs/db/common_test.go new file mode 100644 index 000000000..027b8ee53 --- /dev/null +++ b/libs/db/common_test.go @@ -0,0 +1,191 @@ +package db + +import ( + "fmt" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" +) + +//---------------------------------------- +// Helper functions. 
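+//
+// The helpers below exercise the Iterator contract shared by all backends;
+// a sketch of the expected call pattern (Valid before Key, Value or Next;
+// Close when done):
+//
+//	itr := db.Iterator(nil, nil)
+//	defer itr.Close()
+//	for ; itr.Valid(); itr.Next() {
+//		k, v := itr.Key(), itr.Value()
+//		_, _ = k, v
+//	}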
+
+func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) {
+	valueGot := db.Get(key)
+	assert.Equal(t, valueWanted, valueGot)
+}
+
+func checkValid(t *testing.T, itr Iterator, expected bool) {
+	valid := itr.Valid()
+	require.Equal(t, expected, valid)
+}
+
+func checkNext(t *testing.T, itr Iterator, expected bool) {
+	itr.Next()
+	valid := itr.Valid()
+	require.Equal(t, expected, valid)
+}
+
+func checkNextPanics(t *testing.T, itr Iterator) {
+	assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't")
+}
+
+func checkDomain(t *testing.T, itr Iterator, start, end []byte) {
+	ds, de := itr.Domain()
+	assert.Equal(t, start, ds, "checkDomain domain start incorrect")
+	assert.Equal(t, end, de, "checkDomain domain end incorrect")
+}
+
+func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) {
+	k, v := itr.Key(), itr.Value()
+	assert.Exactly(t, key, k)
+	assert.Exactly(t, value, v)
+}
+
+func checkInvalid(t *testing.T, itr Iterator) {
+	checkValid(t, itr, false)
+	checkKeyPanics(t, itr)
+	checkValuePanics(t, itr)
+	checkNextPanics(t, itr)
+}
+
+func checkKeyPanics(t *testing.T, itr Iterator) {
+	assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
+}
+
+func checkValuePanics(t *testing.T, itr Iterator) {
+	assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't")
+}
+
+func newTempDB(t *testing.T, backend DBBackendType) (db DB) {
+	dir, dirname := cmn.Tempdir("db_common_test")
+	db = NewDB("testdb", backend, dirname)
+	dir.Close()
+	return db
+}
+
+//----------------------------------------
+// mockDB
+
+// NOTE: not actually goroutine safe.
+// If you want something goroutine safe, maybe you just want a MemDB.
+type mockDB struct {
+	mtx   sync.Mutex
+	calls map[string]int
+}
+
+func newMockDB() *mockDB {
+	return &mockDB{
+		calls: make(map[string]int),
+	}
+}
+
+func (mdb *mockDB) Mutex() *sync.Mutex {
+	return &(mdb.mtx)
+}
+
+func (mdb *mockDB) Get([]byte) []byte {
+	mdb.calls["Get"]++
+	return nil
+}
+
+func (mdb *mockDB) Has([]byte) bool {
+	mdb.calls["Has"]++
+	return false
+}
+
+func (mdb *mockDB) Set([]byte, []byte) {
+	mdb.calls["Set"]++
+}
+
+func (mdb *mockDB) SetSync([]byte, []byte) {
+	mdb.calls["SetSync"]++
+}
+
+func (mdb *mockDB) SetNoLock([]byte, []byte) {
+	mdb.calls["SetNoLock"]++
+}
+
+func (mdb *mockDB) SetNoLockSync([]byte, []byte) {
+	mdb.calls["SetNoLockSync"]++
+}
+
+func (mdb *mockDB) Delete([]byte) {
+	mdb.calls["Delete"]++
+}
+
+func (mdb *mockDB) DeleteSync([]byte) {
+	mdb.calls["DeleteSync"]++
+}
+
+func (mdb *mockDB) DeleteNoLock([]byte) {
+	mdb.calls["DeleteNoLock"]++
+}
+
+func (mdb *mockDB) DeleteNoLockSync([]byte) {
+	mdb.calls["DeleteNoLockSync"]++
+}
+
+func (mdb *mockDB) Iterator(start, end []byte) Iterator {
+	mdb.calls["Iterator"]++
+	return &mockIterator{}
+}
+
+func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator {
+	mdb.calls["ReverseIterator"]++
+	return &mockIterator{}
+}
+
+func (mdb *mockDB) Close() {
+	mdb.calls["Close"]++
+}
+
+func (mdb *mockDB) NewBatch() Batch {
+	mdb.calls["NewBatch"]++
+	return &memBatch{db: mdb}
+}
+
+func (mdb *mockDB) Print() {
+	mdb.calls["Print"]++
+	fmt.Printf("mockDB{%v}", mdb.Stats())
+}
+
+func (mdb *mockDB) Stats() map[string]string {
+	mdb.calls["Stats"]++
+
+	res := make(map[string]string)
+	for key, count := range mdb.calls {
+		res[key] = fmt.Sprintf("%d", count)
+	}
+	return res
+}
+
+//----------------------------------------
+// mockIterator
+
+type mockIterator struct{}
+
+func
(mockIterator) Domain() (start []byte, end []byte) { + return nil, nil +} + +func (mockIterator) Valid() bool { + return false +} + +func (mockIterator) Next() { +} + +func (mockIterator) Key() []byte { + return nil +} + +func (mockIterator) Value() []byte { + return nil +} + +func (mockIterator) Close() { +} diff --git a/libs/db/db.go b/libs/db/db.go new file mode 100644 index 000000000..869937660 --- /dev/null +++ b/libs/db/db.go @@ -0,0 +1,36 @@ +package db + +import "fmt" + +//---------------------------------------- +// Main entry + +type DBBackendType string + +const ( + LevelDBBackend DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend DBBackendType = "cleveldb" + GoLevelDBBackend DBBackendType = "goleveldb" + MemDBBackend DBBackendType = "memdb" + FSDBBackend DBBackendType = "fsdb" // using the filesystem naively +) + +type dbCreator func(name string, dir string) (DB, error) + +var backends = map[DBBackendType]dbCreator{} + +func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) { + _, ok := backends[backend] + if !force && ok { + return + } + backends[backend] = creator +} + +func NewDB(name string, backend DBBackendType, dir string) DB { + db, err := backends[backend](name, dir) + if err != nil { + panic(fmt.Sprintf("Error initializing DB: %v", err)) + } + return db +} diff --git a/libs/db/db_test.go b/libs/db/db_test.go new file mode 100644 index 000000000..a56901016 --- /dev/null +++ b/libs/db/db_test.go @@ -0,0 +1,194 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDBIteratorSingleKey(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator(nil, nil) + + checkValid(t, itr, true) + checkNext(t, itr, false) + checkValid(t, itr, false) + checkNextPanics(t, itr) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorTwoKeys(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + db.SetSync(bz("2"), bz("value_1")) + + { // Fail by calling Next too much + itr := db.Iterator(nil, nil) + checkValid(t, itr, true) + + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkNext(t, itr, false) + checkValid(t, itr, false) + + checkNextPanics(t, itr) + + // Once invalid... 
+ checkInvalid(t, itr) + } + }) + } +} + +func TestDBIteratorMany(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + + keys := make([][]byte, 100) + for i := 0; i < 100; i++ { + keys[i] = []byte{byte(i)} + } + + value := []byte{5} + for _, k := range keys { + db.Set(k, value) + } + + itr := db.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + assert.Equal(t, db.Get(itr.Key()), itr.Value()) + } + }) + } +} + +func TestDBIteratorEmpty(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator(nil, nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorEmptyBeginAfter(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator(bz("1"), nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorNonemptyBeginAfter(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator(bz("2"), nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBBatchWrite1(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.Write() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 3, mdb.calls["SetNoLock"]) + assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWrite2(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + batch.Write() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 3, mdb.calls["SetNoLock"]) + assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWriteSync1(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.WriteSync() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 2, mdb.calls["SetNoLock"]) + assert.Equal(t, 1, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWriteSync2(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + batch.WriteSync() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + 
assert.Equal(t, 3, mdb.calls["SetNoLock"])
+	assert.Equal(t, 0, mdb.calls["SetNoLockSync"])
+	assert.Equal(t, 0, mdb.calls["Delete"])
+	assert.Equal(t, 0, mdb.calls["DeleteSync"])
+	assert.Equal(t, 0, mdb.calls["DeleteNoLock"])
+	assert.Equal(t, 1, mdb.calls["DeleteNoLockSync"])
+}
diff --git a/libs/db/debug_db.go b/libs/db/debug_db.go
new file mode 100644
index 000000000..bb361a266
--- /dev/null
+++ b/libs/db/debug_db.go
@@ -0,0 +1,252 @@
+package db
+
+import (
+	"fmt"
+	"sync"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+//----------------------------------------
+// debugDB
+
+type debugDB struct {
+	label string
+	db    DB
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugDB(label string, db DB) debugDB {
+	return debugDB{
+		label: label,
+		db:    db,
+	}
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) Mutex() *sync.Mutex { return nil }
+
+// Implements DB.
+func (ddb debugDB) Get(key []byte) (value []byte) {
+	defer func() {
+		fmt.Printf("%v.Get(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue),
+			cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	}()
+	value = ddb.db.Get(key)
+	return
+}
+
+// Implements DB.
+func (ddb debugDB) Has(key []byte) (has bool) {
+	defer func() {
+		fmt.Printf("%v.Has(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), has)
+	}()
+	return ddb.db.Has(key)
+}
+
+// Implements DB.
+func (ddb debugDB) Set(key []byte, value []byte) {
+	fmt.Printf("%v.Set(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.Set(key, value)
+}
+
+// Implements DB.
+func (ddb debugDB) SetSync(key []byte, value []byte) {
+	fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.SetSync(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) SetNoLock(key []byte, value []byte) {
+	fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.(atomicSetDeleter).SetNoLock(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) SetNoLockSync(key []byte, value []byte) {
+	fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.(atomicSetDeleter).SetNoLockSync(key, value)
+}
+
+// Implements DB.
+func (ddb debugDB) Delete(key []byte) {
+	fmt.Printf("%v.Delete(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.Delete(key)
+}
+
+// Implements DB.
+func (ddb debugDB) DeleteSync(key []byte) {
+	fmt.Printf("%v.DeleteSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.DeleteSync(key)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) DeleteNoLock(key []byte) {
+	fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.(atomicSetDeleter).DeleteNoLock(key)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) DeleteNoLockSync(key []byte) {
+	fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.(atomicSetDeleter).DeleteNoLockSync(key)
+}
+
+// Implements DB.
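+//
+// A wiring sketch (illustrative only): wrap any DB to trace every operation
+// on stdout, e.g. while debugging a test.
+//
+//	db := NewDebugDB("mydb", NewMemDB())
+//	db.Set([]byte("k"), []byte("v")) // prints mydb.Set(...)
+//	_ = db.Get([]byte("k"))          // prints mydb.Get(...)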
+func (ddb debugDB) Iterator(start, end []byte) Iterator {
+	fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
+	return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end))
+}
+
+// Implements DB.
+func (ddb debugDB) ReverseIterator(start, end []byte) Iterator {
+	fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
+	return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end))
+}
+
+// Implements DB.
+// Panics if the underlying db is not an
+// atomicSetDeleter.
+func (ddb debugDB) NewBatch() Batch {
+	fmt.Printf("%v.NewBatch()\n", ddb.label)
+	return NewDebugBatch(ddb.label, ddb.db.NewBatch())
+}
+
+// Implements DB.
+func (ddb debugDB) Close() {
+	fmt.Printf("%v.Close()\n", ddb.label)
+	ddb.db.Close()
+}
+
+// Implements DB.
+func (ddb debugDB) Print() {
+	ddb.db.Print()
+}
+
+// Implements DB.
+func (ddb debugDB) Stats() map[string]string {
+	return ddb.db.Stats()
+}
+
+//----------------------------------------
+// debugIterator
+
+type debugIterator struct {
+	label string
+	itr   Iterator
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugIterator(label string, itr Iterator) debugIterator {
+	return debugIterator{
+		label: label,
+		itr:   itr,
+	}
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Domain() (start []byte, end []byte) {
+	defer func() {
+		fmt.Printf("%v.itr.Domain() (%X,%X)\n", ditr.label, start, end)
+	}()
+	start, end = ditr.itr.Domain()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Valid() (ok bool) {
+	defer func() {
+		fmt.Printf("%v.itr.Valid() %v\n", ditr.label, ok)
+	}()
+	ok = ditr.itr.Valid()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Next() {
+	fmt.Printf("%v.itr.Next()\n", ditr.label)
+	ditr.itr.Next()
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Key() (key []byte) {
+	key = ditr.itr.Key()
+	fmt.Printf("%v.itr.Key() %v\n", ditr.label,
+		cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue))
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Value() (value []byte) {
+	value = ditr.itr.Value()
+	fmt.Printf("%v.itr.Value() %v\n", ditr.label,
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Close() {
+	fmt.Printf("%v.itr.Close()\n", ditr.label)
+	ditr.itr.Close()
+}
+
+//----------------------------------------
+// debugBatch
+
+type debugBatch struct {
+	label string
+	bch   Batch
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugBatch(label string, bch Batch) debugBatch {
+	return debugBatch{
+		label: label,
+		bch:   bch,
+	}
+}
+
+// Implements Batch.
+func (dbch debugBatch) Set(key, value []byte) {
+	fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	dbch.bch.Set(key, value)
+}
+
+// Implements Batch.
+func (dbch debugBatch) Delete(key []byte) {
+	fmt.Printf("%v.batch.Delete(%v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	dbch.bch.Delete(key)
+}
+
+// Implements Batch.
+func (dbch debugBatch) Write() {
+	fmt.Printf("%v.batch.Write()\n", dbch.label)
+	dbch.bch.Write()
+}
+
+// Implements Batch.
+func (dbch debugBatch) WriteSync() { + fmt.Printf("%v.batch.WriteSync()\n", dbch.label) + dbch.bch.WriteSync() +} diff --git a/libs/db/fsdb.go b/libs/db/fsdb.go new file mode 100644 index 000000000..fc861decc --- /dev/null +++ b/libs/db/fsdb.go @@ -0,0 +1,262 @@ +package db + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + keyPerm = os.FileMode(0600) + dirPerm = os.FileMode(0700) +) + +func init() { + registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) { + dbPath := filepath.Join(dir, name+".db") + return NewFSDB(dbPath), nil + }, false) +} + +var _ DB = (*FSDB)(nil) + +// It's slow. +type FSDB struct { + mtx sync.Mutex + dir string +} + +func NewFSDB(dir string) *FSDB { + err := os.MkdirAll(dir, dirPerm) + if err != nil { + panic(errors.Wrap(err, "Creating FSDB dir "+dir)) + } + database := &FSDB{ + dir: dir, + } + return database +} + +func (db *FSDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + key = escapeKey(key) + + path := db.nameToPath(key) + value, err := read(path) + if os.IsNotExist(err) { + return nil + } else if err != nil { + panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) + } + return value +} + +func (db *FSDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + key = escapeKey(key) + + path := db.nameToPath(key) + return cmn.FileExists(path) +} + +func (db *FSDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *FSDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// NOTE: Implements atomicSetDeleter. +func (db *FSDB) SetNoLock(key []byte, value []byte) { + key = escapeKey(key) + value = nonNilBytes(value) + path := db.nameToPath(key) + err := write(path, value) + if err != nil { + panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key)) + } +} + +func (db *FSDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *FSDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// NOTE: Implements atomicSetDeleter. +func (db *FSDB) DeleteNoLock(key []byte) { + key = escapeKey(key) + path := db.nameToPath(key) + err := remove(path) + if os.IsNotExist(err) { + return + } else if err != nil { + panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key)) + } +} + +func (db *FSDB) Close() { + // Nothing to do. +} + +func (db *FSDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Print not yet implemented") +} + +func (db *FSDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Stats not yet implemented") +} + +func (db *FSDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() + + // Not sure we would ever want to try... + // It doesn't seem easy for general filesystems. + panic("FSDB.NewBatch not yet implemented") +} + +func (db *FSDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +func (db *FSDB) Iterator(start, end []byte) Iterator { + return db.MakeIterator(start, end, false) +} + +func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator { + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. 
+	keys, err := list(db.dir, start, end, isReversed)
+	if err != nil {
+		panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
+	}
+	if isReversed {
+		sort.Sort(sort.Reverse(sort.StringSlice(keys)))
+	} else {
+		sort.Strings(keys)
+	}
+	return newMemDBIterator(db, keys, start, end)
+}
+
+func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
+	return db.MakeIterator(start, end, true)
+}
+
+func (db *FSDB) nameToPath(name []byte) string {
+	n := url.PathEscape(string(name))
+	return filepath.Join(db.dir, n)
+}
+
+// Read some bytes from a file.
+// CONTRACT: returns os errors directly without wrapping.
+func read(path string) ([]byte, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	d, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, err
+	}
+	return d, nil
+}
+
+// Write some bytes to a file.
+// CONTRACT: returns os errors directly without wrapping.
+func write(path string, d []byte) error {
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = f.Write(d)
+	if err != nil {
+		return err
+	}
+	err = f.Sync()
+	return err
+}
+
+// Remove a file.
+// CONTRACT: returns os errors directly without wrapping.
+func remove(path string) error {
+	return os.Remove(path)
+}
+
+// List keys in a directory, stripping off escape sequences and dir portions.
+// CONTRACT: returns os errors directly without wrapping.
+func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) {
+	dir, err := os.Open(dirPath)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+
+	names, err := dir.Readdirnames(0)
+	if err != nil {
+		return nil, err
+	}
+	var keys []string
+	for _, name := range names {
+		n, err := url.PathUnescape(name)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to unescape %s while listing", name)
+		}
+		key := unescapeKey([]byte(n))
+		if IsKeyInDomain(key, start, end, isReversed) {
+			keys = append(keys, string(key))
+		}
+	}
+	return keys, nil
+}
+
+// Keys are prefixed with "k_" to support empty or nil keys, which the file
+// system doesn't allow as filenames.
+func escapeKey(key []byte) []byte {
+	return []byte("k_" + string(key))
+}
+func unescapeKey(escKey []byte) []byte {
+	if len(escKey) < 2 {
+		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
+	}
+	if string(escKey[:2]) != "k_" {
+		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
+	}
+	return escKey[2:]
+}
diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go
new file mode 100644
index 000000000..349e447b2
--- /dev/null
+++ b/libs/db/go_level_db.go
@@ -0,0 +1,327 @@
+package db
+
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+func init() {
+	dbCreator := func(name string, dir string) (DB, error) {
+		return NewGoLevelDB(name, dir)
+	}
+	registerDBCreator(LevelDBBackend, dbCreator, false)
+	registerDBCreator(GoLevelDBBackend, dbCreator, false)
+}
+
+var _ DB = (*GoLevelDB)(nil)
+
+type GoLevelDB struct {
+	db *leveldb.DB
+}
+
+func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
+	dbPath := filepath.Join(dir, name+".db")
+	db, err := leveldb.OpenFile(dbPath, nil)
+	if err != nil {
+		return nil, err
+	}
+	database := &GoLevelDB{
+		db: db,
+	}
+	return database, nil
+}
+
+// Implements DB.
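+//
+// An opening sketch (illustrative only; the name and dir are hypothetical):
+//
+//	db, err := NewGoLevelDB("example", "/tmp")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer db.Close()
+//	db.SetSync([]byte("k"), []byte("v")) // synced write
+//	value := db.Get([]byte("k"))         // nil if the key is absent
+//	_ = value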
+func (db *GoLevelDB) Get(key []byte) []byte { + key = nonNilBytes(key) + res, err := db.db.Get(key, nil) + if err != nil { + if err == errors.ErrNotFound { + return nil + } + panic(err) + } + return res +} + +// Implements DB. +func (db *GoLevelDB) Has(key []byte) bool { + return db.Get(key) != nil +} + +// Implements DB. +func (db *GoLevelDB) Set(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(key, value, nil) + if err != nil { + cmn.PanicCrisis(err) + } +} + +// Implements DB. +func (db *GoLevelDB) SetSync(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) + if err != nil { + cmn.PanicCrisis(err) + } +} + +// Implements DB. +func (db *GoLevelDB) Delete(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(key, nil) + if err != nil { + cmn.PanicCrisis(err) + } +} + +// Implements DB. +func (db *GoLevelDB) DeleteSync(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) + if err != nil { + cmn.PanicCrisis(err) + } +} + +func (db *GoLevelDB) DB() *leveldb.DB { + return db.db +} + +// Implements DB. +func (db *GoLevelDB) Close() { + db.db.Close() +} + +// Implements DB. +func (db *GoLevelDB) Print() { + str, _ := db.db.GetProperty("leveldb.stats") + fmt.Printf("%v\n", str) + + itr := db.db.NewIterator(nil, nil) + for itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} + +// Implements DB. +func (db *GoLevelDB) Stats() map[string]string { + keys := []string{ + "leveldb.num-files-at-level{n}", + "leveldb.stats", + "leveldb.sstables", + "leveldb.blockpool", + "leveldb.cachedblock", + "leveldb.openedtables", + "leveldb.alivesnaps", + "leveldb.aliveiters", + } + + stats := make(map[string]string) + for _, key := range keys { + str, err := db.db.GetProperty(key) + if err == nil { + stats[key] = str + } + } + return stats +} + +//---------------------------------------- +// Batch + +// Implements DB. +func (db *GoLevelDB) NewBatch() Batch { + batch := new(leveldb.Batch) + return &goLevelDBBatch{db, batch} +} + +type goLevelDBBatch struct { + db *GoLevelDB + batch *leveldb.Batch +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) Write() { + err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false}) + if err != nil { + panic(err) + } +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) WriteSync() { + err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true}) + if err != nil { + panic(err) + } +} + +//---------------------------------------- +// Iterator +// NOTE This is almost identical to db/c_level_db.Iterator +// Before creating a third version, refactor. + +// Implements DB. +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, false) +} + +// Implements DB. 
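+//
+// Iterator ranges are half-open, as the backend tests above verify:
+// Iterator(start, end) yields keys k with start <= k < end, and a nil bound
+// means unbounded. ReverseIterator walks the corresponding range from the
+// high end. A sketch:
+//
+//	itr := db.Iterator([]byte("a"), []byte("c"))
+//	defer itr.Close()
+//	for ; itr.Valid(); itr.Next() {
+//		// visits keys "a" <= k < "c"; "c" itself is excluded
+//	}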
+func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, true) +} + +type goLevelDBIterator struct { + source iterator.Iterator + start []byte + end []byte + isReverse bool + isInvalid bool +} + +var _ Iterator = (*goLevelDBIterator)(nil) + +func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { + if isReverse { + if start == nil { + source.Last() + } else { + valid := source.Seek(start) + if valid { + soakey := source.Key() // start or after key + if bytes.Compare(start, soakey) < 0 { + source.Prev() + } + } else { + source.Last() + } + } + } else { + if start == nil { + source.First() + } else { + source.Seek(start) + } + } + return &goLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, + } +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { + return false + } + + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false + } + + // If key is end or past it, invalid. + var end = itr.end + var key = itr.source.Key() + + if itr.isReverse { + if end != nil && bytes.Compare(key, end) <= 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } + } + + // Valid + return true +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Key() []byte { + // Key returns a copy of the current key. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 + itr.assertNoError() + itr.assertIsValid() + return cp(itr.source.Key()) +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Value() []byte { + // Value returns a copy of the current value. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 + itr.assertNoError() + itr.assertIsValid() + return cp(itr.source.Value()) +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } +} + +// Implements Iterator. 
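+//
+// Because Key and Value return copies, the returned slices remain valid
+// after Next, which raw goleveldb iterators do not guarantee. A sketch,
+// given an itr from db.Iterator:
+//
+//	var keys [][]byte
+//	for ; itr.Valid(); itr.Next() {
+//		keys = append(keys, itr.Key()) // safe: each call copies
+//	}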
+func (itr *goLevelDBIterator) Close() { + itr.source.Release() +} + +func (itr *goLevelDBIterator) assertNoError() { + if err := itr.source.Error(); err != nil { + panic(err) + } +} + +func (itr goLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("goLevelDBIterator is invalid") + } +} diff --git a/libs/db/go_level_db_test.go b/libs/db/go_level_db_test.go new file mode 100644 index 000000000..47be216a6 --- /dev/null +++ b/libs/db/go_level_db_test.go @@ -0,0 +1,83 @@ +package db + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func BenchmarkRandomReadsWrites(b *testing.B) { + b.StopTimer() + + numItems := int64(1000000) + internal := map[int64]int64{} + for i := 0; i < int(numItems); i++ { + internal[int64(i)] = int64(0) + } + db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + if err != nil { + b.Fatal(err.Error()) + return + } + + fmt.Println("ok, starting") + b.StartTimer() + + for i := 0; i < b.N; i++ { + // Write something + { + idx := (int64(cmn.RandInt()) % numItems) + internal[idx]++ + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := int642Bytes(int64(val)) + //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) + db.Set( + idxBytes, + valBytes, + ) + } + // Read something + { + idx := (int64(cmn.RandInt()) % numItems) + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := db.Get(idxBytes) + //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) + if val == 0 { + if !bytes.Equal(valBytes, nil) { + b.Errorf("Expected %v for %v, got %X", + nil, idx, valBytes) + break + } + } else { + if len(valBytes) != 8 { + b.Errorf("Expected length 8 for %v, got %X", + idx, valBytes) + break + } + valGot := bytes2Int64(valBytes) + if val != valGot { + b.Errorf("Expected %v for %v, got %v", + val, idx, valGot) + break + } + } + } + } + + db.Close() +} + +func int642Bytes(i int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(i)) + return buf +} + +func bytes2Int64(buf []byte) int64 { + return int64(binary.BigEndian.Uint64(buf)) +} diff --git a/libs/db/mem_batch.go b/libs/db/mem_batch.go new file mode 100644 index 000000000..5c5d0c13a --- /dev/null +++ b/libs/db/mem_batch.go @@ -0,0 +1,72 @@ +package db + +import ( + "sync" +) + +type atomicSetDeleter interface { + Mutex() *sync.Mutex + SetNoLock(key, value []byte) + SetNoLockSync(key, value []byte) + DeleteNoLock(key []byte) + DeleteNoLockSync(key []byte) +} + +type memBatch struct { + db atomicSetDeleter + ops []operation +} + +type opType int + +const ( + opTypeSet opType = 1 + opTypeDelete opType = 2 +) + +type operation struct { + opType + key []byte + value []byte +} + +func (mBatch *memBatch) Set(key, value []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) +} + +func (mBatch *memBatch) Delete(key []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) +} + +func (mBatch *memBatch) Write() { + mBatch.write(false) +} + +func (mBatch *memBatch) WriteSync() { + mBatch.write(true) +} + +func (mBatch *memBatch) write(doSync bool) { + if mtx := mBatch.db.Mutex(); mtx != nil { + mtx.Lock() + defer mtx.Unlock() + } + + for i, op := range mBatch.ops { + if doSync && i == (len(mBatch.ops)-1) { + switch op.opType { + case opTypeSet: + mBatch.db.SetNoLockSync(op.key, op.value) + case opTypeDelete: + mBatch.db.DeleteNoLockSync(op.key) + } + break // we're done. 
+ } + switch op.opType { + case opTypeSet: + mBatch.db.SetNoLock(op.key, op.value) + case opTypeDelete: + mBatch.db.DeleteNoLock(op.key) + } + } +} diff --git a/libs/db/mem_db.go b/libs/db/mem_db.go new file mode 100644 index 000000000..580123017 --- /dev/null +++ b/libs/db/mem_db.go @@ -0,0 +1,255 @@ +package db + +import ( + "fmt" + "sort" + "sync" +) + +func init() { + registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) { + return NewMemDB(), nil + }, false) +} + +var _ DB = (*MemDB)(nil) + +type MemDB struct { + mtx sync.Mutex + db map[string][]byte +} + +func NewMemDB() *MemDB { + database := &MemDB{ + db: make(map[string][]byte), + } + return database +} + +// Implements atomicSetDeleter. +func (db *MemDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +// Implements DB. +func (db *MemDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + key = nonNilBytes(key) + + value := db.db[string(key)] + return value +} + +// Implements DB. +func (db *MemDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + key = nonNilBytes(key) + + _, ok := db.db[string(key)] + return ok +} + +// Implements DB. +func (db *MemDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// Implements DB. +func (db *MemDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// Implements atomicSetDeleter. +func (db *MemDB) SetNoLock(key []byte, value []byte) { + db.SetNoLockSync(key, value) +} + +// Implements atomicSetDeleter. +func (db *MemDB) SetNoLockSync(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + + db.db[string(key)] = value +} + +// Implements DB. +func (db *MemDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// Implements DB. +func (db *MemDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// Implements atomicSetDeleter. +func (db *MemDB) DeleteNoLock(key []byte) { + db.DeleteNoLockSync(key) +} + +// Implements atomicSetDeleter. +func (db *MemDB) DeleteNoLockSync(key []byte) { + key = nonNilBytes(key) + + delete(db.db, string(key)) +} + +// Implements DB. +func (db *MemDB) Close() { + // Close is a noop since for an in-memory + // database, we don't have a destination + // to flush contents to nor do we want + // any data loss on invoking Close() + // See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 +} + +// Implements DB. +func (db *MemDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + for key, value := range db.db { + fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) + } +} + +// Implements DB. +func (db *MemDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + stats := make(map[string]string) + stats["database.type"] = "memDB" + stats["database.size"] = fmt.Sprintf("%d", len(db.db)) + return stats +} + +// Implements DB. +func (db *MemDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() + + return &memBatch{db, nil} +} + +//---------------------------------------- +// Iterator + +// Implements DB. +func (db *MemDB) Iterator(start, end []byte) Iterator { + db.mtx.Lock() + defer db.mtx.Unlock() + + keys := db.getSortedKeys(start, end, false) + return newMemDBIterator(db, keys, start, end) +} + +// Implements DB. 
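+//
+// A usage sketch (illustrative only): MemDB needs no files or cleanup,
+// which makes it convenient in tests.
+//
+//	db := NewMemDB()
+//	db.Set([]byte("k"), []byte("v"))
+//	itr := db.Iterator(nil, nil) // iterates a sorted copy of the keys
+//	defer itr.Close()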
+func (db *MemDB) ReverseIterator(start, end []byte) Iterator { + db.mtx.Lock() + defer db.mtx.Unlock() + + keys := db.getSortedKeys(start, end, true) + return newMemDBIterator(db, keys, start, end) +} + +// We need a copy of all of the keys. +// Not the best, but probably not a bottleneck depending. +type memDBIterator struct { + db DB + cur int + keys []string + start []byte + end []byte +} + +var _ Iterator = (*memDBIterator)(nil) + +// Keys is expected to be in reverse order for reverse iterators. +func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { + return &memDBIterator{ + db: db, + cur: 0, + keys: keys, + start: start, + end: end, + } +} + +// Implements Iterator. +func (itr *memDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Implements Iterator. +func (itr *memDBIterator) Valid() bool { + return 0 <= itr.cur && itr.cur < len(itr.keys) +} + +// Implements Iterator. +func (itr *memDBIterator) Next() { + itr.assertIsValid() + itr.cur++ +} + +// Implements Iterator. +func (itr *memDBIterator) Key() []byte { + itr.assertIsValid() + return []byte(itr.keys[itr.cur]) +} + +// Implements Iterator. +func (itr *memDBIterator) Value() []byte { + itr.assertIsValid() + key := []byte(itr.keys[itr.cur]) + return itr.db.Get(key) +} + +// Implements Iterator. +func (itr *memDBIterator) Close() { + itr.keys = nil + itr.db = nil +} + +func (itr *memDBIterator) assertIsValid() { + if !itr.Valid() { + panic("memDBIterator is invalid") + } +} + +//---------------------------------------- +// Misc. + +func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { + keys := []string{} + for key := range db.db { + inDomain := IsKeyInDomain([]byte(key), start, end, reverse) + if inDomain { + keys = append(keys, key) + } + } + sort.Strings(keys) + if reverse { + nkeys := len(keys) + for i := 0; i < nkeys/2; i++ { + temp := keys[i] + keys[i] = keys[nkeys-i-1] + keys[nkeys-i-1] = temp + } + } + return keys +} diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go new file mode 100644 index 000000000..5bb53ebd9 --- /dev/null +++ b/libs/db/prefix_db.go @@ -0,0 +1,355 @@ +package db + +import ( + "bytes" + "fmt" + "sync" +) + +// IteratePrefix is a convenience function for iterating over a key domain +// restricted by prefix. +func IteratePrefix(db DB, prefix []byte) Iterator { + var start, end []byte + if len(prefix) == 0 { + start = nil + end = nil + } else { + start = cp(prefix) + end = cpIncr(prefix) + } + return db.Iterator(start, end) +} + +/* +TODO: Make test, maybe rename. +// Like IteratePrefix but the iterator strips the prefix from the keys. +func IteratePrefixStripped(db DB, prefix []byte) Iterator { + start, end := ... + return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix)) +} +*/ + +//---------------------------------------- +// prefixDB + +type prefixDB struct { + mtx sync.Mutex + prefix []byte + db DB +} + +// NewPrefixDB lets you namespace multiple DBs within a single DB. +func NewPrefixDB(db DB, prefix []byte) *prefixDB { + return &prefixDB{ + prefix: prefix, + db: db, + } +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) Mutex() *sync.Mutex { + return &(pdb.mtx) +} + +// Implements DB. +func (pdb *prefixDB) Get(key []byte) []byte { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pkey := pdb.prefixed(key) + value := pdb.db.Get(pkey) + return value +} + +// Implements DB. 
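+//
+// A namespacing sketch (illustrative only): two prefixDBs over one backing
+// DB keep disjoint keyspaces.
+//
+//	base := NewMemDB()
+//	users := NewPrefixDB(base, []byte("u/"))
+//	posts := NewPrefixDB(base, []byte("p/"))
+//	users.Set([]byte("1"), []byte("alice")) // stored under "u/1" in base
+//	posts.Set([]byte("1"), []byte("hello")) // stored under "p/1" in base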
+func (pdb *prefixDB) Has(key []byte) bool {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	return pdb.db.Has(pdb.prefixed(key))
+}
+
+// Implements DB.
+func (pdb *prefixDB) Set(key []byte, value []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pkey := pdb.prefixed(key)
+	pdb.db.Set(pkey, value)
+}
+
+// Implements DB.
+func (pdb *prefixDB) SetSync(key []byte, value []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.SetSync(pdb.prefixed(key), value)
+}
+
+// Implements DB.
+func (pdb *prefixDB) Delete(key []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.Delete(pdb.prefixed(key))
+}
+
+// Implements DB.
+func (pdb *prefixDB) DeleteSync(key []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.DeleteSync(pdb.prefixed(key))
+}
+
+// Implements DB.
+func (pdb *prefixDB) Iterator(start, end []byte) Iterator {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	var pstart, pend []byte
+	pstart = append(cp(pdb.prefix), start...)
+	if end == nil {
+		pend = cpIncr(pdb.prefix)
+	} else {
+		pend = append(cp(pdb.prefix), end...)
+	}
+	return newPrefixIterator(
+		pdb.prefix,
+		start,
+		end,
+		pdb.db.Iterator(
+			pstart,
+			pend,
+		),
+	)
+}
+
+// Implements DB.
+func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	var pstart, pend []byte
+	if start == nil {
+		// This may cause the underlying iterator to start with
+		// an item which doesn't start with prefix. We will skip
+		// that item later in this function. See 'skipOne'.
+		pstart = cpIncr(pdb.prefix)
+	} else {
+		pstart = append(cp(pdb.prefix), start...)
+	}
+	if end == nil {
+		// This may cause the underlying iterator to end with an
+		// item which doesn't start with prefix. The
+		// prefixIterator will terminate iteration
+		// automatically upon detecting this.
+		pend = cpDecr(pdb.prefix)
+	} else {
+		pend = append(cp(pdb.prefix), end...)
+	}
+	ritr := pdb.db.ReverseIterator(pstart, pend)
+	if start == nil {
+		skipOne(ritr, cpIncr(pdb.prefix))
+	}
+	return newPrefixIterator(
+		pdb.prefix,
+		start,
+		end,
+		ritr,
+	)
+}
+
+// Implements DB.
+// The returned batch prefixes every key before delegating to the
+// underlying DB's batch, so any Batch implementation works here.
+func (pdb *prefixDB) NewBatch() Batch {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	return newPrefixBatch(pdb.prefix, pdb.db.NewBatch())
+}
+
+/* NOTE: Uncomment to use memBatch instead of prefixBatch
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) SetNoLock(key []byte, value []byte) {
+	pdb.db.(atomicSetDeleter).SetNoLock(pdb.prefixed(key), value)
+}
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) {
+	pdb.db.(atomicSetDeleter).SetNoLockSync(pdb.prefixed(key), value)
+}
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) DeleteNoLock(key []byte) {
+	pdb.db.(atomicSetDeleter).DeleteNoLock(pdb.prefixed(key))
+}
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) DeleteNoLockSync(key []byte) {
+	pdb.db.(atomicSetDeleter).DeleteNoLockSync(pdb.prefixed(key))
+}
+*/
+
+// Implements DB.
+func (pdb *prefixDB) Close() {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.Close()
+}
+
+// Implements DB.
+func (pdb *prefixDB) Print() {
+	fmt.Printf("prefix: %X\n", pdb.prefix)
+
+	itr := pdb.Iterator(nil, nil)
+	defer itr.Close()
+	for ; itr.Valid(); itr.Next() {
+		key := itr.Key()
+		value := itr.Value()
+		fmt.Printf("[%X]:\t[%X]\n", key, value)
+	}
+}
+
+// Implements DB.
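+//
+// For reference, the prefixed ranges handed to the underlying DB by
+// the iterator methods above look like this for prefix "ns/" (a
+// sketch; cpIncr("ns/") == "ns0", the smallest key greater than every
+// "ns/"-prefixed key):
+//
+//	pdb.Iterator(nil, nil)         // underlying range: ["ns/", "ns0")
+//	pdb.Iterator([]byte("a"), nil) // underlying range: ["ns/a", "ns0")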
+func (pdb *prefixDB) Stats() map[string]string {
+	stats := make(map[string]string)
+	stats["prefixdb.prefix.string"] = string(pdb.prefix)
+	stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix)
+	source := pdb.db.Stats()
+	for key, value := range source {
+		stats["prefixdb.source."+key] = value
+	}
+	return stats
+}
+
+func (pdb *prefixDB) prefixed(key []byte) []byte {
+	return append(cp(pdb.prefix), key...)
+}
+
+//----------------------------------------
+// prefixBatch
+
+type prefixBatch struct {
+	prefix []byte
+	source Batch
+}
+
+func newPrefixBatch(prefix []byte, source Batch) prefixBatch {
+	return prefixBatch{
+		prefix: prefix,
+		source: source,
+	}
+}
+
+func (pb prefixBatch) Set(key, value []byte) {
+	pkey := append(cp(pb.prefix), key...)
+	pb.source.Set(pkey, value)
+}
+
+func (pb prefixBatch) Delete(key []byte) {
+	pkey := append(cp(pb.prefix), key...)
+	pb.source.Delete(pkey)
+}
+
+func (pb prefixBatch) Write() {
+	pb.source.Write()
+}
+
+func (pb prefixBatch) WriteSync() {
+	pb.source.WriteSync()
+}
+
+//----------------------------------------
+// prefixIterator
+
+// Strips prefix while iterating from Iterator.
+// Uses pointer receivers so that Next can invalidate the iterator
+// once the source leaves the prefix domain.
+type prefixIterator struct {
+	prefix []byte
+	start  []byte
+	end    []byte
+	source Iterator
+	valid  bool
+}
+
+func newPrefixIterator(prefix, start, end []byte, source Iterator) *prefixIterator {
+	valid := source.Valid() && bytes.HasPrefix(source.Key(), prefix)
+	return &prefixIterator{
+		prefix: prefix,
+		start:  start,
+		end:    end,
+		source: source,
+		valid:  valid,
+	}
+}
+
+func (itr *prefixIterator) Domain() (start []byte, end []byte) {
+	return itr.start, itr.end
+}
+
+func (itr *prefixIterator) Valid() bool {
+	return itr.valid && itr.source.Valid()
+}
+
+func (itr *prefixIterator) Next() {
+	if !itr.valid {
+		panic("prefixIterator invalid, cannot call Next()")
+	}
+	itr.source.Next()
+	if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) {
+		itr.source.Close()
+		itr.valid = false
+		return
+	}
+}
+
+func (itr *prefixIterator) Key() (key []byte) {
+	if !itr.valid {
+		panic("prefixIterator invalid, cannot call Key()")
+	}
+	return stripPrefix(itr.source.Key(), itr.prefix)
+}
+
+func (itr *prefixIterator) Value() (value []byte) {
+	if !itr.valid {
+		panic("prefixIterator invalid, cannot call Value()")
+	}
+	return itr.source.Value()
+}
+
+func (itr *prefixIterator) Close() {
+	itr.source.Close()
+}
+
+//----------------------------------------
+
+func stripPrefix(key []byte, prefix []byte) (stripped []byte) {
+	if len(key) < len(prefix) {
+		panic("should not happen")
+	}
+	if !bytes.Equal(key[:len(prefix)], prefix) {
+		panic("should not happen")
+	}
+	return key[len(prefix):]
+}
+
+// If the first iterator item is skipKey, then
+// skip it.
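+// This is needed by prefixDB.ReverseIterator above: when start is nil,
+// the underlying reverse iterator is seeded at cpIncr(prefix), a key
+// that lies outside the prefix domain. If such a key actually exists
+// (e.g. "ns0" for prefix "ns/"), it would be yielded first, so it is
+// skipped here.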
+func skipOne(itr Iterator, skipKey []byte) { + if itr.Valid() { + if bytes.Equal(itr.Key(), skipKey) { + itr.Next() + } + } +} diff --git a/libs/db/prefix_db_test.go b/libs/db/prefix_db_test.go new file mode 100644 index 000000000..60809f157 --- /dev/null +++ b/libs/db/prefix_db_test.go @@ -0,0 +1,147 @@ +package db + +import "testing" + +func mockDBWithStuff() DB { + db := NewMemDB() + // Under "key" prefix + db.Set(bz("key"), bz("value")) + db.Set(bz("key1"), bz("value1")) + db.Set(bz("key2"), bz("value2")) + db.Set(bz("key3"), bz("value3")) + db.Set(bz("something"), bz("else")) + db.Set(bz(""), bz("")) + db.Set(bz("k"), bz("val")) + db.Set(bz("ke"), bz("valu")) + db.Set(bz("kee"), bz("valuu")) + return db +} + +func TestPrefixDBSimple(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + checkValue(t, pdb, bz("key"), nil) + checkValue(t, pdb, bz(""), bz("value")) + checkValue(t, pdb, bz("key1"), nil) + checkValue(t, pdb, bz("1"), bz("value1")) + checkValue(t, pdb, bz("key2"), nil) + checkValue(t, pdb, bz("2"), bz("value2")) + checkValue(t, pdb, bz("key3"), nil) + checkValue(t, pdb, bz("3"), bz("value3")) + checkValue(t, pdb, bz("something"), nil) + checkValue(t, pdb, bz("k"), nil) + checkValue(t, pdb, bz("ke"), nil) + checkValue(t, pdb, bz("kee"), nil) +} + +func TestPrefixDBIterator1(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(nil, nil) + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator2(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator3(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(bz(""), nil) + checkDomain(t, itr, bz(""), nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator4(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(bz(""), bz("")) + checkDomain(t, itr, bz(""), bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator1(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(nil, nil) + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator2(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + 
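+	// Note: the end bound bz("") is exclusive, so the item stored under
+	// the empty (fully stripped) key is never yielded here, unlike the
+	// ReverseIterator(nil, nil) case above, which ends on bz("").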
+	checkNext(t, itr, true)
+	checkItem(t, itr, bz("1"), bz("value1"))
+	checkNext(t, itr, false)
+	checkInvalid(t, itr)
+	itr.Close()
+}
+
+func TestPrefixDBReverseIterator3(t *testing.T) {
+	db := mockDBWithStuff()
+	pdb := NewPrefixDB(db, bz("key"))
+
+	itr := pdb.ReverseIterator(bz(""), nil)
+	checkDomain(t, itr, bz(""), nil)
+	checkItem(t, itr, bz(""), bz("value"))
+	checkNext(t, itr, false)
+	checkInvalid(t, itr)
+	itr.Close()
+}
+
+func TestPrefixDBReverseIterator4(t *testing.T) {
+	db := mockDBWithStuff()
+	pdb := NewPrefixDB(db, bz("key"))
+
+	itr := pdb.ReverseIterator(bz(""), bz(""))
+	checkInvalid(t, itr)
+	itr.Close()
+}
diff --git a/libs/db/remotedb/doc.go b/libs/db/remotedb/doc.go
new file mode 100644
index 000000000..07c95a56a
--- /dev/null
+++ b/libs/db/remotedb/doc.go
@@ -0,0 +1,37 @@
+/*
+remotedb is a package for connecting to distributed Tendermint db.DB
+instances. The purpose is to detach difficult deployments, such as
+CLevelDB which requires gcc, or databases that need custom
+configuration such as extra disk space. It also eases the burden and
+cost of deploying database dependencies for Tendermint developers.
+Most importantly, it is built on the highly performant gRPC transport.
+
+remotedb's RemoteDB implements db.DB, so it can be used like any
+other database. One just has to explicitly connect to the remote
+database with a client setup such as:
+
+	client, err := remotedb.NewRemoteDB(addr, cert)
+	// Make sure to invoke InitRemote!
+	if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
+		log.Fatalf("Failed to initialize the remote db")
+	}
+
+	client.Set(k1, v1)
+	client.SetSync(k2, v2)
+
+	client.Delete(k1)
+	gv1 := client.Get(k1)
+
+	for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() {
+		ik, iv := itr.Key(), itr.Value()
+		ds, de := itr.Domain()
+	}
+
+	stats := client.Stats()
+
+	if !client.Has(dk1) {
+		client.SetSync(dk1, dv1)
+	}
+*/
+package remotedb
diff --git a/libs/db/remotedb/grpcdb/client.go b/libs/db/remotedb/grpcdb/client.go
new file mode 100644
index 000000000..e11b7839b
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/client.go
@@ -0,0 +1,30 @@
+package grpcdb
+
+import (
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+
+	protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
+)
+
+// Security defines how the client will talk to the gRPC server.
+type Security uint
+
+const (
+	Insecure Security = iota
+	Secure
+)
+
+// NewClient creates a gRPC client connected to the gRPC server at
+// serverAddr, using TLS credentials loaded from serverCert.
+func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {
+	creds, err := credentials.NewClientTLSFromFile(serverCert, "")
+	if err != nil {
+		return nil, err
+	}
+	cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
+	if err != nil {
+		return nil, err
+	}
+	return protodb.NewDBClient(cc), nil
+}
diff --git a/libs/db/remotedb/grpcdb/doc.go b/libs/db/remotedb/grpcdb/doc.go
new file mode 100644
index 000000000..0d8e380ce
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/doc.go
@@ -0,0 +1,32 @@
+/*
+grpcdb exposes Tendermint's db.DB instances over the gRPC transport,
+decoupling local db.DB usage from applications so that databases can
+be served over a network in a highly performant manner.
+
+grpcdb allows users to initialize a database's server like
+they would locally and invoke the respective methods of db.DB.
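+
+A client connects to such a server with NewClient, supplying the
+server's address and its TLS certificate (a sketch; error handling
+elided):
+
+	client, err := grpcdb.NewClient("localhost:8998", "server.crt")
+	// client is a protodb.DBClient, the raw gRPC interface.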
+
+Most users shouldn't use this package, but should instead use
+remotedb. Only lower-level users and database server deployers
+should use it, for functionality such as:
+
+	ln, err := net.Listen("tcp", "0.0.0.0:0")
+	srv, err := grpcdb.NewServer(cert, key)
+	defer srv.Stop()
+	go func() {
+		if err := srv.Serve(ln); err != nil {
+			log.Fatalf("BindServer: %v", err)
+		}
+	}()
+
+or
+	addr := ":8998"
+	cert := "server.crt"
+	key := "server.key"
+	go func() {
+		if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
+			log.Fatalf("BindServer: %v", err)
+		}
+	}()
+*/
+package grpcdb
diff --git a/libs/db/remotedb/grpcdb/example_test.go b/libs/db/remotedb/grpcdb/example_test.go
new file mode 100644
index 000000000..eba0d6914
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/example_test.go
@@ -0,0 +1,52 @@
+package grpcdb_test
+
+import (
+	"bytes"
+	"context"
+	"log"
+
+	grpcdb "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
+	protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
+)
+
+func Example() {
+	addr := ":8998"
+	cert := "server.crt"
+	key := "server.key"
+	go func() {
+		if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
+			log.Fatalf("BindServer: %v", err)
+		}
+	}()
+
+	client, err := grpcdb.NewClient(addr, cert)
+	if err != nil {
+		log.Fatalf("Failed to create grpcDB client: %v", err)
+	}
+
+	ctx := context.Background()
+	// 1. Initialize the DB
+	in := &protodb.Init{
+		Type: "leveldb",
+		Name: "grpc-uno-test",
+		Dir:  ".",
+	}
+	if _, err := client.Init(ctx, in); err != nil {
+		log.Fatalf("Init error: %v", err)
+	}
+
+	// 2. Now it can be used!
+	query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")}
+	if _, err := client.SetSync(ctx, query1); err != nil {
+		log.Fatalf("SetSync err: %v", err)
+	}
+
+	query2 := &protodb.Entity{Key: []byte("Project")}
+	read, err := client.Get(ctx, query2)
+	if err != nil {
+		log.Fatalf("Get err: %v", err)
+	}
+	if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) {
+		log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w)
+	}
+}
diff --git a/libs/db/remotedb/grpcdb/server.go b/libs/db/remotedb/grpcdb/server.go
new file mode 100644
index 000000000..3a9955ddf
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/server.go
@@ -0,0 +1,197 @@
+package grpcdb
+
+import (
+	"context"
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+
+	"github.com/tendermint/tendermint/libs/db"
+	protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
+)
+
+// ListenAndServe is a blocking function that sets up a gRPC based
+// server at the address supplied, with the gRPC options passed in.
+// Since it blocks, it is normally invoked in a goroutine, as with
+// http.ListenAndServe.
+func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error {
+	ln, err := net.Listen("tcp", addr)
+	if err != nil {
+		return err
+	}
+	srv, err := NewServer(cert, key, opts...)
+	if err != nil {
+		return err
+	}
+	return srv.Serve(ln)
+}
+
+func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) {
+	creds, err := credentials.NewServerTLSFromFile(cert, key)
+	if err != nil {
+		return nil, err
+	}
+	opts = append(opts, grpc.Creds(creds))
+	srv := grpc.NewServer(opts...)
+	protodb.RegisterDBServer(srv, new(server))
+	return srv, nil
+}
+
+type server struct {
+	mu sync.Mutex
+	db db.DB
+}
+
+var _ protodb.DBServer = (*server)(nil)
+
+// Init initializes the server's database. Only one type of database
+// can be initialized per server.
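+//
+// A sketch of the corresponding client-side call (field values are
+// illustrative):
+//
+//	client.Init(ctx, &protodb.Init{Name: "test", Type: "leveldb", Dir: "."})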
+//
+// Dir is the directory on the file system in which the DB will be stored (if backed by disk) (TODO: remove)
+//
+// Name is the base path of the representative filesystem entry
+//
+// Type can be one of:
+// * cleveldb (if built with gcc enabled)
+// * fsdb
+// * memdb
+// * leveldb
+// See https://godoc.org/github.com/tendermint/tendermint/libs/db#DBBackendType
+func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir)
+	return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil
+}
+
+func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+	s.db.Delete(in.Key)
+	return nothing, nil
+}
+
+var nothing = new(protodb.Nothing)
+
+func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+	s.db.DeleteSync(in.Key)
+	return nothing, nil
+}
+
+func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
+	value := s.db.Get(in.Key)
+	return &protodb.Entity{Value: value}, nil
+}
+
+func (s *server) GetStream(ds protodb.DB_GetStreamServer) error {
+	// Receive routine
+	responsesChan := make(chan *protodb.Entity)
+	go func() {
+		defer close(responsesChan)
+		ctx := context.Background()
+		for {
+			in, err := ds.Recv()
+			if err != nil {
+				responsesChan <- &protodb.Entity{Err: err.Error()}
+				return
+			}
+			out, err := s.Get(ctx, in)
+			if err != nil {
+				if out == nil {
+					out = new(protodb.Entity)
+					out.Key = in.Key
+				}
+				out.Err = err.Error()
+				responsesChan <- out
+				return
+			}
+
+			// Otherwise continue on
+			responsesChan <- out
+		}
+	}()
+
+	// Send routine, block until we return
+	for out := range responsesChan {
+		if err := ds.Send(out); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
+	exists := s.db.Has(in.Key)
+	return &protodb.Entity{Exists: exists}, nil
+}
+
+func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+	s.db.Set(in.Key, in.Value)
+	return nothing, nil
+}
+
+func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+	s.db.SetSync(in.Key, in.Value)
+	return nothing, nil
+}
+
+func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error {
+	it := s.db.Iterator(query.Start, query.End)
+	return s.handleIterator(it, dis.Send)
+}
+
+func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error {
+	for it.Valid() {
+		start, end := it.Domain()
+		out := &protodb.Iterator{
+			Domain: &protodb.Domain{Start: start, End: end},
+			Valid:  it.Valid(),
+			Key:    it.Key(),
+			Value:  it.Value(),
+		}
+		if err := sendFunc(out); err != nil {
+			return err
+		}
+
+		// Finally move the iterator forward
+		it.Next()
+	}
+	return nil
+}
+
+func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error {
+	it := s.db.ReverseIterator(query.Start, query.End)
+	return s.handleIterator(it, dis.Send)
+}
+
+func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) {
+	stats := s.db.Stats()
+	return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil
+}
+
+func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
+	return s.batchWrite(c, b, false)
+}
+
+func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
+	return s.batchWrite(c, b,
true) +} + +func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) { + bat := s.db.NewBatch() + for _, op := range b.Ops { + switch op.Type { + case protodb.Operation_SET: + bat.Set(op.Entity.Key, op.Entity.Value) + case protodb.Operation_DELETE: + bat.Delete(op.Entity.Key) + } + } + if sync { + bat.WriteSync() + } else { + bat.Write() + } + return nothing, nil +} diff --git a/libs/db/remotedb/proto/defs.pb.go b/libs/db/remotedb/proto/defs.pb.go new file mode 100644 index 000000000..4d9f0b272 --- /dev/null +++ b/libs/db/remotedb/proto/defs.pb.go @@ -0,0 +1,914 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: defs.proto + +/* +Package protodb is a generated protocol buffer package. + +It is generated from these files: + defs.proto + +It has these top-level messages: + Batch + Operation + Entity + Nothing + Domain + Iterator + Stats + Init +*/ +package protodb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Operation_Type int32 + +const ( + Operation_SET Operation_Type = 0 + Operation_DELETE Operation_Type = 1 +) + +var Operation_Type_name = map[int32]string{ + 0: "SET", + 1: "DELETE", +} +var Operation_Type_value = map[string]int32{ + "SET": 0, + "DELETE": 1, +} + +func (x Operation_Type) String() string { + return proto.EnumName(Operation_Type_name, int32(x)) +} +func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +type Batch struct { + Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` +} + +func (m *Batch) Reset() { *m = Batch{} } +func (m *Batch) String() string { return proto.CompactTextString(m) } +func (*Batch) ProtoMessage() {} +func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Batch) GetOps() []*Operation { + if m != nil { + return m.Ops + } + return nil +} + +type Operation struct { + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Operation) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *Operation) GetType() Operation_Type { + if m != nil { + return m.Type + } + return Operation_SET +} + +type Entity struct { + Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` + Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` + End []byte 
`protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` + Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` + CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Entity) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Entity) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *Entity) GetExists() bool { + if m != nil { + return m.Exists + } + return false +} + +func (m *Entity) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *Entity) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +func (m *Entity) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +func (m *Entity) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +type Nothing struct { +} + +func (m *Nothing) Reset() { *m = Nothing{} } +func (m *Nothing) String() string { return proto.CompactTextString(m) } +func (*Nothing) ProtoMessage() {} +func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type Domain struct { + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (m *Domain) Reset() { *m = Domain{} } +func (m *Domain) String() string { return proto.CompactTextString(m) } +func (*Domain) ProtoMessage() {} +func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Domain) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *Domain) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +type Iterator struct { + Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` + Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Iterator) Reset() { *m = Iterator{} } +func (m *Iterator) String() string { return proto.CompactTextString(m) } +func (*Iterator) ProtoMessage() {} +func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Iterator) GetDomain() *Domain { + if m != nil { + return m.Domain + } + return nil +} + +func (m *Iterator) GetValid() bool { + if m != nil { + return m.Valid + } + return false +} + +func (m *Iterator) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Iterator) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type Stats struct { + Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` +} + +func (m *Stats) Reset() { *m = Stats{} } +func (m *Stats) String() string { return proto.CompactTextString(m) } +func (*Stats) ProtoMessage() {} +func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Stats) GetData() 
map[string]string { + if m != nil { + return m.Data + } + return nil +} + +func (m *Stats) GetTimeAt() int64 { + if m != nil { + return m.TimeAt + } + return 0 +} + +type Init struct { + Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` +} + +func (m *Init) Reset() { *m = Init{} } +func (m *Init) String() string { return proto.CompactTextString(m) } +func (*Init) ProtoMessage() {} +func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Init) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Init) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Init) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func init() { + proto.RegisterType((*Batch)(nil), "protodb.Batch") + proto.RegisterType((*Operation)(nil), "protodb.Operation") + proto.RegisterType((*Entity)(nil), "protodb.Entity") + proto.RegisterType((*Nothing)(nil), "protodb.Nothing") + proto.RegisterType((*Domain)(nil), "protodb.Domain") + proto.RegisterType((*Iterator)(nil), "protodb.Iterator") + proto.RegisterType((*Stats)(nil), "protodb.Stats") + proto.RegisterType((*Init)(nil), "protodb.Init") + proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DB service + +type DBClient interface { + Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) + Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) + GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) + Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) + Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) + ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) + // rpc print(Nothing) returns (Entity) {} + Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) + BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) + BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) +} + +type dBClient struct { + cc *grpc.ClientConn +} + +func NewDBClient(cc *grpc.ClientConn) DBClient { + return &dBClient{cc} +} + +func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...) + if err != nil { + return nil, err + } + x := &dBGetStreamClient{stream} + return x, nil +} + +type DB_GetStreamClient interface { + Send(*Entity) error + Recv() (*Entity, error) + grpc.ClientStream +} + +type dBGetStreamClient struct { + grpc.ClientStream +} + +func (x *dBGetStreamClient) Send(m *Entity) error { + return x.ClientStream.SendMsg(m) +} + +func (x *dBGetStreamClient) Recv() (*Entity, error) { + m := new(Entity) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...) + if err != nil { + return nil, err + } + x := &dBIteratorClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DB_IteratorClient interface { + Recv() (*Iterator, error) + grpc.ClientStream +} + +type dBIteratorClient struct { + grpc.ClientStream +} + +func (x *dBIteratorClient) Recv() (*Iterator, error) { + m := new(Iterator) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...) 
+ if err != nil { + return nil, err + } + x := &dBReverseIteratorClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DB_ReverseIteratorClient interface { + Recv() (*Iterator, error) + grpc.ClientStream +} + +type dBReverseIteratorClient struct { + grpc.ClientStream +} + +func (x *dBReverseIteratorClient) Recv() (*Iterator, error) { + m := new(Iterator) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) { + out := new(Stats) + err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DB service + +type DBServer interface { + Init(context.Context, *Init) (*Entity, error) + Get(context.Context, *Entity) (*Entity, error) + GetStream(DB_GetStreamServer) error + Has(context.Context, *Entity) (*Entity, error) + Set(context.Context, *Entity) (*Nothing, error) + SetSync(context.Context, *Entity) (*Nothing, error) + Delete(context.Context, *Entity) (*Nothing, error) + DeleteSync(context.Context, *Entity) (*Nothing, error) + Iterator(*Entity, DB_IteratorServer) error + ReverseIterator(*Entity, DB_ReverseIteratorServer) error + // rpc print(Nothing) returns (Entity) {} + Stats(context.Context, *Nothing) (*Stats, error) + BatchWrite(context.Context, *Batch) (*Nothing, error) + BatchWriteSync(context.Context, *Batch) (*Nothing, error) +} + +func RegisterDBServer(s *grpc.Server, srv DBServer) { + s.RegisterService(&_DB_serviceDesc, srv) +} + +func _DB_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Init) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Init(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Init", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Init(ctx, req.(*Init)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Get(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(DBServer).GetStream(&dBGetStreamServer{stream}) +} + +type 
DB_GetStreamServer interface { + Send(*Entity) error + Recv() (*Entity, error) + grpc.ServerStream +} + +type dBGetStreamServer struct { + grpc.ServerStream +} + +func (x *dBGetStreamServer) Send(m *Entity) error { + return x.ServerStream.SendMsg(m) +} + +func (x *dBGetStreamServer) Recv() (*Entity, error) { + m := new(Entity) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _DB_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Has(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Has", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Has(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Set(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_SetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).SetSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/SetSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).SetSync(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Delete(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).DeleteSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/DeleteSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).DeleteSync(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Iterator_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Entity) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DBServer).Iterator(m, &dBIteratorServer{stream}) +} + +type DB_IteratorServer interface { + 
Send(*Iterator) error + grpc.ServerStream +} + +type dBIteratorServer struct { + grpc.ServerStream +} + +func (x *dBIteratorServer) Send(m *Iterator) error { + return x.ServerStream.SendMsg(m) +} + +func _DB_ReverseIterator_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Entity) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DBServer).ReverseIterator(m, &dBReverseIteratorServer{stream}) +} + +type DB_ReverseIteratorServer interface { + Send(*Iterator) error + grpc.ServerStream +} + +type dBReverseIteratorServer struct { + grpc.ServerStream +} + +func (x *dBReverseIteratorServer) Send(m *Iterator) error { + return x.ServerStream.SendMsg(m) +} + +func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Nothing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Stats(ctx, req.(*Nothing)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_BatchWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Batch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).BatchWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/BatchWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).BatchWrite(ctx, req.(*Batch)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_BatchWriteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Batch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).BatchWriteSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/BatchWriteSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).BatchWriteSync(ctx, req.(*Batch)) + } + return interceptor(ctx, in, info, handler) +} + +var _DB_serviceDesc = grpc.ServiceDesc{ + ServiceName: "protodb.DB", + HandlerType: (*DBServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "init", + Handler: _DB_Init_Handler, + }, + { + MethodName: "get", + Handler: _DB_Get_Handler, + }, + { + MethodName: "has", + Handler: _DB_Has_Handler, + }, + { + MethodName: "set", + Handler: _DB_Set_Handler, + }, + { + MethodName: "setSync", + Handler: _DB_SetSync_Handler, + }, + { + MethodName: "delete", + Handler: _DB_Delete_Handler, + }, + { + MethodName: "deleteSync", + Handler: _DB_DeleteSync_Handler, + }, + { + MethodName: "stats", + Handler: _DB_Stats_Handler, + }, + { + MethodName: "batchWrite", + Handler: _DB_BatchWrite_Handler, + }, + { + MethodName: "batchWriteSync", + Handler: _DB_BatchWriteSync_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "getStream", + Handler: _DB_GetStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "iterator", + Handler: _DB_Iterator_Handler, + ServerStreams: true, + }, + { + StreamName: "reverseIterator", + Handler: 
_DB_ReverseIterator_Handler, + ServerStreams: true, + }, + }, + Metadata: "defs.proto", +} + +func init() { proto.RegisterFile("defs.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 606 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b, + 0x09, 0x43, 0x69, 0x14, 0x52, 0x24, 0xfe, 0x9c, 0x68, 0x95, 0x1c, 0x2a, 0xa1, 0x22, 0x39, 0x95, + 0x38, 0xa2, 0x6d, 0x3d, 0x34, 0x2b, 0x1a, 0x3b, 0xac, 0x87, 0x8a, 0x5c, 0xb8, 0xf2, 0x79, 0xf8, + 0x7c, 0x5c, 0xd0, 0xae, 0x1d, 0x87, 0x36, 0x39, 0x84, 0x53, 0x76, 0x66, 0xde, 0x7b, 0xb3, 0xf3, + 0x32, 0x5e, 0x80, 0x94, 0x3e, 0x17, 0xfd, 0xb9, 0xce, 0x39, 0xc7, 0x96, 0xfd, 0x49, 0x2f, 0xa2, + 0x43, 0x68, 0x9e, 0x48, 0xbe, 0x9c, 0xe2, 0x63, 0x70, 0xf3, 0x79, 0x11, 0x8a, 0x9e, 0x1b, 0xef, + 0x0c, 0xb1, 0x5f, 0xd5, 0xfb, 0x1f, 0xe6, 0xa4, 0x25, 0xab, 0x3c, 0x4b, 0x4c, 0x39, 0xfa, 0x01, + 0x41, 0x9d, 0xc1, 0x27, 0xe0, 0x53, 0xc6, 0x8a, 0x17, 0xa1, 0xe8, 0x89, 0x78, 0x67, 0xb8, 0x5b, + 0xb3, 0xc6, 0x36, 0x9d, 0x54, 0x65, 0x3c, 0x00, 0x8f, 0x17, 0x73, 0x0a, 0x9d, 0x9e, 0x88, 0x3b, + 0xc3, 0xbd, 0x75, 0xf1, 0xfe, 0xf9, 0x62, 0x4e, 0x89, 0x05, 0x45, 0x0f, 0xc1, 0x33, 0x11, 0xb6, + 0xc0, 0x9d, 0x8c, 0xcf, 0xbb, 0x0d, 0x04, 0xf0, 0x47, 0xe3, 0xf7, 0xe3, 0xf3, 0x71, 0x57, 0x44, + 0xbf, 0x04, 0xf8, 0xa5, 0x38, 0x76, 0xc0, 0x51, 0xa9, 0xed, 0xdc, 0x4c, 0x1c, 0x95, 0x62, 0x17, + 0xdc, 0x2f, 0xb4, 0xb0, 0x3d, 0xfe, 0x4b, 0xcc, 0x11, 0xef, 0x43, 0xf3, 0x46, 0x5e, 0x7f, 0xa3, + 0xd0, 0xb5, 0xb9, 0x32, 0xc0, 0x07, 0xe0, 0xd3, 0x77, 0x55, 0x70, 0x11, 0x7a, 0x3d, 0x11, 0xb7, + 0x93, 0x2a, 0x32, 0xe8, 0x82, 0xa5, 0xe6, 0xb0, 0x59, 0xa2, 0x6d, 0x60, 0x54, 0x29, 0x4b, 0x43, + 0xbf, 0x54, 0xa5, 0xcc, 0xf6, 0x21, 0xad, 0xc3, 0x56, 0x4f, 0xc4, 0x41, 0x62, 0x8e, 0xf8, 0x08, + 0xe0, 0x52, 0x93, 0x64, 0x4a, 0x3f, 0x49, 0x0e, 0xdb, 0x3d, 0x11, 0xbb, 0x49, 0x50, 0x65, 0x8e, + 0x39, 0x0a, 0xa0, 0x75, 0x96, 0xf3, 0x54, 0x65, 0x57, 0xd1, 0x00, 0xfc, 0x51, 0x3e, 0x93, 0x2a, + 0x5b, 0x75, 0x13, 0x1b, 0xba, 0x39, 0x75, 0xb7, 0xe8, 0x2b, 0xb4, 0x4f, 0xd9, 0xb8, 0x94, 0x6b, + 0xe3, 0x77, 0x6a, 0xd9, 0x6b, 0x7e, 0x97, 0xa2, 0x49, 0x55, 0xae, 0x06, 0x57, 0xa5, 0x50, 0x3b, + 0x29, 0x83, 0xa5, 0x41, 0xee, 0x06, 0x83, 0xbc, 0xbf, 0x0c, 0x8a, 0x7e, 0x0a, 0x68, 0x4e, 0x58, + 0x72, 0x81, 0xcf, 0xc1, 0x4b, 0x25, 0xcb, 0x6a, 0x29, 0xc2, 0xba, 0x9d, 0xad, 0xf6, 0x47, 0x92, + 0xe5, 0x38, 0x63, 0xbd, 0x48, 0x2c, 0x0a, 0xf7, 0xa0, 0xc5, 0x6a, 0x46, 0xc6, 0x03, 0xc7, 0x7a, + 0xe0, 0x9b, 0xf0, 0x98, 0xf7, 0x5f, 0x41, 0x50, 0x63, 0x97, 0xb7, 0x10, 0xa5, 0x7d, 0xb7, 0x6e, + 0xe1, 0xd8, 0x5c, 0x19, 0xbc, 0x75, 0x5e, 0x8b, 0xe8, 0x1d, 0x78, 0xa7, 0x99, 0x62, 0xc4, 0x72, + 0x25, 0x2a, 0x52, 0xb9, 0x1e, 0x08, 0xde, 0x99, 0x9c, 0x2d, 0x49, 0xf6, 0x6c, 0xb4, 0x47, 0x4a, + 0xdb, 0x09, 0x83, 0xc4, 0x1c, 0x87, 0xbf, 0x3d, 0x70, 0x46, 0x27, 0x18, 0x83, 0xa7, 0x8c, 0xd0, + 0xff, 0xf5, 0x08, 0x46, 0x77, 0xff, 0xee, 0xc2, 0x46, 0x0d, 0x7c, 0x0a, 0xee, 0x15, 0x31, 0xde, + 0xad, 0x6c, 0x82, 0x1e, 0x41, 0x70, 0x45, 0x3c, 0x61, 0x4d, 0x72, 0xb6, 0x0d, 0x21, 0x16, 0x03, + 0x61, 0xf4, 0xa7, 0xb2, 0xd8, 0x4a, 0xff, 0x19, 0xb8, 0xc5, 0xa6, 0xab, 0x74, 0xeb, 0xc4, 0x72, + 0xad, 0x1a, 0xd8, 0x87, 0x56, 0x41, 0x3c, 0x59, 0x64, 0x97, 0xdb, 0xe1, 0x0f, 0xc1, 0x4f, 0xe9, + 0x9a, 0x98, 0xb6, 0x83, 0xbf, 0x30, 0x8f, 0x87, 0x81, 0x6f, 0xdf, 0x61, 0x08, 0x6d, 0xb5, 0x5c, + 0xdc, 0x35, 0xc2, 0xbd, 0xd5, 0xff, 0x50, 0x61, 0xa2, 0xc6, 0x40, 0xe0, 0x1b, 
0xd8, 0xd5, 0x74, + 0x43, 0xba, 0xa0, 0xd3, 0x7f, 0xa5, 0x1e, 0xd8, 0xef, 0x89, 0x0b, 0x5c, 0xbb, 0xcb, 0x7e, 0xe7, + 0xf6, 0xde, 0x46, 0x0d, 0x1c, 0x00, 0x5c, 0x98, 0x47, 0xef, 0xa3, 0x56, 0x4c, 0xb8, 0xaa, 0xdb, + 0x97, 0x70, 0xe3, 0x34, 0x2f, 0xa1, 0xb3, 0x62, 0x58, 0x13, 0xb6, 0x60, 0x5d, 0xf8, 0x36, 0x75, + 0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf4, 0xe3, 0x82, 0x7a, 0x05, 0x00, 0x00, +} diff --git a/libs/db/remotedb/proto/defs.proto b/libs/db/remotedb/proto/defs.proto new file mode 100644 index 000000000..70471f234 --- /dev/null +++ b/libs/db/remotedb/proto/defs.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package protodb; + +message Batch { + repeated Operation ops = 1; +} + +message Operation { + Entity entity = 1; + enum Type { + SET = 0; + DELETE = 1; + } + Type type = 2; +} + +message Entity { + int32 id = 1; + bytes key = 2; + bytes value = 3; + bool exists = 4; + bytes start = 5; + bytes end = 6; + string err = 7; + int64 created_at = 8; +} + +message Nothing { +} + +message Domain { + bytes start = 1; + bytes end = 2; +} + +message Iterator { + Domain domain = 1; + bool valid = 2; + bytes key = 3; + bytes value = 4; +} + +message Stats { + map data = 1; + int64 time_at = 2; +} + +message Init { + string Type = 1; + string Name = 2; + string Dir = 3; +} + +service DB { + rpc init(Init) returns (Entity) {} + rpc get(Entity) returns (Entity) {} + rpc getStream(stream Entity) returns (stream Entity) {} + + rpc has(Entity) returns (Entity) {} + rpc set(Entity) returns (Nothing) {} + rpc setSync(Entity) returns (Nothing) {} + rpc delete(Entity) returns (Nothing) {} + rpc deleteSync(Entity) returns (Nothing) {} + rpc iterator(Entity) returns (stream Iterator) {} + rpc reverseIterator(Entity) returns (stream Iterator) {} + // rpc print(Nothing) returns (Entity) {} + rpc stats(Nothing) returns (Stats) {} + rpc batchWrite(Batch) returns (Nothing) {} + rpc batchWriteSync(Batch) returns (Nothing) {} +} diff --git a/libs/db/remotedb/remotedb.go b/libs/db/remotedb/remotedb.go new file mode 100644 index 000000000..2b60d8159 --- /dev/null +++ b/libs/db/remotedb/remotedb.go @@ -0,0 +1,262 @@ +package remotedb + +import ( + "context" + "fmt" + + "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" + protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" +) + +type RemoteDB struct { + ctx context.Context + dc protodb.DBClient +} + +func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) { + return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey)) +} + +func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) { + if err != nil { + return nil, err + } + return &RemoteDB{dc: gdc, ctx: context.Background()}, nil +} + +type Init struct { + Dir string + Name string + Type string +} + +func (rd *RemoteDB) InitRemote(in *Init) error { + _, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name}) + return err +} + +var _ db.DB = (*RemoteDB)(nil) + +// Close is a noop currently +func (rd *RemoteDB) Close() { +} + +func (rd *RemoteDB) Delete(key []byte) { + if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil { + panic(fmt.Sprintf("RemoteDB.Delete: %v", err)) + } +} + +func (rd *RemoteDB) DeleteSync(key []byte) { + if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil { + panic(fmt.Sprintf("RemoteDB.DeleteSync: %v", err)) + } +} + +func (rd *RemoteDB) Set(key, value []byte) { + if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, 
Value: value}); err != nil {
+		panic(fmt.Sprintf("RemoteDB.Set: %v", err))
+	}
+}
+
+func (rd *RemoteDB) SetSync(key, value []byte) {
+	if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
+		panic(fmt.Sprintf("RemoteDB.SetSync: %v", err))
+	}
+}
+
+func (rd *RemoteDB) Get(key []byte) []byte {
+	res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key})
+	if err != nil {
+		panic(fmt.Sprintf("RemoteDB.Get error: %v", err))
+	}
+	return res.Value
+}
+
+func (rd *RemoteDB) Has(key []byte) bool {
+	res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key})
+	if err != nil {
+		panic(fmt.Sprintf("RemoteDB.Has error: %v", err))
+	}
+	return res.Exists
+}
+
+func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator {
+	dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end})
+	if err != nil {
+		panic(fmt.Sprintf("RemoteDB.ReverseIterator error: %v", err))
+	}
+	return makeReverseIterator(dic)
+}
+
+func (rd *RemoteDB) NewBatch() db.Batch {
+	return &batch{
+		db:  rd,
+		ops: nil,
+	}
+}
+
+// TODO: Implement Print when db.DB implements a method
+// to print to a string and not db.Print to stdout.
+func (rd *RemoteDB) Print() {
+	panic("Unimplemented")
+}
+
+func (rd *RemoteDB) Stats() map[string]string {
+	stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{})
+	if err != nil {
+		panic(fmt.Sprintf("RemoteDB.Stats error: %v", err))
+	}
+	if stats == nil {
+		return nil
+	}
+	return stats.Data
+}
+
+func (rd *RemoteDB) Iterator(start, end []byte) db.Iterator {
+	dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end})
+	if err != nil {
+		panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err))
+	}
+	return makeIterator(dic)
+}
+
+func makeIterator(dic protodb.DB_IteratorClient) db.Iterator {
+	return &iterator{dic: dic}
+}
+
+func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator {
+	return &reverseIterator{dric: dric}
+}
+
+type reverseIterator struct {
+	dric protodb.DB_ReverseIteratorClient
+	cur  *protodb.Iterator
+}
+
+var _ db.Iterator = (*reverseIterator)(nil)
+
+func (rItr *reverseIterator) Valid() bool {
+	return rItr.cur != nil && rItr.cur.Valid
+}
+
+func (rItr *reverseIterator) Domain() (start, end []byte) {
+	if rItr.cur == nil || rItr.cur.Domain == nil {
+		return nil, nil
+	}
+	return rItr.cur.Domain.Start, rItr.cur.Domain.End
+}
+
+// Next advances the current reverseIterator
+func (rItr *reverseIterator) Next() {
+	var err error
+	rItr.cur, err = rItr.dric.Recv()
+	if err != nil {
+		panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err))
+	}
+}
+
+func (rItr *reverseIterator) Key() []byte {
+	if rItr.cur == nil {
+		return nil
+	}
+	return rItr.cur.Key
+}
+
+func (rItr *reverseIterator) Value() []byte {
+	if rItr.cur == nil {
+		return nil
+	}
+	return rItr.cur.Value
+}
+
+func (rItr *reverseIterator) Close() {
+}
+
+// iterator implements the db.Iterator by retrieving
+// streamed iterators from the remote backend as
+// needed. It is NOT safe for concurrent usage,
+// matching the behavior of other iterators.
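+// Note that the cursor starts out empty: as with reverseIterator
+// above, callers must invoke Next() once before the first
+// Key()/Value(), as the tests in remotedb_test.go do. A sketch:
+//
+//	itr := client.Iterator(nil, nil)
+//	itr.Next() // fetch the first streamed item
+//	k, v := itr.Key(), itr.Value()
+//	itr.Close()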
+type iterator struct { + dic protodb.DB_IteratorClient + cur *protodb.Iterator +} + +var _ db.Iterator = (*iterator)(nil) + +func (itr *iterator) Valid() bool { + return itr.cur != nil && itr.cur.Valid +} + +func (itr *iterator) Domain() (start, end []byte) { + if itr.cur == nil || itr.cur.Domain == nil { + return nil, nil + } + return itr.cur.Domain.Start, itr.cur.Domain.End +} + +// Next advances the current iterator +func (itr *iterator) Next() { + var err error + itr.cur, err = itr.dic.Recv() + if err != nil { + panic(fmt.Sprintf("RemoteDB.Iterator.Next error: %v", err)) + } +} + +func (itr *iterator) Key() []byte { + if itr.cur == nil { + return nil + } + return itr.cur.Key +} + +func (itr *iterator) Value() []byte { + if itr.cur == nil { + return nil + } + return itr.cur.Value +} + +func (itr *iterator) Close() { + err := itr.dic.CloseSend() + if err != nil { + panic(fmt.Sprintf("Error closing iterator: %v", err)) + } +} + +type batch struct { + db *RemoteDB + ops []*protodb.Operation +} + +var _ db.Batch = (*batch)(nil) + +func (bat *batch) Set(key, value []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key, Value: value}, + Type: protodb.Operation_SET, + } + bat.ops = append(bat.ops, op) +} + +func (bat *batch) Delete(key []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key}, + Type: protodb.Operation_DELETE, + } + bat.ops = append(bat.ops, op) +} + +func (bat *batch) Write() { + if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { + panic(fmt.Sprintf("RemoteDB.BatchWrite: %v", err)) + } +} + +func (bat *batch) WriteSync() { + if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { + panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err)) + } +} diff --git a/libs/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go new file mode 100644 index 000000000..0e7319971 --- /dev/null +++ b/libs/db/remotedb/remotedb_test.go @@ -0,0 +1,123 @@ +package remotedb_test + +import ( + "net" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/db/remotedb" + "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" +) + +func TestRemoteDB(t *testing.T) { + cert := "test.crt" + key := "test.key" + ln, err := net.Listen("tcp", "0.0.0.0:0") + require.Nil(t, err, "expecting a port to have been assigned on which we can listen") + srv, err := grpcdb.NewServer(cert, key) + require.Nil(t, err) + defer srv.Stop() + go func() { + if err := srv.Serve(ln); err != nil { + t.Fatalf("BindServer: %v", err) + } + }() + + client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert) + require.Nil(t, err, "expecting a successful client creation") + dbName := "test-remote-db" + require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"})) + defer func() { + err := os.RemoveAll(dbName + ".db") + if err != nil { + panic(err) + } + }() + + k1 := []byte("key-1") + v1 := client.Get(k1) + require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1) + vv1 := []byte("value-1") + client.Set(k1, vv1) + gv1 := client.Get(k1) + require.Equal(t, gv1, vv1) + + // Simple iteration + itr := client.Iterator(nil, nil) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-1")) + require.Equal(t, itr.Value(), []byte("value-1")) + require.Panics(t, itr.Next) + itr.Close() + + // Set some more keys + k2 := []byte("key-2") + v2 := []byte("value-2") + client.SetSync(k2, v2) + has := client.Has(k2) + require.True(t, 
has) + gv2 := client.Get(k2) + require.Equal(t, gv2, v2) + + // More iteration + itr = client.Iterator(nil, nil) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-1")) + require.Equal(t, itr.Value(), []byte("value-1")) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-2")) + require.Equal(t, itr.Value(), []byte("value-2")) + require.Panics(t, itr.Next) + itr.Close() + + // Deletion + client.Delete(k1) + client.DeleteSync(k2) + gv1 = client.Get(k1) + gv2 = client.Get(k2) + require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore") + require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore") + + // Batch tests - set + k3 := []byte("key-3") + k4 := []byte("key-4") + k5 := []byte("key-5") + v3 := []byte("value-3") + v4 := []byte("value-4") + v5 := []byte("value-5") + bat := client.NewBatch() + bat.Set(k3, v3) + bat.Set(k4, v4) + rv3 := client.Get(k3) + require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored") + rv4 := client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored") + bat.Write() + rv3 = client.Get(k3) + require.Equal(t, rv3, v3, "expecting k3 to have been stored") + rv4 = client.Get(k4) + require.Equal(t, rv4, v4, "expecting k4 to have been stored") + + // Batch tests - deletion + bat = client.NewBatch() + bat.Delete(k4) + bat.Delete(k3) + bat.WriteSync() + rv3 = client.Get(k3) + require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted") + rv4 = client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") + + // Batch tests - set and delete + bat = client.NewBatch() + bat.Set(k4, v4) + bat.Set(k5, v5) + bat.Delete(k4) + bat.WriteSync() + rv4 = client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") + rv5 := client.Get(k5) + require.Equal(t, rv5, v5, "expecting k5 to have been stored") +} diff --git a/libs/db/remotedb/test.crt b/libs/db/remotedb/test.crt new file mode 100644 index 000000000..bdc8a0f29 --- /dev/null +++ b/libs/db/remotedb/test.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEQTCCAimgAwIBAgIRANqF1HD19i/uvQ3n62TAKTwwDQYJKoZIhvcNAQELBQAw +GTEXMBUGA1UEAxMOdGVuZGVybWludC5jb20wHhcNMTgwNzAyMDMwNzMyWhcNMjAw +MTAyMDMwNzMwWjANMQswCQYDVQQDEwI6OjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAOuWUMCSzYJmvKU1vsouDTe7OxnPWO3oV0FjSH8vKYoi2zpZQX35 +dQDPtLDF2/v/ANZJ5pzMJR8yMMtEQ4tWxKuGzJw1ZgTgHtASPbj/M5fDnDO7Hqg4 +D09eLTkZAUfiBf6BzDyQIHn22CUexhaS70TbIT9AOAoOsGXMZz9d+iImKIm+gbzf +pR52LNbBGesHWGjwIuGF4InstIMsKSwGv2DctzhWI+i/m5Goi3rd1V8z/lzUbsf1 +0uXqQcSfTyv3ee6YiCWj2W8vcdc5H+B6KzSlGjAR4sRcHTHOQJYO9BgA9evQ3qsJ +Pp00iez13RdheJWPtbfUqQy4gdpu8HFeZx8CAwEAAaOBjzCBjDAOBgNVHQ8BAf8E +BAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRc +XBo+bJILrLcJiGkTWeMPpXb1TDAfBgNVHSMEGDAWgBQqk1Xu65Ww7EBCROw4KLGw +KuToaDAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAAAAAAMA0GCSqGSIb3DQEBCwUA +A4ICAQAbGsIMhL8clczNmhGl9xZhmyNz6FbLq6g163x9LTgfvwHPt+7urthtd++O +uy4Ut8zFurh/yk7eooPlzf8jO7QUJBAFVy4vj8IcsvpWbFa7cuEOIulbjIzyAm/v +lgy7vUQ6xrWn8x8O9K1ww9z7wugwCyl22BD0wSHZKclJz++AwpL6vUVOD76IIuJO ++S6bE6z26/0ndpundh2AkA++2eIleD6ygnTeTl0PWu6aGoCggBmos50f8KgYHZF/ +OZVef203kDls9xCaOiMzaU91VsgLqq/gNcT+2cBd5r3IZTY3C8Rve6EEHS+/4zxf +PKlmiLN7lU9GFZogKecYzY+zPT7OArY7OVFnGTo4qdhdmxnXzHsI+anMCjxLOgEJ +381hyplQGPQOouEupCBxFcwa7oMYoGu20+1nLWYEqFcIXCeyH+s77MyteJSsseqL +xivG5PT+jKJn9hrnFb39bBmht9Vsa+Th6vk953zi5wCSe1j2wXsxFaENDq6BQZOK +f86Kp86M2elYnv3lJ3j2DE2ZTMpw+PA5ThYUnB+HVqYeeB2Y3ErRS8P1FOp1LBE8 ++eTz7yXQO5OM2wdYhNNL1zDri/41fHXi9b6337PZVqc39GM+N74x/O4Q7xEBiWgQ 
+T0dT8SNwf55kv63MeZh63ImxFV0FNRkCteYLcJMle3ohIY4zyQ== +-----END CERTIFICATE----- diff --git a/libs/db/remotedb/test.key b/libs/db/remotedb/test.key new file mode 100644 index 000000000..14d285584 --- /dev/null +++ b/libs/db/remotedb/test.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA65ZQwJLNgma8pTW+yi4NN7s7Gc9Y7ehXQWNIfy8piiLbOllB +ffl1AM+0sMXb+/8A1knmnMwlHzIwy0RDi1bEq4bMnDVmBOAe0BI9uP8zl8OcM7se +qDgPT14tORkBR+IF/oHMPJAgefbYJR7GFpLvRNshP0A4Cg6wZcxnP136IiYoib6B +vN+lHnYs1sEZ6wdYaPAi4YXgiey0gywpLAa/YNy3OFYj6L+bkaiLet3VXzP+XNRu +x/XS5epBxJ9PK/d57piIJaPZby9x1zkf4HorNKUaMBHixFwdMc5Alg70GAD169De +qwk+nTSJ7PXdF2F4lY+1t9SpDLiB2m7wcV5nHwIDAQABAoIBAQCB2/ilPgaUE8d2 +ldqWHa5hgw4/2uCdO04ll/GVUczm/PG1BxAnvYL2MIfcTSRGkrjGZjP9SDZKLONi +mD1XKDv+hK5yiKi0lUnGzddCC0JILKYEieeLOGOQD0yERblEA13kfW20EIomUJ+y +TnVIajQD03pPIDoDqTco1fQvpMDFYw5Q//UhH7VBC261GO1akvhT2Gqdb4aKLaYQ +iDW9IEButL5cRKIJuRxToB/JbmPVEF7xIZtm0sf9dtYVOlBQLeID0uHXgaci0enc +de6GMajmj7NFqc36ypb+Ct18fqEwQBYD+TSQdKs7/lMsAXwRjd5HW4RbYiMZyYnf +Dxgh7QVBAoGBAP9aLLIUcIG7+gk1x7xd+8wRhfo+dhsungeCluSigI9AsfDr6dpR +G9/0lEJH56noZZKQueACTmj7shmRB40xFFLc8w0IDRZCnofsl+Z15k9K84uFPA3W +hdZH9nMieU/mRKdcUYK7pHGqbicHTaJQ5ydZ+xb2E+zYQHOzYpQacHv/AoGBAOwv +TjDZSiassnAPYmmfcHtkUF4gf7PTpiZfH0hXHGAb0mJX4cXAoktAeDeHSi2tz3LW +dAc0ReP8Pdf3uSNv7wkJ1KpNRxAhU5bhnDFmjRc7gMZknVOU+az2M+4yGOn/SOiJ +I6uMHgQDS/VsI+N583n6gbGxVHbQfr9TOc4bLpThAoGBAKin0JmWMnEdzRnEMbZS +hPrWIB2Wn794XNws/qjoQ+1aF60+xGhz5etXyYy1nWd1nZDekkZIf62LgKiuR8ST +xA6u7MGQrcQkID06oWGQQZvhr1ZZm76wEBnl0ftdq66AMpwvt46XjReeL78LbdVl +hidRoSwbQDHQ61EADH4xsFXVAoGBAISXqhXSZsZ/fU1b1avmTod3MYcmR4r07vnr +vOwnu05ZUCrVm3IhSvtkHhlOYl5yjVuy+UByICp1mWJ9N/qlBFTWqAVTjOmJTBwQ +XFd/cwXv6cN3CLu7js+DCHRYu5PiNVQWaWgNKWynTSViqGM0O3PnJphTLU/mjMFs +P69toyEBAoGBALh9YsqxHdYdS5WK9chzDfGlaTQ79jwN+gEzQuP1ooLF0JkMgh5W +//2C6kCrgBsGTm1gfHAjEfC04ZDZLFbKLm56YVKUGL6JJNapm6e5kfiZGjbRKWAg +ViCeRS2qQnVbH74GfHyimeTPDI9cJMiJfDDTPbfosqWSsPEcg2jfsySJ +-----END RSA PRIVATE KEY----- diff --git a/libs/db/types.go b/libs/db/types.go new file mode 100644 index 000000000..ad78859a7 --- /dev/null +++ b/libs/db/types.go @@ -0,0 +1,134 @@ +package db + +// DBs are goroutine safe. +type DB interface { + + // Get returns nil iff key doesn't exist. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte + Get([]byte) []byte + + // Has checks if a key exists. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte + Has(key []byte) bool + + // Set sets the key. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte + Set([]byte, []byte) + SetSync([]byte, []byte) + + // Delete deletes the key. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key readonly []byte + Delete([]byte) + DeleteSync([]byte) + + // Iterate over a domain of keys in ascending order. End is exclusive. + // Start must be less than end, or the Iterator is invalid. + // A nil start is interpreted as an empty byteslice. + // If end is nil, iterates up to the last item (inclusive). + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte + Iterator(start, end []byte) Iterator + + // Iterate over a domain of keys in descending order. End is exclusive. + // Start must be greater than end, or the Iterator is invalid. + // If start is nil, iterates from the last/greatest item (inclusive). + // If end is nil, iterates up to the first/least item (inclusive). 
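	// For example (an illustrative reading of this contract, not extra API):
	// ReverseIterator([]byte{0x03}, []byte{0x01}) would visit key 0x03, then
	// 0x02, but not 0x01, since end is exclusive.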
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte + ReverseIterator(start, end []byte) Iterator + + // Closes the connection. + Close() + + // Creates a batch for atomic updates. + NewBatch() Batch + + // For debugging + Print() + + // Stats returns a map of property values for all keys and the size of the cache. + Stats() map[string]string +} + +//---------------------------------------- +// Batch + +type Batch interface { + SetDeleter + Write() + WriteSync() +} + +type SetDeleter interface { + Set(key, value []byte) // CONTRACT: key, value readonly []byte + Delete(key []byte) // CONTRACT: key readonly []byte +} + +//---------------------------------------- +// Iterator + +/* + Usage: + + var itr Iterator = ... + defer itr.Close() + + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(); itr.Value() + // ... + } +*/ +type Iterator interface { + + // The start & end (exclusive) limits to iterate over. + // If end < start, then the Iterator goes in reverse order. + // + // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate + // over anything with the prefix []byte{12, 13}. + // + // The smallest key is the empty byte array []byte{} - see BeginningKey(). + // The largest key is the nil byte array []byte(nil) - see EndingKey(). + // CONTRACT: start, end readonly []byte + Domain() (start []byte, end []byte) + + // Valid returns whether the current position is valid. + // Once invalid, an Iterator is forever invalid. + Valid() bool + + // Next moves the iterator to the next sequential key in the database, as + // defined by order of iteration. + // + // If Valid returns false, this method will panic. + Next() + + // Key returns the key of the cursor. + // If Valid returns false, this method will panic. + // CONTRACT: key readonly []byte + Key() (key []byte) + + // Value returns the value of the cursor. + // If Valid returns false, this method will panic. + // CONTRACT: value readonly []byte + Value() (value []byte) + + // Close releases the Iterator. + Close() +} + +// For testing convenience. +func bz(s string) []byte { + return []byte(s) +} + +// We defensively turn nil keys or values into []byte{} for +// most operations. +func nonNilBytes(bz []byte) []byte { + if bz == nil { + return []byte{} + } + return bz +} diff --git a/libs/db/util.go b/libs/db/util.go new file mode 100644 index 000000000..51277ac42 --- /dev/null +++ b/libs/db/util.go @@ -0,0 +1,78 @@ +package db + +import ( + "bytes" +) + +func cp(bz []byte) (ret []byte) { + ret = make([]byte, len(bz)) + copy(ret, bz) + return ret +} + +// Returns a slice of the same length (big endian) +// except incremented by one. +// Returns nil on overflow (e.g. if bz bytes are all 0xFF) +// CONTRACT: len(bz) > 0 +func cpIncr(bz []byte) (ret []byte) { + if len(bz) == 0 { + panic("cpIncr expects non-zero bz length") + } + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i]++ + return + } + ret[i] = byte(0x00) + if i == 0 { + // Overflow + return nil + } + } + return nil +} + +// Returns a slice of the same length (big endian) +// except decremented by one. +// Returns nil on underflow (e.g. 
if bz bytes are all 0x00) +// CONTRACT: len(bz) > 0 +func cpDecr(bz []byte) (ret []byte) { + if len(bz) == 0 { + panic("cpDecr expects non-zero bz length") + } + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] > byte(0x00) { + ret[i]-- + return + } + ret[i] = byte(0xFF) + if i == 0 { + // Underflow + return nil + } + } + return nil +} + +// See DB interface documentation for more information. +func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { + if !isReverse { + if bytes.Compare(key, start) < 0 { + return false + } + if end != nil && bytes.Compare(end, key) <= 0 { + return false + } + return true + } else { + if start != nil && bytes.Compare(start, key) < 0 { + return false + } + if end != nil && bytes.Compare(key, end) <= 0 { + return false + } + return true + } +} diff --git a/libs/db/util_test.go b/libs/db/util_test.go new file mode 100644 index 000000000..44f1f9f73 --- /dev/null +++ b/libs/db/util_test.go @@ -0,0 +1,93 @@ +package db + +import ( + "fmt" + "testing" +) + +// Empty iterator for empty db. +func TestPrefixIteratorNoMatchNil(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + + checkInvalid(t, itr) + }) + } +} + +// Empty iterator for db populated after iterator created. +func TestPrefixIteratorNoMatch1(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + db.SetSync(bz("1"), bz("value_1")) + + checkInvalid(t, itr) + }) + } +} + +// Empty iterator for prefix starting after db entry. +func TestPrefixIteratorNoMatch2(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("3"), bz("value_3")) + itr := IteratePrefix(db, []byte("4")) + + checkInvalid(t, itr) + }) + } +} + +// Iterator with single val for db with single val, starting from that val. +func TestPrefixIteratorMatch1(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("2"), bz("value_2")) + itr := IteratePrefix(db, bz("2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("2"), bz("value_2")) + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Iterator with prefix iterates over everything with same prefix. +func TestPrefixIteratorMatches1N(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + + // prefixed + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + + // not + db.SetSync(bz("b/3"), bz("value_3")) + db.SetSync(bz("a-3"), bz("value_3")) + db.SetSync(bz("a.3"), bz("value_3")) + db.SetSync(bz("abcdefg"), bz("value_3")) + itr := IteratePrefix(db, bz("a/")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + //Once invalid... 
+ checkInvalid(t, itr) + }) + } +} diff --git a/libs/events/Makefile b/libs/events/Makefile new file mode 100644 index 000000000..696aafff1 --- /dev/null +++ b/libs/events/Makefile @@ -0,0 +1,9 @@ +.PHONY: docs +REPO:=github.com/tendermint/tendermint/libs/events + +docs: + @go get github.com/davecheney/godoc2md + godoc2md $(REPO) > README.md + +test: + go test -v ./... diff --git a/libs/events/README.md b/libs/events/README.md new file mode 100644 index 000000000..14aa498ff --- /dev/null +++ b/libs/events/README.md @@ -0,0 +1,175 @@ + + +# events +`import "github.com/tendermint/tendermint/libs/events"` + +* [Overview](#pkg-overview) +* [Index](#pkg-index) + +## Overview +Pub-Sub in go with event caching + + + + +## Index +* [type EventCache](#EventCache) + * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) + * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) + * [func (evc *EventCache) Flush()](#EventCache.Flush) +* [type EventCallback](#EventCallback) +* [type EventData](#EventData) +* [type EventSwitch](#EventSwitch) + * [func NewEventSwitch() EventSwitch](#NewEventSwitch) +* [type Eventable](#Eventable) +* [type Fireable](#Fireable) + + +#### Package files +[event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go) + + + + + + +## type [EventCache](/src/target/event_cache.go?s=116:179#L5) +``` go +type EventCache struct { + // contains filtered or unexported fields +} +``` +An EventCache buffers events for a Fireable +All events are cached. Filtering happens on Flush + + + + + + + +### func [NewEventCache](/src/target/event_cache.go?s=239:284#L11) +``` go +func NewEventCache(evsw Fireable) *EventCache +``` +Create a new EventCache with an EventSwitch as backend + + + + + +### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24) +``` go +func (evc *EventCache) FireEvent(event string, data EventData) +``` +Cache an event to be fired upon finality. + + + + +### func (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31) +``` go +func (evc *EventCache) Flush() +``` +Fire events by running evsw.FireEvent on all cached events. Blocks. 
+Clears cached events + + + + +## type [EventCallback](/src/target/events.go?s=4201:4240#L185) +``` go +type EventCallback func(data EventData) +``` + + + + + + + + + +## type [EventData](/src/target/events.go?s=243:294#L14) +``` go +type EventData interface { +} +``` +Generic event data can be typed and registered with tendermint/go-amino +via concrete implementation of this interface + + + + + + + + + + +## type [EventSwitch](/src/target/events.go?s=560:771#L29) +``` go +type EventSwitch interface { + cmn.Service + Fireable + + AddListenerForEvent(listenerID, event string, cb EventCallback) + RemoveListenerForEvent(event string, listenerID string) + RemoveListener(listenerID string) +} +``` + + + + + + +### func [NewEventSwitch](/src/target/events.go?s=917:950#L46) +``` go +func NewEventSwitch() EventSwitch +``` + + + + +## type [Eventable](/src/target/events.go?s=378:440#L20) +``` go +type Eventable interface { + SetEventSwitch(evsw EventSwitch) +} +``` +reactors and other modules should export +this interface to become eventable + + + + + + + + + + +## type [Fireable](/src/target/events.go?s=490:558#L25) +``` go +type Fireable interface { + FireEvent(event string, data EventData) +} +``` +an event switch or cache implements fireable + + + + + + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/libs/events/event_cache.go b/libs/events/event_cache.go new file mode 100644 index 000000000..f508e873d --- /dev/null +++ b/libs/events/event_cache.go @@ -0,0 +1,37 @@ +package events + +// An EventCache buffers events for a Fireable +// All events are cached. Filtering happens on Flush +type EventCache struct { + evsw Fireable + events []eventInfo +} + +// Create a new EventCache with an EventSwitch as backend +func NewEventCache(evsw Fireable) *EventCache { + return &EventCache{ + evsw: evsw, + } +} + +// a cached event +type eventInfo struct { + event string + data EventData +} + +// Cache an event to be fired upon finality. +func (evc *EventCache) FireEvent(event string, data EventData) { + // append to list (go will grow our backing array exponentially) + evc.events = append(evc.events, eventInfo{event, data}) +} + +// Fire events by running evsw.FireEvent on all cached events. Blocks. 
+// Clears cached events
+func (evc *EventCache) Flush() {
+	for _, ei := range evc.events {
+		evc.evsw.FireEvent(ei.event, ei.data)
+	}
+	// Clear the buffer. Since we only ever append to it, setting it to nil
+	// is safe and may save an allocation.
+	evc.events = nil
+}
diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go
new file mode 100644
index 000000000..ab321da3a
--- /dev/null
+++ b/libs/events/event_cache_test.go
@@ -0,0 +1,35 @@
+package events
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEventCache_Flush(t *testing.T) {
+	evsw := NewEventSwitch()
+	evsw.Start()
+	evsw.AddListenerForEvent("nothingness", "", func(data EventData) {
+		// Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache
+		require.FailNow(t, "We should never receive a message on this switch since none are fired")
+	})
+	evc := NewEventCache(evsw)
+	evc.Flush()
+	// Check after reset
+	evc.Flush()
+	fail := true
+	pass := false
+	evsw.AddListenerForEvent("somethingness", "something", func(data EventData) {
+		if fail {
+			require.FailNow(t, "Shouldn't see a message until flushed")
+		}
+		pass = true
+	})
+	evc.FireEvent("something", struct{ int }{1})
+	evc.FireEvent("something", struct{ int }{2})
+	evc.FireEvent("something", struct{ int }{3})
+	fail = false
+	evc.Flush()
+	assert.True(t, pass)
+}
diff --git a/libs/events/events.go b/libs/events/events.go
new file mode 100644
index 000000000..9c7f0fd05
--- /dev/null
+++ b/libs/events/events.go
@@ -0,0 +1,220 @@
+/*
+Pub-Sub in go with event caching
+*/
+package events
+
+import (
+	"sync"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// Generic event data can be typed and registered with tendermint/go-amino
+// via concrete implementation of this interface
+type EventData interface {
+	//AssertIsEventData()
+}
+
+// reactors and other modules should export
+// this interface to become eventable
+type Eventable interface {
+	SetEventSwitch(evsw EventSwitch)
+}
+
+// an event switch or cache implements fireable
+type Fireable interface {
+	FireEvent(event string, data EventData)
+}
+
+type EventSwitch interface {
+	cmn.Service
+	Fireable
+
+	AddListenerForEvent(listenerID, event string, cb EventCallback)
+	RemoveListenerForEvent(event string, listenerID string)
+	RemoveListener(listenerID string)
+}
+
+type eventSwitch struct {
+	cmn.BaseService
+
+	mtx        sync.RWMutex
+	eventCells map[string]*eventCell
+	listeners  map[string]*eventListener
+}
+
+func NewEventSwitch() EventSwitch {
+	evsw := &eventSwitch{
+		eventCells: make(map[string]*eventCell),
+		listeners:  make(map[string]*eventListener),
+	}
+	evsw.BaseService = *cmn.NewBaseService(nil, "EventSwitch", evsw)
+	return evsw
+}
+
+func (evsw *eventSwitch) OnStart() error {
+	return nil
+}
+
+func (evsw *eventSwitch) OnStop() {}
+
+func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) {
+	// Get/Create eventCell and listener
+	evsw.mtx.Lock()
+	eventCell := evsw.eventCells[event]
+	if eventCell == nil {
+		eventCell = newEventCell()
+		evsw.eventCells[event] = eventCell
+	}
+	listener := evsw.listeners[listenerID]
+	if listener == nil {
+		listener = newEventListener(listenerID)
+		evsw.listeners[listenerID] = listener
+	}
+	evsw.mtx.Unlock()
+
+	// Add event and listener
+	eventCell.AddListener(listenerID, cb)
+	listener.AddEvent(event)
+}
+
+func (evsw *eventSwitch) RemoveListener(listenerID string) {
+	// Get and remove listener
+	
evsw.mtx.RLock() + listener := evsw.listeners[listenerID] + evsw.mtx.RUnlock() + if listener == nil { + return + } + + evsw.mtx.Lock() + delete(evsw.listeners, listenerID) + evsw.mtx.Unlock() + + // Remove callback for each event. + listener.SetRemoved() + for _, event := range listener.GetEvents() { + evsw.RemoveListenerForEvent(event, listenerID) + } +} + +func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) { + // Get eventCell + evsw.mtx.Lock() + eventCell := evsw.eventCells[event] + evsw.mtx.Unlock() + + if eventCell == nil { + return + } + + // Remove listenerID from eventCell + numListeners := eventCell.RemoveListener(listenerID) + + // Maybe garbage collect eventCell. + if numListeners == 0 { + // Lock again and double check. + evsw.mtx.Lock() // OUTER LOCK + eventCell.mtx.Lock() // INNER LOCK + if len(eventCell.listeners) == 0 { + delete(evsw.eventCells, event) + } + eventCell.mtx.Unlock() // INNER LOCK + evsw.mtx.Unlock() // OUTER LOCK + } +} + +func (evsw *eventSwitch) FireEvent(event string, data EventData) { + // Get the eventCell + evsw.mtx.RLock() + eventCell := evsw.eventCells[event] + evsw.mtx.RUnlock() + + if eventCell == nil { + return + } + + // Fire event for all listeners in eventCell + eventCell.FireEvent(data) +} + +//----------------------------------------------------------------------------- + +// eventCell handles keeping track of listener callbacks for a given event. +type eventCell struct { + mtx sync.RWMutex + listeners map[string]EventCallback +} + +func newEventCell() *eventCell { + return &eventCell{ + listeners: make(map[string]EventCallback), + } +} + +func (cell *eventCell) AddListener(listenerID string, cb EventCallback) { + cell.mtx.Lock() + cell.listeners[listenerID] = cb + cell.mtx.Unlock() +} + +func (cell *eventCell) RemoveListener(listenerID string) int { + cell.mtx.Lock() + delete(cell.listeners, listenerID) + numListeners := len(cell.listeners) + cell.mtx.Unlock() + return numListeners +} + +func (cell *eventCell) FireEvent(data EventData) { + cell.mtx.RLock() + for _, listener := range cell.listeners { + listener(data) + } + cell.mtx.RUnlock() +} + +//----------------------------------------------------------------------------- + +type EventCallback func(data EventData) + +type eventListener struct { + id string + + mtx sync.RWMutex + removed bool + events []string +} + +func newEventListener(id string) *eventListener { + return &eventListener{ + id: id, + removed: false, + events: nil, + } +} + +func (evl *eventListener) AddEvent(event string) { + evl.mtx.Lock() + defer evl.mtx.Unlock() + + if evl.removed { + return + } + evl.events = append(evl.events, event) +} + +func (evl *eventListener) GetEvents() []string { + evl.mtx.RLock() + defer evl.mtx.RUnlock() + + events := make([]string, len(evl.events)) + copy(events, evl.events) + return events +} + +func (evl *eventListener) SetRemoved() { + evl.mtx.Lock() + defer evl.mtx.Unlock() + evl.removed = true +} diff --git a/libs/events/events_test.go b/libs/events/events_test.go new file mode 100644 index 000000000..4995ae730 --- /dev/null +++ b/libs/events/events_test.go @@ -0,0 +1,380 @@ +package events + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single +// listener to an event, and sends a string "data". 
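+//
+// The pattern exercised here is, roughly:
+//
+//	evsw := NewEventSwitch()
+//	evsw.Start()
+//	evsw.AddListenerForEvent("listener", "event", func(data EventData) { ... })
+//	evsw.FireEvent("event", "data")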
+func TestAddListenerForEventFireOnce(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + if err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + messages := make(chan EventData) + evsw.AddListenerForEvent("listener", "event", + func(data EventData) { + messages <- data + }) + go evsw.FireEvent("event", "data") + received := <-messages + if received != "data" { + t.Errorf("Message received does not match: %v", received) + } +} + +// TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single +// listener to an event, and sends a thousand integers. +func TestAddListenerForEventFireMany(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + if err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum := make(chan uint64) + doneSending := make(chan uint64) + numbers := make(chan uint64, 4) + // subscribe one listener for one event + evsw.AddListenerForEvent("listener", "event", + func(data EventData) { + numbers <- data.(uint64) + }) + // collect received events + go sumReceivedNumbers(numbers, doneSum) + // go fire events + go fireEvents(evsw, "event", doneSending, uint64(1)) + checkSum := <-doneSending + close(numbers) + eventSum := <-doneSum + if checkSum != eventSum { + t.Errorf("Not all messages sent were received.\n") + } +} + +// TestAddListenerForDifferentEvents sets up an EventSwitch, subscribes a single +// listener to three different events and sends a thousand integers for each +// of the three events. +func TestAddListenerForDifferentEvents(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + if err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers := make(chan uint64, 4) + // subscribe one listener to three events + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + numbers <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + numbers <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event3", + func(data EventData) { + numbers <- data.(uint64) + }) + // collect received events + go sumReceivedNumbers(numbers, doneSum) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(evsw, "event2", doneSending2, uint64(1)) + go fireEvents(evsw, "event3", doneSending3, uint64(1)) + var checkSum uint64 = 0 + checkSum += <-doneSending1 + checkSum += <-doneSending2 + checkSum += <-doneSending3 + close(numbers) + eventSum := <-doneSum + if checkSum != eventSum { + t.Errorf("Not all messages sent were received.\n") + } +} + +// TestAddDifferentListenerForDifferentEvents sets up an EventSwitch, +// subscribes a first listener to three events, and subscribes a second +// listener to two of those three events, and then sends a thousand integers +// for each of the three events. 
+func TestAddDifferentListenerForDifferentEvents(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + if err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener1", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event2", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event3", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event3", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for listener1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received events for listener2 + go sumReceivedNumbers(numbers2, doneSum2) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + checkSumEvent1 := <-doneSending1 + checkSumEvent2 := <-doneSending2 + checkSumEvent3 := <-doneSending3 + checkSum1 := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 + checkSum2 := checkSumEvent2 + checkSumEvent3 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSum1 != eventSum1 || + checkSum2 != eventSum2 { + t.Errorf("Not all messages sent were received for different listeners to different events.\n") + } +} + +// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to +// two events, fires a thousand integers for the first event, then unsubscribes +// the listener and fires a thousand integers for the second event. 
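+//
+// In sketch form, the API exercised below:
+//
+//	evsw.AddListenerForEvent("listener", "event1", cb1)
+//	evsw.AddListenerForEvent("listener", "event2", cb2)
+//	evsw.RemoveListener("listener") // unsubscribes it from both events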
+func TestAddAndRemoveListener(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + if err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for event1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received events for event2 + go sumReceivedNumbers(numbers2, doneSum2) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + checkSumEvent1 := <-doneSending1 + // after sending all event1, unsubscribe for all events + evsw.RemoveListener("listener") + go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + checkSumEvent2 := <-doneSending2 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSumEvent1 != eventSum1 || + // correct value asserted by preceding tests, suffices to be non-zero + checkSumEvent2 == uint64(0) || + eventSum2 != uint64(0) { + t.Errorf("Not all messages sent were received or unsubscription did not register.\n") + } +} + +// TestRemoveListener does basic tests on adding and removing +func TestRemoveListener(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + if err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + count := 10 + sum1, sum2 := 0, 0 + // add some listeners and make sure they work + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + sum1++ + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + sum2++ + }) + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count, sum1) + assert.Equal(t, count, sum2) + + // remove one by event and make sure it is gone + evsw.RemoveListenerForEvent("event2", "listener") + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count*2, sum1) + assert.Equal(t, count, sum2) + + // remove the listener entirely and make sure both gone + evsw.RemoveListener("listener") + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count*2, sum1) + assert.Equal(t, count, sum2) +} + +// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two +// listeners to three events, and fires a thousand integers for each event. +// These two listeners serve as the baseline validation while other listeners +// are randomly subscribed and unsubscribed. +// More precisely it randomly subscribes new listeners (different from the first +// two listeners) to one of these three events. At the same time it starts +// randomly unsubscribing these additional listeners from all events they are +// at that point subscribed to. +// NOTE: it is important to run this test with race conditions tracking on, +// `go test -race`, to examine for possible race conditions. 
+func TestAddAndRemoveListenersAsync(t *testing.T) {
+	evsw := NewEventSwitch()
+	err := evsw.Start()
+	if err != nil {
+		t.Errorf("Failed to start EventSwitch, error: %v", err)
+	}
+	doneSum1 := make(chan uint64)
+	doneSum2 := make(chan uint64)
+	doneSending1 := make(chan uint64)
+	doneSending2 := make(chan uint64)
+	doneSending3 := make(chan uint64)
+	numbers1 := make(chan uint64, 4)
+	numbers2 := make(chan uint64, 4)
+	// subscribe two listeners to three events
+	evsw.AddListenerForEvent("listener1", "event1",
+		func(data EventData) {
+			numbers1 <- data.(uint64)
+		})
+	evsw.AddListenerForEvent("listener1", "event2",
+		func(data EventData) {
+			numbers1 <- data.(uint64)
+		})
+	evsw.AddListenerForEvent("listener1", "event3",
+		func(data EventData) {
+			numbers1 <- data.(uint64)
+		})
+	evsw.AddListenerForEvent("listener2", "event1",
+		func(data EventData) {
+			numbers2 <- data.(uint64)
+		})
+	evsw.AddListenerForEvent("listener2", "event2",
+		func(data EventData) {
+			numbers2 <- data.(uint64)
+		})
+	evsw.AddListenerForEvent("listener2", "event3",
+		func(data EventData) {
+			numbers2 <- data.(uint64)
+		})
+	// collect received events for event1
+	go sumReceivedNumbers(numbers1, doneSum1)
+	// collect received events for event2
+	go sumReceivedNumbers(numbers2, doneSum2)
+	addListenersStress := func() {
+		s1 := rand.NewSource(time.Now().UnixNano())
+		r1 := rand.New(s1)
+		for k := uint16(0); k < 400; k++ {
+			listenerNumber := r1.Intn(100) + 3
+			eventNumber := r1.Intn(3) + 1
+			go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber),
+				fmt.Sprintf("event%v", eventNumber),
+				func(_ EventData) {})
+		}
+	}
+	removeListenersStress := func() {
+		s2 := rand.NewSource(time.Now().UnixNano())
+		r2 := rand.New(s2)
+		for k := uint16(0); k < 80; k++ {
+			listenerNumber := r2.Intn(100) + 3
+			go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber))
+		}
+	}
+	addListenersStress()
+	// go fire events
+	go fireEvents(evsw, "event1", doneSending1, uint64(1))
+	removeListenersStress()
+	go fireEvents(evsw, "event2", doneSending2, uint64(1001))
+	go fireEvents(evsw, "event3", doneSending3, uint64(2001))
+	checkSumEvent1 := <-doneSending1
+	checkSumEvent2 := <-doneSending2
+	checkSumEvent3 := <-doneSending3
+	checkSum := checkSumEvent1 + checkSumEvent2 + checkSumEvent3
+	close(numbers1)
+	close(numbers2)
+	eventSum1 := <-doneSum1
+	eventSum2 := <-doneSum2
+	if checkSum != eventSum1 ||
+		checkSum != eventSum2 {
+		t.Errorf("Not all messages sent were received.\n")
+	}
+}
+
+//------------------------------------------------------------------------------
+// Helper functions

+// sumReceivedNumbers takes two channels and adds all numbers received
+// until the receiving channel `numbers` is closed; it then sends the sum
+// on `doneSum` and closes that channel. Expected to be run in a goroutine.
+func sumReceivedNumbers(numbers, doneSum chan uint64) {
+	var sum uint64 = 0
+	for {
+		j, more := <-numbers
+		sum += j
+		if !more {
+			doneSum <- sum
+			close(doneSum)
+			return
+		}
+	}
+}
+
+// fireEvents takes an EventSwitch and fires a thousand integers under
+// a given `event`, with the integers monotonically increasing from `offset`
+// to `offset` + 999. It additionally sends the sum of all fired integers on
+// `doneChan`, so that the test can assert both that all events were sent and
+// that all events were received.
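+//
+// For example, fireEvents(evsw, "event1", doneChan, 1) fires the integers
+// 1 through 1000 and sends 1000*1001/2 = 500500 on doneChan.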
+func fireEvents(evsw EventSwitch, event string, doneChan chan uint64, + offset uint64) { + var sentSum uint64 = 0 + for i := offset; i <= offset+uint64(999); i++ { + sentSum += i + evsw.FireEvent(event, i) + } + doneChan <- sentSum + close(doneChan) +} diff --git a/libs/flowrate/README.md b/libs/flowrate/README.md new file mode 100644 index 000000000..db428090c --- /dev/null +++ b/libs/flowrate/README.md @@ -0,0 +1,10 @@ +Data Flow Rate Control +====================== + +To download and install this package run: + +go get github.com/mxk/go-flowrate/flowrate + +The documentation is available at: + +http://godoc.org/github.com/mxk/go-flowrate/flowrate diff --git a/libs/flowrate/flowrate.go b/libs/flowrate/flowrate.go new file mode 100644 index 000000000..e233eae0f --- /dev/null +++ b/libs/flowrate/flowrate.go @@ -0,0 +1,275 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. +func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// Hack to set the current rEMA. +func (m *Monitor) SetREMA(rEMA float64) { + m.mu.Lock() + m.rEMA = rEMA + m.samples++ + m.mu.Unlock() +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. 
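+//
+// For example, a rate-monitored reader can wrap its underlying call as
+//
+//	n, err = m.IO(r.Read(p))
+//
+// which is exactly how Reader.Read in io.go uses it.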
+func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. +type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. +func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. If block == true, the +// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. 
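+//
+// As a concrete sketch: with the default 100ms sampling rate, a call
+// Limit(64, 100, true) caps each sample at round(100 * 0.1) = 10 bytes, so it
+// blocks until the current sample has room and returns at most 10.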
+func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. +func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/libs/flowrate/io.go b/libs/flowrate/io.go new file mode 100644 index 000000000..fbe090972 --- /dev/null +++ b/libs/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. 
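+//
+// A minimal use, assuming some underlying io.Reader src:
+//
+//	r := NewReader(src, 1024) // limit reads to 1024 bytes per second
+//	n, err := r.Read(buf)
+//	fmt.Println(r.Status().CurRate)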
+type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. 
+func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/libs/flowrate/io_test.go b/libs/flowrate/io_test.go new file mode 100644 index 000000000..c84029d5e --- /dev/null +++ b/libs/flowrate/io_test.go @@ -0,0 +1,194 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "bytes" + "testing" + "time" +) + +const ( + _50ms = 50 * time.Millisecond + _100ms = 100 * time.Millisecond + _200ms = 200 * time.Millisecond + _300ms = 300 * time.Millisecond + _400ms = 400 * time.Millisecond + _500ms = 500 * time.Millisecond +) + +func nextStatus(m *Monitor) Status { + samples := m.samples + for i := 0; i < 30; i++ { + if s := m.Status(); s.Samples != samples { + return s + } + time.Sleep(5 * time.Millisecond) + } + return m.Status() +} + +func TestReader(t *testing.T) { + in := make([]byte, 100) + for i := range in { + in[i] = byte(i) + } + b := make([]byte, 100) + r := NewReader(bytes.NewReader(in), 100) + start := time.Now() + + // Make sure r implements Limiter + _ = Limiter(r) + + // 1st read of 10 bytes is performed immediately + if n, err := r.Read(b); n != 10 || err != nil { + t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + // No new Reads allowed in the current sample + r.SetBlocking(false) + if n, err := r.Read(b); n != 0 || err != nil { + t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + status := [6]Status{0: r.Status()} // No samples in the first status + + // 2nd read of 10 bytes blocks until the next sample + r.SetBlocking(true) + if n, err := r.Read(b[10:]); n != 10 || err != nil { + t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _100ms { + t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) + } + + status[1] = r.Status() // 1st sample + status[2] = nextStatus(r.Monitor) // 2nd sample + status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample + + if n := r.Done(); n != 20 { + t.Fatalf("r.Done() expected 20; got %v", n) + } + + status[4] = r.Status() + status[5] = nextStatus(r.Monitor) // Timeout + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + } + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) + } + } + if !bytes.Equal(b[:20], in[:20]) { + t.Errorf("r.Read() input doesn't match output") + } +} + +func TestWriter(t *testing.T) { + b := make([]byte, 100) + for i := range b { + b[i] = byte(i) + } + w := NewWriter(&bytes.Buffer{}, 200) + start := time.Now() + + // Make sure w implements Limiter + _ = Limiter(w) + + // Non-blocking 20-byte write for the first sample returns ErrLimit + w.SetBlocking(false) + if n, err := w.Write(b); n != 20 || err != ErrLimit { + 
t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("w.Write(b) took too long (%v)", rt) + } + + // Blocking 80-byte write + w.SetBlocking(true) + if n, err := w.Write(b[20:]); n != 80 || err != nil { + t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _300ms { + // Explanation for `rt < _300ms` (as opposed to `< _400ms`) + // + // |<-- start | | + // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms + // sends: 20|20 |20 |20 |20# + // + // NOTE: The '#' symbol can thus happen before 400ms is up. + // Thus, we can only panic if rt < _300ms. + t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) + } + + w.SetTransferSize(100) + status := []Status{w.Status(), nextStatus(w.Monitor)} + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000}, + Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, + } + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) + } + } + if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { + t.Errorf("w.Write() input doesn't match output") + } +} + +const maxDeviationForDuration = 50 * time.Millisecond +const maxDeviationForRate int64 = 50 + +// statusesAreEqual returns true if s1 is equal to s2. Equality here means +// general equality of fields except for the duration and rates, which can +// drift due to unpredictable delays (e.g. thread wakes up 25ms after +// `time.Sleep` has ended). +func statusesAreEqual(s1 *Status, s2 *Status) bool { + if s1.Active == s2.Active && + s1.Start == s2.Start && + durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && + s1.Idle == s2.Idle && + s1.Bytes == s2.Bytes && + s1.Samples == s2.Samples && + ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && + ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && + ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && + ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && + s1.BytesRem == s2.BytesRem && + durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && + s1.Progress == s2.Progress { + return true + } + return false +} + +func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { + return d2-d1 <= maxDeviation +} + +func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { + sub := r1 - r2 + if sub < 0 { + sub = -sub + } + if sub <= maxDeviation { + return true + } + return false +} diff --git a/libs/flowrate/util.go b/libs/flowrate/util.go new file mode 100644 index 000000000..b33ddc701 --- /dev/null +++ b/libs/flowrate/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Now().Round(clockRate) + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Now().Round(clockRate).Sub(czero) +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. 
+func clockToTime(c time.Duration) time.Time { + return czero.Add(c) +} + +// clockRound returns d rounded to the nearest clockRate increment. +func clockRound(d time.Duration) time.Duration { + return (d + clockRate>>1) / clockRate * clockRate +} + +// round returns x rounded to the nearest int64 (non-negative values only). +func round(x float64) int64 { + if _, frac := math.Modf(x); frac >= 0.5 { + return int64(math.Ceil(x)) + } + return int64(math.Floor(x)) +} + +// Percent represents a percentage in increments of 1/1000th of a percent. +type Percent uint32 + +// percentOf calculates what percent of the total is x. +func percentOf(x, total float64) Percent { + if x < 0 || total <= 0 { + return 0 + } else if p := round(x / total * 1e5); p <= math.MaxUint32 { + return Percent(p) + } + return Percent(math.MaxUint32) +} + +func (p Percent) Float() float64 { + return float64(p) * 1e-3 +} + +func (p Percent) String() string { + var buf [12]byte + b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) + n := len(b) + b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) + b[n] = '.' + return string(append(b, '%')) +} diff --git a/libs/log/filter.go b/libs/log/filter.go new file mode 100644 index 000000000..768c09b85 --- /dev/null +++ b/libs/log/filter.go @@ -0,0 +1,158 @@ +package log + +import "fmt" + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelError +) + +type filter struct { + next Logger + allowed level // XOR'd levels for default case + allowedKeyvals map[keyval]level // When key-value match, use this level +} + +type keyval struct { + key interface{} + value interface{} +} + +// NewFilter wraps next and implements filtering. See the commentary on the +// Option functions for a detailed description of how to configure levels. If +// no options are provided, all leveled log events created with Debug, Info or +// Error helper methods are squelched. +func NewFilter(next Logger, options ...Option) Logger { + l := &filter{ + next: next, + allowedKeyvals: make(map[keyval]level), + } + for _, option := range options { + option(l) + } + return l +} + +func (l *filter) Info(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelInfo != 0 + if !levelAllowed { + return + } + l.next.Info(msg, keyvals...) +} + +func (l *filter) Debug(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelDebug != 0 + if !levelAllowed { + return + } + l.next.Debug(msg, keyvals...) +} + +func (l *filter) Error(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelError != 0 + if !levelAllowed { + return + } + l.next.Error(msg, keyvals...) +} + +// With implements Logger by constructing a new filter with a keyvals appended +// to the logger. +// +// If custom level was set for a keyval pair using one of the +// Allow*With methods, it is used as the logger's level. +// +// Examples: +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) +// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto" +// +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil +// +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... 
Hello module=crypto user=Sam"
+func (l *filter) With(keyvals ...interface{}) Logger {
+	for i := len(keyvals) - 2; i >= 0; i -= 2 {
+		for kv, allowed := range l.allowedKeyvals {
+			if keyvals[i] == kv.key && keyvals[i+1] == kv.value {
+				return &filter{next: l.next.With(keyvals...), allowed: allowed, allowedKeyvals: l.allowedKeyvals}
+			}
+		}
+	}
+	return &filter{next: l.next.With(keyvals...), allowed: l.allowed, allowedKeyvals: l.allowedKeyvals}
+}
+
+//--------------------------------------------------------------------------------
+
+// Option sets a parameter for the filter.
+type Option func(*filter)
+
+// AllowLevel returns an option for the given level, or an error if no option
+// exists for that level.
+func AllowLevel(lvl string) (Option, error) {
+	switch lvl {
+	case "debug":
+		return AllowDebug(), nil
+	case "info":
+		return AllowInfo(), nil
+	case "error":
+		return AllowError(), nil
+	case "none":
+		return AllowNone(), nil
+	default:
+		return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl)
+	}
+}
+
+// AllowAll is an alias for AllowDebug.
+func AllowAll() Option {
+	return AllowDebug()
+}
+
+// AllowDebug allows error, info and debug level log events to pass.
+func AllowDebug() Option {
+	return allowed(levelError | levelInfo | levelDebug)
+}
+
+// AllowInfo allows error and info level log events to pass.
+func AllowInfo() Option {
+	return allowed(levelError | levelInfo)
+}
+
+// AllowError allows only error level log events to pass.
+func AllowError() Option {
+	return allowed(levelError)
+}
+
+// AllowNone allows no leveled log events to pass.
+func AllowNone() Option {
+	return allowed(0)
+}
+
+func allowed(allowed level) Option {
+	return func(l *filter) { l.allowed = allowed }
+}
+
+// AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair.
+func AllowDebugWith(key interface{}, value interface{}) Option {
+	return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug }
+}
+
+// AllowInfoWith allows error and info level log events to pass for a specific key value pair.
+func AllowInfoWith(key interface{}, value interface{}) Option {
+	return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo }
+}
+
+// AllowErrorWith allows only error level log events to pass for a specific key value pair.
+func AllowErrorWith(key interface{}, value interface{}) Option {
+	return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError }
+}
+
+// AllowNoneWith allows no leveled log events to pass for a specific key value pair.
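+//
+// For example (an illustrative sketch mirroring the With examples above):
+//
+//	logger = log.NewFilter(logger, log.AllowError(), log.AllowNoneWith("user", "Sam"))
+//	logger.With("user", "Sam").Error("boom") // squelched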
+func AllowNoneWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } +} diff --git a/libs/log/filter_test.go b/libs/log/filter_test.go new file mode 100644 index 000000000..f9957f043 --- /dev/null +++ b/libs/log/filter_test.go @@ -0,0 +1,118 @@ +package log_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/tendermint/tendermint/libs/log" +) + +func TestVariousLevels(t *testing.T) { + testCases := []struct { + name string + allowed log.Option + want string + }{ + { + "AllowAll", + log.AllowAll(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowDebug", + log.AllowDebug(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowInfo", + log.AllowInfo(), + strings.Join([]string{ + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowError", + log.AllowError(), + strings.Join([]string{ + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowNone", + log.AllowNone(), + ``, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + logger := log.NewFilter(log.NewTMJSONLogger(&buf), tc.allowed) + + logger.Debug("here", "this is", "debug log") + logger.Info("here", "this is", "info log") + logger.Error("here", "this is", "error log") + + if want, have := tc.want, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant:\n%s\nhave:\n%s", want, have) + } + }) + } +} + +func TestLevelContext(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + logger = log.NewFilter(logger, log.AllowError()) + logger = logger.With("context", "value") + + logger.Error("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + logger.Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestVariousAllowWith(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + + logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value")) + logger1.With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger2 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) + logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger3 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) + logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + if want, have := 
`{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} diff --git a/libs/log/logger.go b/libs/log/logger.go new file mode 100644 index 000000000..ddb187bc7 --- /dev/null +++ b/libs/log/logger.go @@ -0,0 +1,30 @@ +package log + +import ( + "io" + + kitlog "github.com/go-kit/kit/log" +) + +// Logger is what any Tendermint library should take. +type Logger interface { + Debug(msg string, keyvals ...interface{}) + Info(msg string, keyvals ...interface{}) + Error(msg string, keyvals ...interface{}) + + With(keyvals ...interface{}) Logger +} + +// NewSyncWriter returns a new writer that is safe for concurrent use by +// multiple goroutines. Writes to the returned writer are passed on to w. If +// another write is already in progress, the calling goroutine blocks until +// the writer is available. +// +// If w implements the following interface, so does the returned writer. +// +// interface { +// Fd() uintptr +// } +func NewSyncWriter(w io.Writer) io.Writer { + return kitlog.NewSyncWriter(w) +} diff --git a/libs/log/nop_logger.go b/libs/log/nop_logger.go new file mode 100644 index 000000000..12d75abe6 --- /dev/null +++ b/libs/log/nop_logger.go @@ -0,0 +1,17 @@ +package log + +type nopLogger struct{} + +// Interface assertions +var _ Logger = (*nopLogger)(nil) + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return &nopLogger{} } + +func (nopLogger) Info(string, ...interface{}) {} +func (nopLogger) Debug(string, ...interface{}) {} +func (nopLogger) Error(string, ...interface{}) {} + +func (l *nopLogger) With(...interface{}) Logger { + return l +} diff --git a/libs/log/testing_logger.go b/libs/log/testing_logger.go new file mode 100644 index 000000000..81482bef5 --- /dev/null +++ b/libs/log/testing_logger.go @@ -0,0 +1,49 @@ +package log + +import ( + "os" + "testing" + + "github.com/go-kit/kit/log/term" +) + +var ( + // reuse the same logger across all tests + _testingLogger Logger +) + +// TestingLogger returns a TMLogger which writes to STDOUT if testing being run +// with the verbose (-v) flag, NopLogger otherwise. +// +// Note that the call to TestingLogger() must be made +// inside a test (not in the init func) because +// verbose flag only set at the time of testing. +func TestingLogger() Logger { + if _testingLogger != nil { + return _testingLogger + } + + if testing.Verbose() { + _testingLogger = NewTMLogger(NewSyncWriter(os.Stdout)) + } else { + _testingLogger = NewNopLogger() + } + + return _testingLogger +} + +// TestingLoggerWithColorFn allow you to provide your own color function. See +// TestingLogger for documentation. +func TestingLoggerWithColorFn(colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { + if _testingLogger != nil { + return _testingLogger + } + + if testing.Verbose() { + _testingLogger = NewTMLoggerWithColorFn(NewSyncWriter(os.Stdout), colorFn) + } else { + _testingLogger = NewNopLogger() + } + + return _testingLogger +} diff --git a/libs/log/tm_json_logger.go b/libs/log/tm_json_logger.go new file mode 100644 index 000000000..a71ac1034 --- /dev/null +++ b/libs/log/tm_json_logger.go @@ -0,0 +1,15 @@ +package log + +import ( + "io" + + kitlog "github.com/go-kit/kit/log" +) + +// NewTMJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. 
The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. +func NewTMJSONLogger(w io.Writer) Logger { + return &tmLogger{kitlog.NewJSONLogger(w)} +} diff --git a/libs/log/tm_logger.go b/libs/log/tm_logger.go new file mode 100644 index 000000000..d49e8d22b --- /dev/null +++ b/libs/log/tm_logger.go @@ -0,0 +1,83 @@ +package log + +import ( + "fmt" + "io" + + kitlog "github.com/go-kit/kit/log" + kitlevel "github.com/go-kit/kit/log/level" + "github.com/go-kit/kit/log/term" +) + +const ( + msgKey = "_msg" // "_" prefixed to avoid collisions + moduleKey = "module" +) + +type tmLogger struct { + srcLogger kitlog.Logger +} + +// Interface assertions +var _ Logger = (*tmLogger)(nil) + +// NewTMTermLogger returns a logger that encodes msg and keyvals to the Writer +// using go-kit's log as an underlying logger and our custom formatter. Note +// that underlying logger could be swapped with something else. +func NewTMLogger(w io.Writer) Logger { + // Color by level value + colorFn := func(keyvals ...interface{}) term.FgBgColor { + if keyvals[0] != kitlevel.Key() { + panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0])) + } + switch keyvals[1].(kitlevel.Value).String() { + case "debug": + return term.FgBgColor{Fg: term.DarkGray} + case "error": + return term.FgBgColor{Fg: term.Red} + default: + return term.FgBgColor{} + } + } + + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} +} + +// NewTMLoggerWithColorFn allows you to provide your own color function. See +// NewTMLogger for documentation. +func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} +} + +// Info logs a message at level Info. +func (l *tmLogger) Info(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Info(l.srcLogger) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) + } +} + +// Debug logs a message at level Debug. +func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Debug(l.srcLogger) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) + } +} + +// Error logs a message at level Error. +func (l *tmLogger) Error(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Error(l.srcLogger) + lWithMsg := kitlog.With(lWithLevel, msgKey, msg) + if err := lWithMsg.Log(keyvals...); err != nil { + lWithMsg.Log("err", err) + } +} + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Info, Debug or Error. 
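+//
+// For example (illustrative):
+//
+//	l := logger.With("module", "mempool")
+//	l.Info("added tx") // rendered with module=mempool attached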
+func (l *tmLogger) With(keyvals ...interface{}) Logger { + return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} +} diff --git a/libs/log/tm_logger_test.go b/libs/log/tm_logger_test.go new file mode 100644 index 000000000..1f890cef1 --- /dev/null +++ b/libs/log/tm_logger_test.go @@ -0,0 +1,44 @@ +package log_test + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" + + "github.com/go-logfmt/logfmt" + "github.com/tendermint/tendermint/libs/log" +) + +func TestLoggerLogsItsErrors(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMLogger(&buf) + logger.Info("foo", "baz baz", "bar") + msg := strings.TrimSpace(buf.String()) + if !strings.Contains(msg, logfmt.ErrInvalidKey.Error()) { + t.Errorf("Expected logger msg to contain ErrInvalidKey, got %s", msg) + } +} + +func BenchmarkTMLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) +} + +func BenchmarkTMLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), withInfoMessage) +} + +func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { + lc := logger.With("common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseInfoMessage = func(logger log.Logger) { logger.Info("foo_message", "foo_key", "foo_value") } + withInfoMessage = func(logger log.Logger) { logger.With("a", "b").Info("c", "d", "f") } +) diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go new file mode 100644 index 000000000..d03979718 --- /dev/null +++ b/libs/log/tmfmt_logger.go @@ -0,0 +1,127 @@ +package log + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + kitlog "github.com/go-kit/kit/log" + kitlevel "github.com/go-kit/kit/log/level" + "github.com/go-logfmt/logfmt" +) + +type tmfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *tmfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var tmfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc tmfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type tmfmtLogger struct { + w io.Writer +} + +// NewTMFmtLogger returns a logger that encodes keyvals to the Writer in +// Tendermint custom format. Note complex types (structs, maps, slices) +// formatted as "%+v". +// +// Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. 
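+//
+// Example output line (illustrative; the exact layout is documented in the
+// Log method below):
+//
+//	D[05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) module=p2p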
+func NewTMFmtLogger(w io.Writer) kitlog.Logger { + return &tmfmtLogger{w} +} + +func (l tmfmtLogger) Log(keyvals ...interface{}) error { + enc := tmfmtEncoderPool.Get().(*tmfmtEncoder) + enc.Reset() + defer tmfmtEncoderPool.Put(enc) + + const unknown = "unknown" + lvl := "none" + msg := unknown + module := unknown + + // indexes of keys to skip while encoding later + excludeIndexes := make([]int, 0) + + for i := 0; i < len(keyvals)-1; i += 2 { + // Extract level + if keyvals[i] == kitlevel.Key() { + excludeIndexes = append(excludeIndexes, i) + switch keyvals[i+1].(type) { + case string: + lvl = keyvals[i+1].(string) + case kitlevel.Value: + lvl = keyvals[i+1].(kitlevel.Value).String() + default: + panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) + } + // and message + } else if keyvals[i] == msgKey { + excludeIndexes = append(excludeIndexes, i) + msg = keyvals[i+1].(string) + // and module (could be multiple keyvals; if such case last keyvalue wins) + } else if keyvals[i] == moduleKey { + excludeIndexes = append(excludeIndexes, i) + module = keyvals[i+1].(string) + } + } + + // Form a custom Tendermint line + // + // Example: + // D[05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) + // + // Description: + // D - first character of the level, uppercase (ASCII only) + // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) + // Stopping ... - message + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) + + if module != unknown { + enc.buf.WriteString("module=" + module + " ") + } + +KeyvalueLoop: + for i := 0; i < len(keyvals)-1; i += 2 { + for _, j := range excludeIndexes { + if i == j { + continue KeyvalueLoop + } + } + + err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) + if err == logfmt.ErrUnsupportedValueType { + enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) + } else if err != nil { + return err + } + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. 
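+	// The pooled encoder's buffer assembled above already holds the complete
+	// line, so concurrent callers can interleave only at Write boundaries,
+	// never within a single log line.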
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go new file mode 100644 index 000000000..d6f039ce4 --- /dev/null +++ b/libs/log/tmfmt_logger_test.go @@ -0,0 +1,118 @@ +package log_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "regexp" + "testing" + + kitlog "github.com/go-kit/kit/log" + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/libs/log" +) + +func TestTMFmtLogger(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewTMFmtLogger(buf) + + if err := logger.Log("hello", "world"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hello=world\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ std_map=map\[1:2\] my_map=special_behavior\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("level", "error"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`E\[.+\] unknown \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("_msg", "Hello"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("module", "main", "module", "crypto", "module", "wire"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) +} + +func BenchmarkTMFmtLoggerSimple(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), baseMessage) +} + +func BenchmarkTMFmtLoggerContextual(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), withMessage) +} + +func TestTMFmtLoggerConcurrency(t *testing.T) { + t.Parallel() + testConcurrency(t, log.NewTMFmtLogger(ioutil.Discard), 10000) +} + +func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { + lc := kitlog.With(logger, "common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } + withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } +) + +// These test are designed to be run with the race detector. 
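+// For example, with the standard Go tooling:
+//
+//	go test -race ./libs/log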
+ +func testConcurrency(t *testing.T, logger kitlog.Logger, total int) { + n := int(math.Sqrt(float64(total))) + share := total / n + + errC := make(chan error, n) + + for i := 0; i < n; i++ { + go func() { + errC <- spam(logger, share) + }() + } + + for i := 0; i < n; i++ { + err := <-errC + if err != nil { + t.Fatalf("concurrent logging error: %v", err) + } + } +} + +func spam(logger kitlog.Logger, count int) error { + for i := 0; i < count; i++ { + err := logger.Log("key", i) + if err != nil { + return err + } + } + return nil +} + +type mymap map[int]int + +func (m mymap) String() string { return "special_behavior" } diff --git a/libs/log/tracing_logger.go b/libs/log/tracing_logger.go new file mode 100644 index 000000000..d2a6ff44e --- /dev/null +++ b/libs/log/tracing_logger.go @@ -0,0 +1,76 @@ +package log + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// NewTracingLogger enables tracing by wrapping all errors (if they +// implement stackTracer interface) in tracedError. +// +// All errors returned by https://github.com/pkg/errors implement stackTracer +// interface. +// +// For debugging purposes only as it doubles the amount of allocations. +func NewTracingLogger(next Logger) Logger { + return &tracingLogger{ + next: next, + } +} + +type stackTracer interface { + error + StackTrace() errors.StackTrace +} + +type tracingLogger struct { + next Logger +} + +func (l *tracingLogger) Info(msg string, keyvals ...interface{}) { + l.next.Info(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) { + l.next.Debug(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Error(msg string, keyvals ...interface{}) { + l.next.Error(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) With(keyvals ...interface{}) Logger { + return &tracingLogger{next: l.next.With(formatErrors(keyvals)...)} +} + +func formatErrors(keyvals []interface{}) []interface{} { + newKeyvals := make([]interface{}, len(keyvals)) + copy(newKeyvals, keyvals) + for i := 0; i < len(newKeyvals)-1; i += 2 { + if err, ok := newKeyvals[i+1].(stackTracer); ok { + newKeyvals[i+1] = tracedError{err} + } + } + return newKeyvals +} + +// tracedError wraps a stackTracer and just makes the Error() result +// always return a full stack trace. 
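+//
+// Illustrative effect (assuming an error created by github.com/pkg/errors):
+// Error() renders fmt.Sprintf("%+v", err), i.e. the message followed by its
+// stack frames, as the Error method below shows.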
+type tracedError struct { + wrapped stackTracer +} + +var _ stackTracer = tracedError{} + +func (t tracedError) StackTrace() errors.StackTrace { + return t.wrapped.StackTrace() +} + +func (t tracedError) Cause() error { + return t.wrapped +} + +func (t tracedError) Error() string { + return fmt.Sprintf("%+v", t.wrapped) +} diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go new file mode 100644 index 000000000..1abc6440f --- /dev/null +++ b/libs/log/tracing_logger_test.go @@ -0,0 +1,41 @@ +package log_test + +import ( + "bytes" + stderr "errors" + "fmt" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/log" +) + +func TestTracingLogger(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + + logger1 := log.NewTracingLogger(logger) + err1 := errors.New("Courage is grace under pressure.") + err2 := errors.New("It does not matter how slowly you go, so long as you do not stop.") + logger1.With("err1", err1).Info("foo", "err2", err2) + have := strings.Replace(strings.Replace(strings.TrimSpace(buf.String()), "\\n", "", -1), "\\t", "", -1) + if want := strings.Replace(strings.Replace(`{"_msg":"foo","err1":"`+fmt.Sprintf("%+v", err1)+`","err2":"`+fmt.Sprintf("%+v", err2)+`","level":"info"}`, "\t", "", -1), "\n", "", -1); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger.With("err1", stderr.New("Opportunities don't happen. You create them.")).Info("foo", "err2", stderr.New("Once you choose hope, anything's possible.")) + if want, have := `{"_msg":"foo","err1":"Opportunities don't happen. You create them.","err2":"Once you choose hope, anything's possible.","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go new file mode 100644 index 000000000..4e4634de5 --- /dev/null +++ b/libs/pubsub/example_test.go @@ -0,0 +1,28 @@ +package pubsub_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func TestExample(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}, 1) + err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]string{"abci.account.name": "John"})) + require.NoError(t, err) + assertReceive(t, "Tombstone", ch) +} diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go new file mode 100644 index 000000000..4280ca1ea --- /dev/null +++ b/libs/pubsub/pubsub.go @@ -0,0 +1,351 @@ +// Package pubsub implements a pub-sub model with a single publisher (Server) +// and multiple subscribers (clients). +// +// Though you can have multiple publishers by sharing a pointer to a server or +// by giving the same channel to each publisher and publishing messages from +// that channel (fan-in). 
+//
+// Clients subscribe for messages, which can be of any type, using a query.
+// When a message is published, we match it against all queries. If there is a
+// match, the message is pushed to all clients subscribed to that query.
+// See query subpackage for our implementation.
+package pubsub

+import (
+	"context"
+	"errors"
+	"sync"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+type operation int
+
+const (
+	sub operation = iota
+	pub
+	unsub
+	shutdown
+)
+
+var (
+	// ErrSubscriptionNotFound is returned when a client tries to unsubscribe
+	// from a subscription that does not exist.
+	ErrSubscriptionNotFound = errors.New("subscription not found")
+
+	// ErrAlreadySubscribed is returned when a client tries to subscribe twice or
+	// more using the same query.
+	ErrAlreadySubscribed = errors.New("already subscribed")
+)
+
+type cmd struct {
+	op       operation
+	query    Query
+	ch       chan<- interface{}
+	clientID string
+	msg      interface{}
+	tags     TagMap
+}
+
+// Query defines an interface for a query to be used for subscribing.
+type Query interface {
+	Matches(tags TagMap) bool
+	String() string
+}
+
+// Server allows clients to subscribe to and unsubscribe from messages,
+// publishes messages with or without tags, and manages internal state.
+type Server struct {
+	cmn.BaseService
+
+	cmds    chan cmd
+	cmdsCap int
+
+	mtx           sync.RWMutex
+	subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query
+}
+
+// Option sets a parameter for the server.
+type Option func(*Server)
+
+// TagMap is used to associate tags with a message.
+// Subscribers can query tags to choose the messages they will receive.
+type TagMap interface {
+	// Get returns the value for a key, or an empty string if no value is
+	// present. The ok result indicates whether value was found in the tags.
+	Get(key string) (value string, ok bool)
+	// Len returns the number of tags.
+	Len() int
+}
+
+type tagMap map[string]string
+
+var _ TagMap = (*tagMap)(nil)
+
+// NewTagMap constructs a new immutable tag set from a map.
+func NewTagMap(data map[string]string) TagMap {
+	return tagMap(data)
+}
+
+// Get returns the value for a key, or an empty string if no value is present.
+// The ok result indicates whether value was found in the tags.
+func (ts tagMap) Get(key string) (value string, ok bool) {
+	value, ok = ts[key]
+	return
+}
+
+// Len returns the number of tags.
+func (ts tagMap) Len() int {
+	return len(ts)
+}
+
+// NewServer returns a new server. See the commentary on the Option functions
+// for a detailed description of how to configure buffering. If no options are
+// provided, the resulting server's queue is unbuffered.
+func NewServer(options ...Option) *Server {
+	s := &Server{
+		subscriptions: make(map[string]map[string]Query),
+	}
+	s.BaseService = *cmn.NewBaseService(nil, "PubSub", s)
+
+	for _, option := range options {
+		option(s)
+	}
+
+	// if BufferCapacity option was not set, the channel is unbuffered
+	s.cmds = make(chan cmd, s.cmdsCap)
+
+	return s
+}
+
+// BufferCapacity allows you to specify the capacity of the internal server's
+// queue. Since the server can only process messages so fast for a given set
+// of subscribers, this option can be used to survive spikes (e.g. a high
+// volume of transactions during peak hours).
+func BufferCapacity(cap int) Option {
+	return func(s *Server) {
+		if cap > 0 {
+			s.cmdsCap = cap
+		}
+	}
+}
+
+// BufferCapacity returns the capacity of the internal server's queue.
+func (s *Server) BufferCapacity() int {
+	return s.cmdsCap
+}
+
+// Subscribe creates a subscription for the given client. It accepts a channel
+// on which messages matching the given query can be received. An error will be
+// returned to the caller if the context is canceled or if a subscription
+// already exists for this clientID and query pair.
+func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error {
+	s.mtx.RLock()
+	clientSubscriptions, ok := s.subscriptions[clientID]
+	if ok {
+		_, ok = clientSubscriptions[query.String()]
+	}
+	s.mtx.RUnlock()
+	if ok {
+		return ErrAlreadySubscribed
+	}
+
+	select {
+	case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}:
+		s.mtx.Lock()
+		if _, ok = s.subscriptions[clientID]; !ok {
+			s.subscriptions[clientID] = make(map[string]Query)
+		}
+		// preserve original query
+		// see Unsubscribe
+		s.subscriptions[clientID][query.String()] = query
+		s.mtx.Unlock()
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Unsubscribe removes the subscription on the given query. An error will be
+// returned to the caller if the context is canceled or if the subscription
+// does not exist.
+func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error {
+	var origQuery Query
+	s.mtx.RLock()
+	clientSubscriptions, ok := s.subscriptions[clientID]
+	if ok {
+		origQuery, ok = clientSubscriptions[query.String()]
+	}
+	s.mtx.RUnlock()
+	if !ok {
+		return ErrSubscriptionNotFound
+	}
+
+	// original query is used here because we're using pointers as map keys
+	select {
+	case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}:
+		s.mtx.Lock()
+		delete(clientSubscriptions, query.String())
+		s.mtx.Unlock()
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// UnsubscribeAll removes all subscriptions for the given client. An error will
+// be returned to the caller if the context is canceled or if the client has no
+// subscriptions.
+func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
+	s.mtx.RLock()
+	_, ok := s.subscriptions[clientID]
+	s.mtx.RUnlock()
+	if !ok {
+		return ErrSubscriptionNotFound
+	}
+
+	select {
+	case s.cmds <- cmd{op: unsub, clientID: clientID}:
+		s.mtx.Lock()
+		delete(s.subscriptions, clientID)
+		s.mtx.Unlock()
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Publish publishes the given message. An error will be returned to the caller
+// if the context is canceled.
+func (s *Server) Publish(ctx context.Context, msg interface{}) error {
+	return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]string)))
+}
+
+// PublishWithTags publishes the given message with the set of tags. The set is
+// matched against the clients' queries. If there is a match, the message is
+// sent to the client.
+func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error {
+	select {
+	case s.cmds <- cmd{op: pub, msg: msg, tags: tags}:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// OnStop implements Service.OnStop by shutting down the server.
+func (s *Server) OnStop() {
+	s.cmds <- cmd{op: shutdown}
+}
+
+// NOTE: not goroutine safe
+type state struct {
+	// query -> client -> ch
+	queries map[Query]map[string]chan<- interface{}
+	// client -> query -> struct{}
+	clients map[string]map[Query]struct{}
+}
+
+// OnStart implements Service.OnStart by starting the server.
+func (s *Server) OnStart() error { + go s.loop(state{ + queries: make(map[Query]map[string]chan<- interface{}), + clients: make(map[string]map[Query]struct{}), + }) + return nil +} + +// OnReset implements Service.OnReset +func (s *Server) OnReset() error { + return nil +} + +func (s *Server) loop(state state) { +loop: + for cmd := range s.cmds { + switch cmd.op { + case unsub: + if cmd.query != nil { + state.remove(cmd.clientID, cmd.query) + } else { + state.removeAll(cmd.clientID) + } + case shutdown: + for clientID := range state.clients { + state.removeAll(clientID) + } + break loop + case sub: + state.add(cmd.clientID, cmd.query, cmd.ch) + case pub: + state.send(cmd.msg, cmd.tags) + } + } +} + +func (state *state) add(clientID string, q Query, ch chan<- interface{}) { + // add query if needed + if _, ok := state.queries[q]; !ok { + state.queries[q] = make(map[string]chan<- interface{}) + } + + // create subscription + state.queries[q][clientID] = ch + + // add client if needed + if _, ok := state.clients[clientID]; !ok { + state.clients[clientID] = make(map[Query]struct{}) + } + state.clients[clientID][q] = struct{}{} +} + +func (state *state) remove(clientID string, q Query) { + clientToChannelMap, ok := state.queries[q] + if !ok { + return + } + + ch, ok := clientToChannelMap[clientID] + if ok { + close(ch) + + delete(state.clients[clientID], q) + + // if it not subscribed to anything else, remove the client + if len(state.clients[clientID]) == 0 { + delete(state.clients, clientID) + } + + delete(state.queries[q], clientID) + if len(state.queries[q]) == 0 { + delete(state.queries, q) + } + } +} + +func (state *state) removeAll(clientID string) { + queryMap, ok := state.clients[clientID] + if !ok { + return + } + + for q := range queryMap { + ch := state.queries[q][clientID] + close(ch) + + delete(state.queries[q], clientID) + if len(state.queries[q]) == 0 { + delete(state.queries, q) + } + } + delete(state.clients, clientID) +} + +func (state *state) send(msg interface{}, tags TagMap) { + for q, clientToChannelMap := range state.queries { + if q.Matches(tags) { + for _, ch := range clientToChannelMap { + ch <- msg + } + } + } +} diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go new file mode 100644 index 000000000..5e9931e40 --- /dev/null +++ b/libs/pubsub/pubsub_test.go @@ -0,0 +1,253 @@ +package pubsub_test + +import ( + "context" + "fmt" + "runtime/debug" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +const ( + clientID = "test-client" +) + +func TestSubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}, 1) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + err = s.Publish(ctx, "Ka-Zar") + require.NoError(t, err) + assertReceive(t, "Ka-Zar", ch) + + err = s.Publish(ctx, "Quicksilver") + require.NoError(t, err) + assertReceive(t, "Quicksilver", ch) +} + +func TestDifferentClients(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch1 := make(chan interface{}, 1) + err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1) + require.NoError(t, err) + err = s.PublishWithTags(ctx, 
"Iceman", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) + require.NoError(t, err) + assertReceive(t, "Iceman", ch1) + + ch2 := make(chan interface{}, 1) + err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"})) + require.NoError(t, err) + assertReceive(t, "Ultimo", ch1) + assertReceive(t, "Ultimo", ch2) + + ch3 := make(chan interface{}, 1) + err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewRoundStep"})) + require.NoError(t, err) + assert.Zero(t, len(ch3)) +} + +func TestClientSubscribesTwice(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + q := query.MustParse("tm.events.type='NewBlock'") + + ch1 := make(chan interface{}, 1) + err := s.Subscribe(ctx, clientID, q, ch1) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) + require.NoError(t, err) + assertReceive(t, "Goblin Queen", ch1) + + ch2 := make(chan interface{}, 1) + err = s.Subscribe(ctx, clientID, q, ch2) + require.Error(t, err) + + err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) + require.NoError(t, err) + assertReceive(t, "Spider-Man", ch1) +} + +func TestUnsubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}) + err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) + require.NoError(t, err) + + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe") + + _, ok := <-ch + assert.False(t, ok) +} + +func TestResubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.Empty{}) + require.NoError(t, err) + ch = make(chan interface{}) + err = s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + + err = s.Publish(ctx, "Cable") + require.NoError(t, err) + assertReceive(t, "Cable", ch) +} + +func TestUnsubscribeAll(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) + err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1) + require.NoError(t, err) + err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2) + require.NoError(t, err) + + err = s.UnsubscribeAll(ctx, clientID) + require.NoError(t, err) + + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(ch1), "Should not receive anything after 
UnsubscribeAll") + assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll") + + _, ok := <-ch1 + assert.False(t, ok) + _, ok = <-ch2 + assert.False(t, ok) +} + +func TestBufferCapacity(t *testing.T) { + s := pubsub.NewServer(pubsub.BufferCapacity(2)) + s.SetLogger(log.TestingLogger()) + + assert.Equal(t, 2, s.BufferCapacity()) + + ctx := context.Background() + err := s.Publish(ctx, "Nighthawk") + require.NoError(t, err) + err = s.Publish(ctx, "Sage") + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + err = s.Publish(ctx, "Ironclad") + if assert.Error(t, err) { + assert.Equal(t, context.DeadlineExceeded, err) + } +} + +func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } +func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } +func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } + +func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } +func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } +func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } + +func benchmarkNClients(n int, b *testing.B) { + s := pubsub.NewServer() + s.Start() + defer s.Stop() + + ctx := context.Background() + for i := 0; i < n; i++ { + ch := make(chan interface{}) + go func() { + for range ch { + } + }() + s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)})) + } +} + +func benchmarkNClientsOneQuery(n int, b *testing.B) { + s := pubsub.NewServer() + s.Start() + defer s.Stop() + + ctx := context.Background() + q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") + for i := 0; i < n; i++ { + ch := make(chan interface{}) + go func() { + for range ch { + } + }() + s.Subscribe(ctx, clientID, q, ch) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"})) + } +} + +/////////////////////////////////////////////////////////////////////////////// +/// HELPERS +/////////////////////////////////////////////////////////////////////////////// + +func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) { + select { + case actual := <-ch: + if actual != nil { + assert.Equal(t, expected, actual, msgAndArgs...) 
+ } + case <-time.After(1 * time.Second): + t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected) + debug.PrintStack() + } +} diff --git a/libs/pubsub/query/Makefile b/libs/pubsub/query/Makefile new file mode 100644 index 000000000..91030ef09 --- /dev/null +++ b/libs/pubsub/query/Makefile @@ -0,0 +1,11 @@ +gen_query_parser: + @go get github.com/pointlander/peg + peg -inline -switch query.peg + +fuzzy_test: + @go get github.com/dvyukov/go-fuzz/go-fuzz + @go get github.com/dvyukov/go-fuzz/go-fuzz-build + go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test + go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output + +.PHONY: gen_query_parser fuzzy_test diff --git a/libs/pubsub/query/empty.go b/libs/pubsub/query/empty.go new file mode 100644 index 000000000..17d7acefa --- /dev/null +++ b/libs/pubsub/query/empty.go @@ -0,0 +1,16 @@ +package query + +import "github.com/tendermint/tendermint/libs/pubsub" + +// Empty query matches any set of tags. +type Empty struct { +} + +// Matches always returns true. +func (Empty) Matches(tags pubsub.TagMap) bool { + return true +} + +func (Empty) String() string { + return "empty" +} diff --git a/libs/pubsub/query/empty_test.go b/libs/pubsub/query/empty_test.go new file mode 100644 index 000000000..6183b6bd4 --- /dev/null +++ b/libs/pubsub/query/empty_test.go @@ -0,0 +1,18 @@ +package query_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func TestEmptyQueryMatchesAnything(t *testing.T) { + q := query.Empty{} + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Asher": "Roth"}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66"}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66", "Billy": "Blue"}))) +} diff --git a/libs/pubsub/query/fuzz_test/main.go b/libs/pubsub/query/fuzz_test/main.go new file mode 100644 index 000000000..7a46116b5 --- /dev/null +++ b/libs/pubsub/query/fuzz_test/main.go @@ -0,0 +1,30 @@ +package fuzz_test + +import ( + "fmt" + + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func Fuzz(data []byte) int { + sdata := string(data) + q0, err := query.New(sdata) + if err != nil { + return 0 + } + + sdata1 := q0.String() + q1, err := query.New(sdata1) + if err != nil { + panic(err) + } + + sdata2 := q1.String() + if sdata1 != sdata2 { + fmt.Printf("q0: %q\n", sdata1) + fmt.Printf("q1: %q\n", sdata2) + panic("query changed") + } + + return 1 +} diff --git a/libs/pubsub/query/parser_test.go b/libs/pubsub/query/parser_test.go new file mode 100644 index 000000000..708dee484 --- /dev/null +++ b/libs/pubsub/query/parser_test.go @@ -0,0 +1,92 @@ +package query_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +// TODO: fuzzy testing? 
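+
+// Minimal usage sketch (the test name is invented for this illustration):
+// parse a valid query and read back its string form; the table-driven test
+// below then sweeps valid and invalid inputs.
+func TestParserUsageSketch(t *testing.T) {
+	q, err := query.New("tm.events.type='NewBlock'")
+	assert.NoError(t, err)
+	assert.Equal(t, "tm.events.type='NewBlock'", q.String())
+}
+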
+func TestParser(t *testing.T) { + cases := []struct { + query string + valid bool + }{ + {"tm.events.type='NewBlock'", true}, + {"tm.events.type = 'NewBlock'", true}, + {"tm.events.name = ''", true}, + {"tm.events.type='TIME'", true}, + {"tm.events.type='DATE'", true}, + {"tm.events.type='='", true}, + {"tm.events.type='TIME", false}, + {"tm.events.type=TIME'", false}, + {"tm.events.type==", false}, + {"tm.events.type=NewBlock", false}, + {">==", false}, + {"tm.events.type 'NewBlock' =", false}, + {"tm.events.type>'NewBlock'", false}, + {"", false}, + {"=", false}, + {"='NewBlock'", false}, + {"tm.events.type=", false}, + + {"tm.events.typeNewBlock", false}, + {"tm.events.type'NewBlock'", false}, + {"'NewBlock'", false}, + {"NewBlock", false}, + {"", false}, + + {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, + {"tm.events.type='NewBlock' AND", false}, + {"tm.events.type='NewBlock' AN", false}, + {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, + {"AND tm.events.type='NewBlock' ", false}, + + {"abci.account.name CONTAINS 'Igor'", true}, + + {"tx.date > DATE 2013-05-03", true}, + {"tx.date < DATE 2013-05-03", true}, + {"tx.date <= DATE 2013-05-03", true}, + {"tx.date >= DATE 2013-05-03", true}, + {"tx.date >= DAT 2013-05-03", false}, + {"tx.date <= DATE2013-05-03", false}, + {"tx.date <= DATE -05-03", false}, + {"tx.date >= DATE 20130503", false}, + {"tx.date >= DATE 2013+01-03", false}, + // incorrect year, month, day + {"tx.date >= DATE 0013-01-03", false}, + {"tx.date >= DATE 2013-31-03", false}, + {"tx.date >= DATE 2013-01-83", false}, + + {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, + {"tx.date < TIME 2013-05-03T14:45:00-02:00", true}, + {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME2013-05-03T14:45:00Z", false}, + {"tx.date = IME 2013-05-03T14:45:00Z", false}, + {"tx.date = TIME 2013-05-:45:00Z", false}, + {"tx.date >= TIME 2013-05-03T14:45:00", false}, + {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, + {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, + + {"account.balance=100", true}, + {"account.balance >= 200", true}, + {"account.balance >= -300", false}, + {"account.balance >>= 400", false}, + {"account.balance=33.22.1", false}, + + {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, + {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, + } + + for _, c := range cases { + _, err := query.New(c.query) + if c.valid { + assert.NoErrorf(t, err, "Query was '%s'", c.query) + } else { + assert.Errorf(t, err, "Query was '%s'", c.query) + } + } +} diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go new file mode 100644 index 000000000..ec187486e --- /dev/null +++ b/libs/pubsub/query/query.go @@ -0,0 +1,339 @@ +// Package query provides a parser for a custom query format: +// +// abci.invoice.number=22 AND abci.invoice.owner=Ivan +// +// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. +// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics +// +// It has a support for numbers (integer and floating point), dates and times. +package query + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/tendermint/tendermint/libs/pubsub" +) + +// Query holds the query string and the query parser. +type Query struct { + str string + parser *QueryParser +} + +// Condition represents a single condition within a query and consists of tag +// (e.g. 
"tx.gas"), operator (e.g. "=") and operand (e.g. "7"). +type Condition struct { + Tag string + Op Operator + Operand interface{} +} + +// New parses the given string and returns a query or error if the string is +// invalid. +func New(s string) (*Query, error) { + p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)} + p.Init() + if err := p.Parse(); err != nil { + return nil, err + } + return &Query{str: s, parser: p}, nil +} + +// MustParse turns the given string into a query or panics; for tests or others +// cases where you know the string is valid. +func MustParse(s string) *Query { + q, err := New(s) + if err != nil { + panic(fmt.Sprintf("failed to parse %s: %v", s, err)) + } + return q +} + +// String returns the original string. +func (q *Query) String() string { + return q.str +} + +// Operator is an operator that defines some kind of relation between tag and +// operand (equality, etc.). +type Operator uint8 + +const ( + // "<=" + OpLessEqual Operator = iota + // ">=" + OpGreaterEqual + // "<" + OpLess + // ">" + OpGreater + // "=" + OpEqual + // "CONTAINS"; used to check if a string contains a certain sub string. + OpContains +) + +const ( + // DateLayout defines a layout for all dates (`DATE date`) + DateLayout = "2006-01-02" + // TimeLayout defines a layout for all times (`TIME time`) + TimeLayout = time.RFC3339 +) + +// Conditions returns a list of conditions. +func (q *Query) Conditions() []Condition { + conditions := make([]Condition, 0) + + buffer, begin, end := q.parser.Buffer, 0, 0 + + var tag string + var op Operator + + // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + for _, token := range q.parser.Tokens() { + switch token.pegRule { + + case rulePegText: + begin, end = int(token.begin), int(token.end) + case ruletag: + tag = buffer[begin:end] + case rulele: + op = OpLessEqual + case rulege: + op = OpGreaterEqual + case rulel: + op = OpLess + case ruleg: + op = OpGreater + case ruleequal: + op = OpEqual + case rulecontains: + op = OpContains + case rulevalue: + // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") + valueWithoutSingleQuotes := buffer[begin+1 : end-1] + conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes}) + case rulenumber: + number := buffer[begin:end] + if strings.ContainsAny(number, ".") { // if it looks like a floating-point number + value, err := strconv.ParseFloat(number, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) + } + conditions = append(conditions, Condition{tag, op, value}) + } else { + value, err := strconv.ParseInt(number, 10, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) + } + conditions = append(conditions, Condition{tag, op, value}) + } + case ruletime: + value, err := time.Parse(TimeLayout, buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + conditions = append(conditions, Condition{tag, op, value}) + case ruledate: + value, err := time.Parse("2006-01-02", buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + conditions = append(conditions, Condition{tag, op, value}) + } + } + + return conditions +} + +// Matches returns true if the query matches the given set of tags, false otherwise. +// +// For example, query "name=John" matches tags = {"name": "John"}. More +// examples could be found in parser_test.go and query_test.go. +func (q *Query) Matches(tags pubsub.TagMap) bool { + if tags.Len() == 0 { + return false + } + + buffer, begin, end := q.parser.Buffer, 0, 0 + + var tag string + var op Operator + + // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + for _, token := range q.parser.Tokens() { + switch token.pegRule { + + case rulePegText: + begin, end = int(token.begin), int(token.end) + case ruletag: + tag = buffer[begin:end] + case rulele: + op = OpLessEqual + case rulege: + op = OpGreaterEqual + case rulel: + op = OpLess + case ruleg: + op = OpGreater + case ruleequal: + op = OpEqual + case rulecontains: + op = OpContains + case rulevalue: + // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") + valueWithoutSingleQuotes := buffer[begin+1 : end-1] + + // see if the triplet (tag, operator, operand) matches any tag + // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } + if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) { + return false + } + case rulenumber: + number := buffer[begin:end] + if strings.ContainsAny(number, ".") { // if it looks like a floating-point number + value, err := strconv.ParseFloat(number, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } else { + value, err := strconv.ParseInt(number, 10, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } + case ruletime: + value, err := time.Parse(TimeLayout, buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + case ruledate: + value, err := time.Parse("2006-01-02", buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } + } + + return true +} + +// match returns true if the given triplet (tag, operator, operand) matches any tag. +// +// First, it looks up the tag in tags and if it finds one, tries to compare the +// value from it to the operand using the operator. 
+// +// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } +func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool { + // look up the tag from the query in tags + value, ok := tags.Get(tag) + if !ok { + return false + } + switch operand.Kind() { + case reflect.Struct: // time + operandAsTime := operand.Interface().(time.Time) + // try our best to convert value from tags to time.Time + var ( + v time.Time + err error + ) + if strings.ContainsAny(value, "T") { + v, err = time.Parse(TimeLayout, value) + } else { + v, err = time.Parse(DateLayout, value) + } + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to time.Time: %v", value, err)) + } + switch op { + case OpLessEqual: + return v.Before(operandAsTime) || v.Equal(operandAsTime) + case OpGreaterEqual: + return v.Equal(operandAsTime) || v.After(operandAsTime) + case OpLess: + return v.Before(operandAsTime) + case OpGreater: + return v.After(operandAsTime) + case OpEqual: + return v.Equal(operandAsTime) + } + case reflect.Float64: + operandFloat64 := operand.Interface().(float64) + var v float64 + // try our best to convert value from tags to float64 + v, err := strconv.ParseFloat(value, 64) + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) + } + switch op { + case OpLessEqual: + return v <= operandFloat64 + case OpGreaterEqual: + return v >= operandFloat64 + case OpLess: + return v < operandFloat64 + case OpGreater: + return v > operandFloat64 + case OpEqual: + return v == operandFloat64 + } + case reflect.Int64: + operandInt := operand.Interface().(int64) + var v int64 + // if value looks like float, we try to parse it as float + if strings.ContainsAny(value, ".") { + v1, err := strconv.ParseFloat(value, 64) + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) + } + v = int64(v1) + } else { + var err error + // try our best to convert value from tags to int64 + v, err = strconv.ParseInt(value, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to int64: %v", value, err)) + } + } + switch op { + case OpLessEqual: + return v <= operandInt + case OpGreaterEqual: + return v >= operandInt + case OpLess: + return v < operandInt + case OpGreater: + return v > operandInt + case OpEqual: + return v == operandInt + } + case reflect.String: + switch op { + case OpEqual: + return value == operand.String() + case OpContains: + return strings.Contains(value, operand.String()) + } + default: + panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind())) + } + + return false +} diff --git a/libs/pubsub/query/query.peg b/libs/pubsub/query/query.peg new file mode 100644 index 000000000..739892e4f --- /dev/null +++ b/libs/pubsub/query/query.peg @@ -0,0 +1,33 @@ +package query + +type QueryParser Peg { +} + +e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !. + +condition <- tag ' '* (le ' '* (number / time / date) + / ge ' '* (number / time / date) + / l ' '* (number / time / date) + / g ' '* (number / time / date) + / equal ' '* (number / time / date / value) + / contains ' '* value + ) + +tag <- < (![ \t\n\r\\()"'=><] .)+ > +value <- < '\'' (!["'] .)* '\''> +number <- < ('0' + / [1-9] digit* ('.' digit*)?) 
> +digit <- [0-9] +time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') > +date <- "DATE " < year '-' month '-' day > +year <- ('1' / '2') digit digit digit +month <- ('0' / '1') digit +day <- ('0' / '1' / '2' / '3') digit +and <- "AND" + +equal <- "=" +contains <- "CONTAINS" +le <- "<=" +ge <- ">=" +l <- "<" +g <- ">" diff --git a/libs/pubsub/query/query.peg.go b/libs/pubsub/query/query.peg.go new file mode 100644 index 000000000..c86e4a47f --- /dev/null +++ b/libs/pubsub/query/query.peg.go @@ -0,0 +1,1553 @@ +// nolint +package query + +import ( + "fmt" + "math" + "sort" + "strconv" +) + +const endSymbol rune = 1114112 + +/* The rule types inferred from the grammar are below. */ +type pegRule uint8 + +const ( + ruleUnknown pegRule = iota + rulee + rulecondition + ruletag + rulevalue + rulenumber + ruledigit + ruletime + ruledate + ruleyear + rulemonth + ruleday + ruleand + ruleequal + rulecontains + rulele + rulege + rulel + ruleg + rulePegText +) + +var rul3s = [...]string{ + "Unknown", + "e", + "condition", + "tag", + "value", + "number", + "digit", + "time", + "date", + "year", + "month", + "day", + "and", + "equal", + "contains", + "le", + "ge", + "l", + "g", + "PegText", +} + +type token32 struct { + pegRule + begin, end uint32 +} + +func (t *token32) String() string { + return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end) +} + +type node32 struct { + token32 + up, next *node32 +} + +func (node *node32) print(pretty bool, buffer string) { + var print func(node *node32, depth int) + print = func(node *node32, depth int) { + for node != nil { + for c := 0; c < depth; c++ { + fmt.Printf(" ") + } + rule := rul3s[node.pegRule] + quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end]))) + if !pretty { + fmt.Printf("%v %v\n", rule, quote) + } else { + fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote) + } + if node.up != nil { + print(node.up, depth+1) + } + node = node.next + } + } + print(node, 0) +} + +func (node *node32) Print(buffer string) { + node.print(false, buffer) +} + +func (node *node32) PrettyPrint(buffer string) { + node.print(true, buffer) +} + +type tokens32 struct { + tree []token32 +} + +func (t *tokens32) Trim(length uint32) { + t.tree = t.tree[:length] +} + +func (t *tokens32) Print() { + for _, token := range t.tree { + fmt.Println(token.String()) + } +} + +func (t *tokens32) AST() *node32 { + type element struct { + node *node32 + down *element + } + tokens := t.Tokens() + var stack *element + for _, token := range tokens { + if token.begin == token.end { + continue + } + node := &node32{token32: token} + for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { + stack.node.next = node.up + node.up = stack.node + stack = stack.down + } + stack = &element{node: node, down: stack} + } + if stack != nil { + return stack.node + } + return nil +} + +func (t *tokens32) PrintSyntaxTree(buffer string) { + t.AST().Print(buffer) +} + +func (t *tokens32) PrettyPrintSyntaxTree(buffer string) { + t.AST().PrettyPrint(buffer) +} + +func (t *tokens32) Add(rule pegRule, begin, end, index uint32) { + if tree := t.tree; int(index) >= len(tree) { + expanded := make([]token32, 2*len(tree)) + copy(expanded, tree) + t.tree = expanded + } + t.tree[index] = token32{ + pegRule: rule, + begin: begin, + end: end, + } +} + +func (t *tokens32) Tokens() []token32 { + return t.tree +} + +type QueryParser struct { + Buffer string + buffer []rune + rules 
[20]func() bool + parse func(rule ...int) error + reset func() + Pretty bool + tokens32 +} + +func (p *QueryParser) Parse(rule ...int) error { + return p.parse(rule...) +} + +func (p *QueryParser) Reset() { + p.reset() +} + +type textPosition struct { + line, symbol int +} + +type textPositionMap map[int]textPosition + +func translatePositions(buffer []rune, positions []int) textPositionMap { + length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 + sort.Ints(positions) + +search: + for i, c := range buffer { + if c == '\n' { + line, symbol = line+1, 0 + } else { + symbol++ + } + if i == positions[j] { + translations[positions[j]] = textPosition{line, symbol} + for j++; j < length; j++ { + if i != positions[j] { + continue search + } + } + break search + } + } + + return translations +} + +type parseError struct { + p *QueryParser + max token32 +} + +func (e *parseError) Error() string { + tokens, error := []token32{e.max}, "\n" + positions, p := make([]int, 2*len(tokens)), 0 + for _, token := range tokens { + positions[p], p = int(token.begin), p+1 + positions[p], p = int(token.end), p+1 + } + translations := translatePositions(e.p.buffer, positions) + format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" + if e.p.Pretty { + format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" + } + for _, token := range tokens { + begin, end := int(token.begin), int(token.end) + error += fmt.Sprintf(format, + rul3s[token.pegRule], + translations[begin].line, translations[begin].symbol, + translations[end].line, translations[end].symbol, + strconv.Quote(string(e.p.buffer[begin:end]))) + } + + return error +} + +func (p *QueryParser) PrintSyntaxTree() { + if p.Pretty { + p.tokens32.PrettyPrintSyntaxTree(p.Buffer) + } else { + p.tokens32.PrintSyntaxTree(p.Buffer) + } +} + +func (p *QueryParser) Init() { + var ( + max token32 + position, tokenIndex uint32 + buffer []rune + ) + p.reset = func() { + max = token32{} + position, tokenIndex = 0, 0 + + p.buffer = []rune(p.Buffer) + if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != endSymbol { + p.buffer = append(p.buffer, endSymbol) + } + buffer = p.buffer + } + p.reset() + + _rules := p.rules + tree := tokens32{tree: make([]token32, math.MaxInt16)} + p.parse = func(rule ...int) error { + r := 1 + if len(rule) > 0 { + r = rule[0] + } + matches := p.rules[r]() + p.tokens32 = tree + if matches { + p.Trim(tokenIndex) + return nil + } + return &parseError{p, max} + } + + add := func(rule pegRule, begin uint32) { + tree.Add(rule, begin, position, tokenIndex) + tokenIndex++ + if begin != position && position > max.end { + max = token32{rule, begin, position} + } + } + + matchDot := func() bool { + if buffer[position] != endSymbol { + position++ + return true + } + return false + } + + /*matchChar := func(c byte) bool { + if buffer[position] == c { + position++ + return true + } + return false + }*/ + + /*matchRange := func(lower byte, upper byte) bool { + if c := buffer[position]; c >= lower && c <= upper { + position++ + return true + } + return false + }*/ + + _rules = [...]func() bool{ + nil, + /* 0 e <- <('"' condition (' '+ and ' '+ condition)* '"' !.)> */ + func() bool { + position0, tokenIndex0 := position, tokenIndex + { + position1 := position + if buffer[position] != rune('"') { + goto l0 + } + position++ + if !_rules[rulecondition]() { + goto l0 + } + l2: + { + position3, tokenIndex3 := position, tokenIndex + if buffer[position] != rune(' ') { 
+ goto l3 + } + position++ + l4: + { + position5, tokenIndex5 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l5 + } + position++ + goto l4 + l5: + position, tokenIndex = position5, tokenIndex5 + } + { + position6 := position + { + position7, tokenIndex7 := position, tokenIndex + if buffer[position] != rune('a') { + goto l8 + } + position++ + goto l7 + l8: + position, tokenIndex = position7, tokenIndex7 + if buffer[position] != rune('A') { + goto l3 + } + position++ + } + l7: + { + position9, tokenIndex9 := position, tokenIndex + if buffer[position] != rune('n') { + goto l10 + } + position++ + goto l9 + l10: + position, tokenIndex = position9, tokenIndex9 + if buffer[position] != rune('N') { + goto l3 + } + position++ + } + l9: + { + position11, tokenIndex11 := position, tokenIndex + if buffer[position] != rune('d') { + goto l12 + } + position++ + goto l11 + l12: + position, tokenIndex = position11, tokenIndex11 + if buffer[position] != rune('D') { + goto l3 + } + position++ + } + l11: + add(ruleand, position6) + } + if buffer[position] != rune(' ') { + goto l3 + } + position++ + l13: + { + position14, tokenIndex14 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l14 + } + position++ + goto l13 + l14: + position, tokenIndex = position14, tokenIndex14 + } + if !_rules[rulecondition]() { + goto l3 + } + goto l2 + l3: + position, tokenIndex = position3, tokenIndex3 + } + if buffer[position] != rune('"') { + goto l0 + } + position++ + { + position15, tokenIndex15 := position, tokenIndex + if !matchDot() { + goto l15 + } + goto l0 + l15: + position, tokenIndex = position15, tokenIndex15 + } + add(rulee, position1) + } + return true + l0: + position, tokenIndex = position0, tokenIndex0 + return false + }, + /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / (ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('=') (equal ' '* ((&('\'') value) | (&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ + func() bool { + position16, tokenIndex16 := position, tokenIndex + { + position17 := position + { + position18 := position + { + position19 := position + { + position22, tokenIndex22 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l22 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l22 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l22 + } + position++ + break + case '\'': + if buffer[position] != rune('\'') { + goto l22 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l22 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l22 + } + position++ + break + case '(': + if buffer[position] != rune('(') { + goto l22 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l22 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l22 + } + position++ + break + case 
'\n': + if buffer[position] != rune('\n') { + goto l22 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l22 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l22 + } + position++ + break + } + } + + goto l16 + l22: + position, tokenIndex = position22, tokenIndex22 + } + if !matchDot() { + goto l16 + } + l20: + { + position21, tokenIndex21 := position, tokenIndex + { + position24, tokenIndex24 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l24 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l24 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l24 + } + position++ + break + case '\'': + if buffer[position] != rune('\'') { + goto l24 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l24 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l24 + } + position++ + break + case '(': + if buffer[position] != rune('(') { + goto l24 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l24 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l24 + } + position++ + break + case '\n': + if buffer[position] != rune('\n') { + goto l24 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l24 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l24 + } + position++ + break + } + } + + goto l21 + l24: + position, tokenIndex = position24, tokenIndex24 + } + if !matchDot() { + goto l21 + } + goto l20 + l21: + position, tokenIndex = position21, tokenIndex21 + } + add(rulePegText, position19) + } + add(ruletag, position18) + } + l26: + { + position27, tokenIndex27 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l27 + } + position++ + goto l26 + l27: + position, tokenIndex = position27, tokenIndex27 + } + { + position28, tokenIndex28 := position, tokenIndex + { + position30 := position + if buffer[position] != rune('<') { + goto l29 + } + position++ + if buffer[position] != rune('=') { + goto l29 + } + position++ + add(rulele, position30) + } + l31: + { + position32, tokenIndex32 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l32 + } + position++ + goto l31 + l32: + position, tokenIndex = position32, tokenIndex32 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l29 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l29 + } + break + default: + if !_rules[rulenumber]() { + goto l29 + } + break + } + } + + goto l28 + l29: + position, tokenIndex = position28, tokenIndex28 + { + position35 := position + if buffer[position] != rune('>') { + goto l34 + } + position++ + if buffer[position] != rune('=') { + goto l34 + } + position++ + add(rulege, position35) + } + l36: + { + position37, tokenIndex37 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l37 + } + position++ + goto l36 + l37: + position, tokenIndex = position37, tokenIndex37 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l34 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l34 + } + break + default: + if !_rules[rulenumber]() { + goto l34 + } + break + } + } + + goto l28 + l34: + position, tokenIndex = position28, tokenIndex28 + { + switch buffer[position] { + case '=': + { + position40 := position + if buffer[position] != rune('=') { + 
goto l16 + } + position++ + add(ruleequal, position40) + } + l41: + { + position42, tokenIndex42 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l42 + } + position++ + goto l41 + l42: + position, tokenIndex = position42, tokenIndex42 + } + { + switch buffer[position] { + case '\'': + if !_rules[rulevalue]() { + goto l16 + } + break + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + case '>': + { + position44 := position + if buffer[position] != rune('>') { + goto l16 + } + position++ + add(ruleg, position44) + } + l45: + { + position46, tokenIndex46 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l46 + } + position++ + goto l45 + l46: + position, tokenIndex = position46, tokenIndex46 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + case '<': + { + position48 := position + if buffer[position] != rune('<') { + goto l16 + } + position++ + add(rulel, position48) + } + l49: + { + position50, tokenIndex50 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l50 + } + position++ + goto l49 + l50: + position, tokenIndex = position50, tokenIndex50 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + default: + { + position52 := position + { + position53, tokenIndex53 := position, tokenIndex + if buffer[position] != rune('c') { + goto l54 + } + position++ + goto l53 + l54: + position, tokenIndex = position53, tokenIndex53 + if buffer[position] != rune('C') { + goto l16 + } + position++ + } + l53: + { + position55, tokenIndex55 := position, tokenIndex + if buffer[position] != rune('o') { + goto l56 + } + position++ + goto l55 + l56: + position, tokenIndex = position55, tokenIndex55 + if buffer[position] != rune('O') { + goto l16 + } + position++ + } + l55: + { + position57, tokenIndex57 := position, tokenIndex + if buffer[position] != rune('n') { + goto l58 + } + position++ + goto l57 + l58: + position, tokenIndex = position57, tokenIndex57 + if buffer[position] != rune('N') { + goto l16 + } + position++ + } + l57: + { + position59, tokenIndex59 := position, tokenIndex + if buffer[position] != rune('t') { + goto l60 + } + position++ + goto l59 + l60: + position, tokenIndex = position59, tokenIndex59 + if buffer[position] != rune('T') { + goto l16 + } + position++ + } + l59: + { + position61, tokenIndex61 := position, tokenIndex + if buffer[position] != rune('a') { + goto l62 + } + position++ + goto l61 + l62: + position, tokenIndex = position61, tokenIndex61 + if buffer[position] != rune('A') { + goto l16 + } + position++ + } + l61: + { + position63, tokenIndex63 := position, tokenIndex + if buffer[position] != rune('i') { + goto l64 + } + position++ + goto l63 + l64: + position, tokenIndex = position63, tokenIndex63 + if buffer[position] != rune('I') { + goto l16 + } + position++ + } + l63: + { + position65, tokenIndex65 := position, tokenIndex + if buffer[position] != rune('n') { + goto l66 + } + position++ + goto l65 + l66: + position, tokenIndex = position65, tokenIndex65 + if 
buffer[position] != rune('N') { + goto l16 + } + position++ + } + l65: + { + position67, tokenIndex67 := position, tokenIndex + if buffer[position] != rune('s') { + goto l68 + } + position++ + goto l67 + l68: + position, tokenIndex = position67, tokenIndex67 + if buffer[position] != rune('S') { + goto l16 + } + position++ + } + l67: + add(rulecontains, position52) + } + l69: + { + position70, tokenIndex70 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l70 + } + position++ + goto l69 + l70: + position, tokenIndex = position70, tokenIndex70 + } + if !_rules[rulevalue]() { + goto l16 + } + break + } + } + + } + l28: + add(rulecondition, position17) + } + return true + l16: + position, tokenIndex = position16, tokenIndex16 + return false + }, + /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('\'') '\'') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ + nil, + /* 3 value <- <<('\'' (!('"' / '\'') .)* '\'')>> */ + func() bool { + position72, tokenIndex72 := position, tokenIndex + { + position73 := position + { + position74 := position + if buffer[position] != rune('\'') { + goto l72 + } + position++ + l75: + { + position76, tokenIndex76 := position, tokenIndex + { + position77, tokenIndex77 := position, tokenIndex + { + position78, tokenIndex78 := position, tokenIndex + if buffer[position] != rune('"') { + goto l79 + } + position++ + goto l78 + l79: + position, tokenIndex = position78, tokenIndex78 + if buffer[position] != rune('\'') { + goto l77 + } + position++ + } + l78: + goto l76 + l77: + position, tokenIndex = position77, tokenIndex77 + } + if !matchDot() { + goto l76 + } + goto l75 + l76: + position, tokenIndex = position76, tokenIndex76 + } + if buffer[position] != rune('\'') { + goto l72 + } + position++ + add(rulePegText, position74) + } + add(rulevalue, position73) + } + return true + l72: + position, tokenIndex = position72, tokenIndex72 + return false + }, + /* 4 number <- <<('0' / ([1-9] digit* ('.' 
digit*)?))>> */ + func() bool { + position80, tokenIndex80 := position, tokenIndex + { + position81 := position + { + position82 := position + { + position83, tokenIndex83 := position, tokenIndex + if buffer[position] != rune('0') { + goto l84 + } + position++ + goto l83 + l84: + position, tokenIndex = position83, tokenIndex83 + if c := buffer[position]; c < rune('1') || c > rune('9') { + goto l80 + } + position++ + l85: + { + position86, tokenIndex86 := position, tokenIndex + if !_rules[ruledigit]() { + goto l86 + } + goto l85 + l86: + position, tokenIndex = position86, tokenIndex86 + } + { + position87, tokenIndex87 := position, tokenIndex + if buffer[position] != rune('.') { + goto l87 + } + position++ + l89: + { + position90, tokenIndex90 := position, tokenIndex + if !_rules[ruledigit]() { + goto l90 + } + goto l89 + l90: + position, tokenIndex = position90, tokenIndex90 + } + goto l88 + l87: + position, tokenIndex = position87, tokenIndex87 + } + l88: + } + l83: + add(rulePegText, position82) + } + add(rulenumber, position81) + } + return true + l80: + position, tokenIndex = position80, tokenIndex80 + return false + }, + /* 5 digit <- <[0-9]> */ + func() bool { + position91, tokenIndex91 := position, tokenIndex + { + position92 := position + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l91 + } + position++ + add(ruledigit, position92) + } + return true + l91: + position, tokenIndex = position91, tokenIndex91 + return false + }, + /* 6 time <- <(('t' / 'T') ('i' / 'I') ('m' / 'M') ('e' / 'E') ' ' <(year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit ((('-' / '+') digit digit ':' digit digit) / 'Z'))>)> */ + func() bool { + position93, tokenIndex93 := position, tokenIndex + { + position94 := position + { + position95, tokenIndex95 := position, tokenIndex + if buffer[position] != rune('t') { + goto l96 + } + position++ + goto l95 + l96: + position, tokenIndex = position95, tokenIndex95 + if buffer[position] != rune('T') { + goto l93 + } + position++ + } + l95: + { + position97, tokenIndex97 := position, tokenIndex + if buffer[position] != rune('i') { + goto l98 + } + position++ + goto l97 + l98: + position, tokenIndex = position97, tokenIndex97 + if buffer[position] != rune('I') { + goto l93 + } + position++ + } + l97: + { + position99, tokenIndex99 := position, tokenIndex + if buffer[position] != rune('m') { + goto l100 + } + position++ + goto l99 + l100: + position, tokenIndex = position99, tokenIndex99 + if buffer[position] != rune('M') { + goto l93 + } + position++ + } + l99: + { + position101, tokenIndex101 := position, tokenIndex + if buffer[position] != rune('e') { + goto l102 + } + position++ + goto l101 + l102: + position, tokenIndex = position101, tokenIndex101 + if buffer[position] != rune('E') { + goto l93 + } + position++ + } + l101: + if buffer[position] != rune(' ') { + goto l93 + } + position++ + { + position103 := position + if !_rules[ruleyear]() { + goto l93 + } + if buffer[position] != rune('-') { + goto l93 + } + position++ + if !_rules[rulemonth]() { + goto l93 + } + if buffer[position] != rune('-') { + goto l93 + } + position++ + if !_rules[ruleday]() { + goto l93 + } + if buffer[position] != rune('T') { + goto l93 + } + position++ + if !_rules[ruledigit]() { + goto l93 + } + if !_rules[ruledigit]() { + goto l93 + } + if buffer[position] != rune(':') { + goto l93 + } + position++ + if !_rules[ruledigit]() { + goto l93 + } + if !_rules[ruledigit]() { + goto l93 + } + if buffer[position] != rune(':') { + goto l93 + } + 
position++ + if !_rules[ruledigit]() { + goto l93 + } + if !_rules[ruledigit]() { + goto l93 + } + { + position104, tokenIndex104 := position, tokenIndex + { + position106, tokenIndex106 := position, tokenIndex + if buffer[position] != rune('-') { + goto l107 + } + position++ + goto l106 + l107: + position, tokenIndex = position106, tokenIndex106 + if buffer[position] != rune('+') { + goto l105 + } + position++ + } + l106: + if !_rules[ruledigit]() { + goto l105 + } + if !_rules[ruledigit]() { + goto l105 + } + if buffer[position] != rune(':') { + goto l105 + } + position++ + if !_rules[ruledigit]() { + goto l105 + } + if !_rules[ruledigit]() { + goto l105 + } + goto l104 + l105: + position, tokenIndex = position104, tokenIndex104 + if buffer[position] != rune('Z') { + goto l93 + } + position++ + } + l104: + add(rulePegText, position103) + } + add(ruletime, position94) + } + return true + l93: + position, tokenIndex = position93, tokenIndex93 + return false + }, + /* 7 date <- <(('d' / 'D') ('a' / 'A') ('t' / 'T') ('e' / 'E') ' ' <(year '-' month '-' day)>)> */ + func() bool { + position108, tokenIndex108 := position, tokenIndex + { + position109 := position + { + position110, tokenIndex110 := position, tokenIndex + if buffer[position] != rune('d') { + goto l111 + } + position++ + goto l110 + l111: + position, tokenIndex = position110, tokenIndex110 + if buffer[position] != rune('D') { + goto l108 + } + position++ + } + l110: + { + position112, tokenIndex112 := position, tokenIndex + if buffer[position] != rune('a') { + goto l113 + } + position++ + goto l112 + l113: + position, tokenIndex = position112, tokenIndex112 + if buffer[position] != rune('A') { + goto l108 + } + position++ + } + l112: + { + position114, tokenIndex114 := position, tokenIndex + if buffer[position] != rune('t') { + goto l115 + } + position++ + goto l114 + l115: + position, tokenIndex = position114, tokenIndex114 + if buffer[position] != rune('T') { + goto l108 + } + position++ + } + l114: + { + position116, tokenIndex116 := position, tokenIndex + if buffer[position] != rune('e') { + goto l117 + } + position++ + goto l116 + l117: + position, tokenIndex = position116, tokenIndex116 + if buffer[position] != rune('E') { + goto l108 + } + position++ + } + l116: + if buffer[position] != rune(' ') { + goto l108 + } + position++ + { + position118 := position + if !_rules[ruleyear]() { + goto l108 + } + if buffer[position] != rune('-') { + goto l108 + } + position++ + if !_rules[rulemonth]() { + goto l108 + } + if buffer[position] != rune('-') { + goto l108 + } + position++ + if !_rules[ruleday]() { + goto l108 + } + add(rulePegText, position118) + } + add(ruledate, position109) + } + return true + l108: + position, tokenIndex = position108, tokenIndex108 + return false + }, + /* 8 year <- <(('1' / '2') digit digit digit)> */ + func() bool { + position119, tokenIndex119 := position, tokenIndex + { + position120 := position + { + position121, tokenIndex121 := position, tokenIndex + if buffer[position] != rune('1') { + goto l122 + } + position++ + goto l121 + l122: + position, tokenIndex = position121, tokenIndex121 + if buffer[position] != rune('2') { + goto l119 + } + position++ + } + l121: + if !_rules[ruledigit]() { + goto l119 + } + if !_rules[ruledigit]() { + goto l119 + } + if !_rules[ruledigit]() { + goto l119 + } + add(ruleyear, position120) + } + return true + l119: + position, tokenIndex = position119, tokenIndex119 + return false + }, + /* 9 month <- <(('0' / '1') digit)> */ + func() bool { + position123, 
tokenIndex123 := position, tokenIndex + { + position124 := position + { + position125, tokenIndex125 := position, tokenIndex + if buffer[position] != rune('0') { + goto l126 + } + position++ + goto l125 + l126: + position, tokenIndex = position125, tokenIndex125 + if buffer[position] != rune('1') { + goto l123 + } + position++ + } + l125: + if !_rules[ruledigit]() { + goto l123 + } + add(rulemonth, position124) + } + return true + l123: + position, tokenIndex = position123, tokenIndex123 + return false + }, + /* 10 day <- <(((&('3') '3') | (&('2') '2') | (&('1') '1') | (&('0') '0')) digit)> */ + func() bool { + position127, tokenIndex127 := position, tokenIndex + { + position128 := position + { + switch buffer[position] { + case '3': + if buffer[position] != rune('3') { + goto l127 + } + position++ + break + case '2': + if buffer[position] != rune('2') { + goto l127 + } + position++ + break + case '1': + if buffer[position] != rune('1') { + goto l127 + } + position++ + break + default: + if buffer[position] != rune('0') { + goto l127 + } + position++ + break + } + } + + if !_rules[ruledigit]() { + goto l127 + } + add(ruleday, position128) + } + return true + l127: + position, tokenIndex = position127, tokenIndex127 + return false + }, + /* 11 and <- <(('a' / 'A') ('n' / 'N') ('d' / 'D'))> */ + nil, + /* 12 equal <- <'='> */ + nil, + /* 13 contains <- <(('c' / 'C') ('o' / 'O') ('n' / 'N') ('t' / 'T') ('a' / 'A') ('i' / 'I') ('n' / 'N') ('s' / 'S'))> */ + nil, + /* 14 le <- <('<' '=')> */ + nil, + /* 15 ge <- <('>' '=')> */ + nil, + /* 16 l <- <'<'> */ + nil, + /* 17 g <- <'>'> */ + nil, + nil, + } + p.rules = _rules +} diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go new file mode 100644 index 000000000..f0d940992 --- /dev/null +++ b/libs/pubsub/query/query_test.go @@ -0,0 +1,87 @@ +package query_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func TestMatches(t *testing.T) { + var ( + txDate = "2017-01-01" + txTime = "2018-05-03T14:45:00Z" + ) + + testCases := []struct { + s string + tags map[string]string + err bool + matches bool + }{ + {"tm.events.type='NewBlock'", map[string]string{"tm.events.type": "NewBlock"}, false, true}, + + {"tx.gas > 7", map[string]string{"tx.gas": "8"}, false, true}, + {"tx.gas > 7 AND tx.gas < 9", map[string]string{"tx.gas": "8"}, false, true}, + {"body.weight >= 3.5", map[string]string{"body.weight": "3.5"}, false, true}, + {"account.balance < 1000.0", map[string]string{"account.balance": "900"}, false, true}, + {"apples.kg <= 4", map[string]string{"apples.kg": "4.0"}, false, true}, + {"body.weight >= 4.5", map[string]string{"body.weight": fmt.Sprintf("%v", float32(4.5))}, false, true}, + {"oranges.kg < 4 AND watermellons.kg > 10", map[string]string{"oranges.kg": "3", "watermellons.kg": "12"}, false, true}, + {"peaches.kg < 4", map[string]string{"peaches.kg": "5"}, false, false}, + + {"tx.date > DATE 2017-01-01", map[string]string{"tx.date": time.Now().Format(query.DateLayout)}, false, true}, + {"tx.date = DATE 2017-01-01", map[string]string{"tx.date": txDate}, false, true}, + {"tx.date = DATE 2018-01-01", map[string]string{"tx.date": txDate}, false, false}, + + {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": time.Now().Format(query.TimeLayout)}, false, true}, + {"tx.time = TIME 2013-05-03T14:45:00Z", 
map[string]string{"tx.time": txTime}, false, false}, + + {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Igor,Ivan"}, false, true}, + {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Pavel,Ivan"}, false, false}, + } + + for _, tc := range testCases { + q, err := query.New(tc.s) + if !tc.err { + require.Nil(t, err) + } + + if tc.matches { + assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags) + } else { + assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags) + } + } +} + +func TestMustParse(t *testing.T) { + assert.Panics(t, func() { query.MustParse("=") }) + assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") }) +} + +func TestConditions(t *testing.T) { + txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") + require.NoError(t, err) + + testCases := []struct { + s string + conditions []query.Condition + }{ + {s: "tm.events.type='NewBlock'", conditions: []query.Condition{query.Condition{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}}, + {s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{query.Condition{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, query.Condition{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}}, + {s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{query.Condition{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}}, + } + + for _, tc := range testCases { + q, err := query.New(tc.s) + require.Nil(t, err) + + assert.Equal(t, tc.conditions, q.Conditions()) + } +} diff --git a/libs/test.sh b/libs/test.sh new file mode 100755 index 000000000..ecf17fc45 --- /dev/null +++ b/libs/test.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -e + +# run the linter +# make metalinter_test + +# setup certs +make gen_certs + +# run the unit tests with coverage +echo "" > coverage.txt +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done + +# cleanup certs +make clean_certs diff --git a/libs/test/assert.go b/libs/test/assert.go new file mode 100644 index 000000000..a6ffed0ce --- /dev/null +++ b/libs/test/assert.go @@ -0,0 +1,14 @@ +package test + +import ( + "testing" +) + +func AssertPanics(t *testing.T, msg string, f func()) { + defer func() { + if err := recover(); err == nil { + t.Errorf("Should have panic'd, but didn't: %v", msg) + } + }() + f() +} diff --git a/libs/test/mutate.go b/libs/test/mutate.go new file mode 100644 index 000000000..3bbbbd217 --- /dev/null +++ b/libs/test/mutate.go @@ -0,0 +1,28 @@ +package test + +import ( + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Contract: !bytes.Equal(input, output) && len(input) >= len(output) +func MutateByteSlice(bytez []byte) []byte { + // If bytez is empty, panic + if len(bytez) == 0 { + panic("Cannot mutate an empty bytez") + } + + // Copy bytez + mBytez := make([]byte, len(bytez)) + copy(mBytez, bytez) + bytez = mBytez + + // Try a random mutation + switch cmn.RandInt() % 2 { + case 0: // Mutate a single byte + bytez[cmn.RandInt()%len(bytez)] += byte(cmn.RandInt()%255 + 1) + case 1: // Remove an arbitrary byte + pos := cmn.RandInt() % len(bytez) + bytez = append(bytez[:pos], bytez[pos+1:]...) 
+	}
+	return bytez
+}
diff --git a/libs/version/version.go b/libs/version/version.go
new file mode 100644
index 000000000..6e73a937d
--- /dev/null
+++ b/libs/version/version.go
@@ -0,0 +1,3 @@
+package version
+
+const Version = "0.9.0"
diff --git a/lite/client/main_test.go b/lite/client/main_test.go
new file mode 100644
index 000000000..49b194366
--- /dev/null
+++ b/lite/client/main_test.go
@@ -0,0 +1,25 @@
+package client_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+
+	nm "github.com/tendermint/tendermint/node"
+	rpctest "github.com/tendermint/tendermint/rpc/test"
+)
+
+var node *nm.Node
+
+func TestMain(m *testing.M) {
+	// start a tendermint node (and merkleeyes) in the background to test against
+	app := kvstore.NewKVStoreApplication()
+	node = rpctest.StartTendermint(app)
+	code := m.Run()
+
+	// and shut down properly at the end
+	node.Stop()
+	node.Wait()
+	os.Exit(code)
+}
diff --git a/lite/client/provider.go b/lite/client/provider.go
new file mode 100644
index 000000000..5f3d72450
--- /dev/null
+++ b/lite/client/provider.go
@@ -0,0 +1,141 @@
+/*
+Package client defines a provider that uses an rpcclient
+to get information, which is used to get new headers
+and validators directly from a node.
+*/
+package client
+
+import (
+	"bytes"
+
+	rpcclient "github.com/tendermint/tendermint/rpc/client"
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	"github.com/tendermint/tendermint/types"
+
+	"github.com/tendermint/tendermint/lite"
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+// SignStatusClient combines a SignClient and StatusClient.
+type SignStatusClient interface {
+	rpcclient.SignClient
+	rpcclient.StatusClient
+}
+
+type provider struct {
+	node       SignStatusClient
+	lastHeight int64
+}
+
+// NewProvider can wrap any rpcclient to expose it as
+// a read-only provider.
+func NewProvider(node SignStatusClient) lite.Provider {
+	return &provider{node: node}
+}
+
+// NewHTTPProvider can connect to a tendermint json-rpc endpoint
+// at the given url, and uses that as a read-only provider.
+func NewHTTPProvider(remote string) lite.Provider {
+	return &provider{
+		node: rpcclient.NewHTTP(remote, "/websocket"),
+	}
+}
+
+// StatusClient returns the internal node as a StatusClient
+func (p *provider) StatusClient() rpcclient.StatusClient {
+	return p.node
+}
+
+// StoreCommit is a noop, as clients can only read from the chain...
+func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil }
+
+// GetByHash gets the most recent validator set and checks whether its hash
+// matches the given one
+//
+// TODO: improve when the rpc interface supports more functionality
+func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) {
+	var fc lite.FullCommit
+	vals, err := p.node.Validators(nil)
+	// if we get no validators, or a different height, return an error
+	if err != nil {
+		return fc, err
+	}
+	p.updateHeight(vals.BlockHeight)
+	vhash := types.NewValidatorSet(vals.Validators).Hash()
+	if !bytes.Equal(hash, vhash) {
+		return fc, liteErr.ErrCommitNotFound()
+	}
+	return p.seedFromVals(vals)
+}
+
+// GetByHeight gets the validator set by height
+func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) {
+	commit, err := p.node.Commit(&h)
+	if err != nil {
+		return fc, err
+	}
+	return p.seedFromCommit(commit)
+}
+
+// LatestCommit returns the newest commit stored.
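+//
+// An illustrative sketch; the address is a placeholder for a node's RPC
+// endpoint, and error handling is elided:
+//
+//	p := NewHTTPProvider("tcp://localhost:26657")
+//	fc, err := p.LatestCommit()
+//	if err == nil {
+//		fmt.Println(fc.Height()) // height of the newest commit seen
+//	}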
+func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { + commit, err := p.GetLatestCommit() + if err != nil { + return fc, err + } + return p.seedFromCommit(commit) +} + +// GetLatestCommit should return the most recent commit there is, +// which handles queries for future heights as per the semantics +// of GetByHeight. +func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { + status, err := p.node.Status() + if err != nil { + return nil, err + } + return p.node.Commit(&status.SyncInfo.LatestBlockHeight) +} + +// CommitFromResult ... +func CommitFromResult(result *ctypes.ResultCommit) lite.Commit { + return (lite.Commit)(result.SignedHeader) +} + +func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) { + // now get the commits and build a full commit + commit, err := p.node.Commit(&vals.BlockHeight) + if err != nil { + return lite.FullCommit{}, err + } + fc := lite.NewFullCommit( + CommitFromResult(commit), + types.NewValidatorSet(vals.Validators), + ) + return fc, nil +} + +func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) { + fc.Commit = CommitFromResult(commit) + + // now get the proper validators + vals, err := p.node.Validators(&commit.Header.Height) + if err != nil { + return fc, err + } + + // make sure they match the commit (as we cannot enforce height) + vset := types.NewValidatorSet(vals.Validators) + if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { + return fc, liteErr.ErrValidatorsChanged() + } + + p.updateHeight(commit.Header.Height) + fc.Validators = vset + return fc, nil +} + +func (p *provider) updateHeight(h int64) { + if h > p.lastHeight { + p.lastHeight = h + } +} diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go new file mode 100644 index 000000000..94d47da3f --- /dev/null +++ b/lite/client/provider_test.go @@ -0,0 +1,63 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" + rpcclient "github.com/tendermint/tendermint/rpc/client" + rpctest "github.com/tendermint/tendermint/rpc/test" + "github.com/tendermint/tendermint/types" +) + +func TestProvider(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + cfg := rpctest.GetConfig() + rpcAddr := cfg.RPC.ListenAddress + genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile()) + chainID := genDoc.ChainID + p := NewHTTPProvider(rpcAddr) + require.NotNil(t, p) + + // let it produce some blocks + err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) + require.Nil(err) + + // let's get the highest block + seed, err := p.LatestCommit() + + require.Nil(err, "%+v", err) + sh := seed.Height() + vhash := seed.Header.ValidatorsHash + assert.True(sh < 5000) + + // let's check this is valid somehow + assert.Nil(seed.ValidateBasic(chainID)) + cert := lite.NewStaticCertifier(chainID, seed.Validators) + + // historical queries now work :) + lower := sh - 5 + seed, err = p.GetByHeight(lower) + assert.Nil(err, "%+v", err) + assert.Equal(lower, seed.Height()) + + // also get by hash (given the match) + seed, err = p.GetByHash(vhash) + require.Nil(err, "%+v", err) + require.Equal(vhash, seed.Header.ValidatorsHash) + err = cert.Certify(seed.Commit) + assert.Nil(err, "%+v", err) + + // get by hash fails without match + seed, err = p.GetByHash([]byte("foobar")) + assert.NotNil(err) + 
assert.True(liteErr.IsCommitNotFoundErr(err))
+
+	// storing the seed is silently ignored
+	err = p.StoreCommit(seed)
+	assert.Nil(err, "%+v", err)
+}
diff --git a/lite/commit.go b/lite/commit.go
new file mode 100644
index 000000000..11ae6d7ff
--- /dev/null
+++ b/lite/commit.go
@@ -0,0 +1,99 @@
+package lite
+
+import (
+	"bytes"
+
+	"github.com/pkg/errors"
+
+	"github.com/tendermint/tendermint/types"
+
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+// Certifier checks the votes to make sure the block really is signed properly.
+// Certifier must know the current set of validators by some other means.
+type Certifier interface {
+	Certify(check Commit) error
+	ChainID() string
+}
+
+// Commit is basically the rpc /commit response, but extended
+//
+// This is the basepoint for proving anything on the blockchain. It contains
+// a signed header. If the signatures are valid and > 2/3 of the known set,
+// we can store this checkpoint and use it to prove any number of aspects of
+// the system: such as txs, abci state, validator sets, etc...
+type Commit types.SignedHeader
+
+// FullCommit is a commit and the actual validator set,
+// the base info you need to update to a given point,
+// assuming knowledge of some previous validator set
+type FullCommit struct {
+	Commit     `json:"commit"`
+	Validators *types.ValidatorSet `json:"validator_set"`
+}
+
+// NewFullCommit returns a new FullCommit.
+func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit {
+	return FullCommit{
+		Commit:     commit,
+		Validators: vals,
+	}
+}
+
+// Height returns the height of the header.
+func (c Commit) Height() int64 {
+	if c.Header == nil {
+		return 0
+	}
+	return c.Header.Height
+}
+
+// ValidatorsHash returns the hash of the validator set.
+func (c Commit) ValidatorsHash() []byte {
+	if c.Header == nil {
+		return nil
+	}
+	return c.Header.ValidatorsHash
+}
+
+// ValidateBasic does basic consistency checks and makes sure the headers
+// and commits are all consistent and refer to our chain.
+//
+// Make sure to use a Verifier to validate the signatures actually provide
+// a significantly strong proof for this header's validity.
+func (c Commit) ValidateBasic(chainID string) error {
+	// make sure the header is reasonable
+	if c.Header == nil {
+		return errors.New("Commit missing header")
+	}
+	if c.Header.ChainID != chainID {
+		return errors.Errorf("Header belongs to another chain '%s' not '%s'",
+			c.Header.ChainID, chainID)
+	}
+
+	if c.Commit == nil {
+		return errors.New("Commit missing signatures")
+	}
+
+	// make sure the header and commit match (height and hash)
+	if c.Commit.Height() != c.Header.Height {
+		return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height)
+	}
+	hhash := c.Header.Hash()
+	chash := c.Commit.BlockID.Hash
+	if !bytes.Equal(hhash, chash) {
+		return errors.Errorf("Commits sign block %X header is block %X",
+			chash, hhash)
+	}
+
+	// make sure the commit is reasonable
+	err := c.Commit.ValidateBasic()
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	// looks good, we just need to make sure the signatures are really from
+	// empowered validators
+	return nil
+}
diff --git a/lite/doc.go b/lite/doc.go
new file mode 100644
index 000000000..89dc702fc
--- /dev/null
+++ b/lite/doc.go
@@ -0,0 +1,133 @@
+/*
+Package lite allows you to securely validate headers
+without a full node.
+
+This library pulls together all the crypto and algorithms,
+so given a relatively recent (< unbonding period) known
+validator set, one can get indisputable proof that data is in
+the chain (current state) or detect if the node is lying to
+the client.
+
+Tendermint RPC exposes a lot of info, but a malicious node
+could return any data it wants to queries, or even to block
+headers, even making up fake signatures from non-existent
+validators to justify it. This is a lot of logic to get
+right, so it is contained in a small, easy-to-use library
+that does this for you, letting you just build a nice UI.
+
+We design for clients who have no strong trust relationship
+with any tendermint node, just the validator set as a whole.
+Beyond building nice mobile or desktop applications, the
+cosmos hub is another important example of a client
+that needs undeniable proof without syncing the full chain,
+in order to efficiently implement IBC.
+
+Commits
+
+There are two main data structures that we pass around - Commit
+and FullCommit. Both of them mirror what information is
+exposed in tendermint rpc.
+
+Commit is a block header along with enough validator signatures
+to prove its validity (> 2/3 of the voting power). A FullCommit
+is a Commit along with the full validator set. When the
+validator set doesn't change, the Commit is enough, but since
+the block header only has a hash, we need the FullCommit to
+follow any changes to the validator set.
+
+Certifiers
+
+A Certifier validates a new Commit given the currently known
+state. There are three different types of Certifiers exposed,
+each one building on the last one, with additional complexity.
+
+Static - given the validator set upon initialization. Verifies
+all signatures against that set and if the validator set
+changes, it will reject all headers.
+
+Dynamic - This wraps Static and has the same Certify
+method. However, it adds an Update method, which can be called
+with a FullCommit when the validator set changes. If it can
+prove this is a valid transition, it will update the validator
+set.
+
+Inquiring - this wraps Dynamic and implements an auto-update
+strategy on top of the Dynamic update. If a call to
+Certify fails as the validator set has changed, then it
+attempts to find a FullCommit and Update to that header.
+To get these FullCommits, it makes use of a Provider.
+
+Providers
+
+A Provider allows us to store and retrieve the FullCommits,
+to provide memory to the Inquiring Certifier.
+
+NewMemStoreProvider - in-memory cache.
+
+files.NewProvider - disk backed storage.
+
+client.NewHTTPProvider - query tendermint rpc.
+
+NewCacheProvider - combine multiple providers.
+
+The suggested use for local light clients is
+client.NewHTTPProvider for getting new data (Source),
+and NewCacheProvider(NewMemStoreProvider(),
+files.NewProvider()) to store confirmed headers (Trusted).
+
+How We Track Validators
+
+Unless you want to blindly trust the node you talk with, you
+need to trace every response back to a hash in a block header
+and validate the commit signatures of that block header match
+the proper validator set. If there is a constant validator
+set, you store it locally upon initialization of the client,
+and check against that every time.
+
+Once there is a dynamic validator set, the issue of
+verifying a block becomes a bit more tricky. There is
+background information in a
+github issue (https://github.com/tendermint/tendermint/issues/377).
+
+In short, if there is a block at height H with a known
+(trusted) validator set V, and another block at height H'
+(H' > H) with validator set V' != V, then we want a way to
+safely update it.
+
+First, get the new (unconfirmed) validator set V' and
+verify H' is internally consistent and properly signed by
+this V'. Assuming it is a valid block, we check that at
+least 2/3 of the validators in V also signed it, meaning
+it would also be valid under our old assumptions.
+That should be enough, but we can also check that the
+V counts for at least 2/3 of the total votes in H'
+for extra safety (we can have a discussion if this is
+strictly required). If we can verify all this,
+then we can accept H' and V' as valid and use that to
+validate all blocks X > H'.
+
+If we cannot update directly from H -> H' because there was
+too much change to the validator set, then we can look for
+some Hm (H < Hm < H') with a validator set Vm. Then we try
+to update H -> Hm and Hm -> H' in two separate steps.
+If one of these steps doesn't work, then we continue
+bisecting, until we eventually have to externally
+validate the validator set changes at every block.
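+
+A rough sketch of one bisection step, reusing names from this package
+(cert is a DynamicCertifier, source is a Provider, liteErr is the
+lite/errors package, and h1 < h2 are a trusted and a target height):
+
+	fc, err := source.GetByHeight(h2)
+	if err == nil {
+		err = cert.Update(fc)
+	}
+	if liteErr.IsTooMuchChangeErr(err) {
+		h2 = (h1 + h2) / 2 // try a closer header, then repeat
+	}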
+
+Since we never trust any server in this protocol, only the
+signatures themselves, it doesn't matter if the seed comes
+from a (possibly malicious) node or a (possibly malicious) user.
+We can accept it or reject it based only on our trusted
+validator set and cryptographic proofs. This makes it
+extremely important to verify that you have the proper
+validator set when initializing the client, as that is the
+root of all trust.
+
+Of course, this assumes that the known block is within the
+unbonding period to avoid the "nothing at stake" problem.
+If you haven't seen the state in a few months, you will need
+to manually verify the new validator set hash using off-chain
+means (the same as getting the initial hash).
+
+*/
+package lite
diff --git a/lite/dynamic_certifier.go b/lite/dynamic_certifier.go
new file mode 100644
index 000000000..0ddace8b6
--- /dev/null
+++ b/lite/dynamic_certifier.go
@@ -0,0 +1,96 @@
+package lite
+
+import (
+	"github.com/tendermint/tendermint/types"
+
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+var _ Certifier = (*DynamicCertifier)(nil)
+
+// DynamicCertifier uses a StaticCertifier for Certify, but adds an
+// Update method to allow for a change of validators.
+//
+// You can pass in a FullCommit with another validator set,
+// and if this is a provably secure transition (< 1/3 change,
+// sufficient signatures), then it will update the
+// validator set for the next Certify call.
+// For security, it will only follow validator set changes
+// going forward.
+type DynamicCertifier struct {
+	cert       *StaticCertifier
+	lastHeight int64
+}
+
+// NewDynamicCertifier returns a new dynamic certifier.
+func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier {
+	return &DynamicCertifier{
+		cert:       NewStaticCertifier(chainID, vals),
+		lastHeight: height,
+	}
+}
+
+// ChainID returns the chain id of this certifier.
+// Implements Certifier.
+func (dc *DynamicCertifier) ChainID() string {
+	return dc.cert.ChainID()
+}
+
+// Validators returns the validators of this certifier.
+func (dc *DynamicCertifier) Validators() *types.ValidatorSet {
+	return dc.cert.vSet
+}
+
+// Hash returns the hash of this certifier.
+func (dc *DynamicCertifier) Hash() []byte {
+	return dc.cert.Hash()
+}
+
+// LastHeight returns the last height of this certifier.
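+//
+// An illustrative sketch (chainID, vals and fc are placeholders supplied by
+// the caller):
+//
+//	cert := NewDynamicCertifier(chainID, vals, 0)
+//	if err := cert.Update(fc); err == nil {
+//		fmt.Println(cert.LastHeight()) // now fc.Height()
+//	}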
+func (dc *DynamicCertifier) LastHeight() int64 { + return dc.lastHeight +} + +// Certify will verify whether the commit is valid and will update the height if it is or return an +// error if it is not. +// Implements Certifier. +func (dc *DynamicCertifier) Certify(check Commit) error { + err := dc.cert.Certify(check) + if err == nil { + // update last seen height if input is valid + dc.lastHeight = check.Height() + } + return err +} + +// Update will verify if this is a valid change and update +// the certifying validator set if safe to do so. +// +// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) +func (dc *DynamicCertifier) Update(fc FullCommit) error { + // ignore all checkpoints in the past -> only to the future + h := fc.Height() + if h <= dc.lastHeight { + return liteErr.ErrPastTime() + } + + // first, verify if the input is self-consistent.... + err := fc.ValidateBasic(dc.ChainID()) + if err != nil { + return err + } + + // now, make sure not too much change... meaning this commit + // would be approved by the currently known validator set + // as well as the new set + commit := fc.Commit.Commit + err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit) + if err != nil { + return liteErr.ErrTooMuchChange() + } + + // looks good, we can update + dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators) + dc.lastHeight = h + return nil +} diff --git a/lite/dynamic_certifier_test.go b/lite/dynamic_certifier_test.go new file mode 100644 index 000000000..88c145f95 --- /dev/null +++ b/lite/dynamic_certifier_test.go @@ -0,0 +1,130 @@ +package lite_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tendermint/lite" + "github.com/tendermint/tendermint/lite/errors" +) + +// TestDynamicCert just makes sure it still works like StaticCert +func TestDynamicCert(t *testing.T) { + // assert, require := assert.New(t), require.New(t) + assert := assert.New(t) + // require := require.New(t) + + keys := lite.GenValKeys(4) + // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! 
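+	// (total power is 140, so 2/3 is ~93.3: 20+30+40 = 90 falls short, while
+	// 30+40+50 = 120 clears it)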
+ vals := keys.ToValidators(20, 10) + // and a certifier based on our known set + chainID := "test-dyno" + cert := lite.NewDynamicCertifier(chainID, vals, 0) + + cases := []struct { + keys lite.ValKeys + vals *types.ValidatorSet + height int64 + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect validator change error + }{ + // perfect, signed by everyone + {keys, vals, 1, 0, len(keys), true, false}, + // skip little guy is okay + {keys, vals, 2, 1, len(keys), true, false}, + // but not the big guy + {keys, vals, 3, 0, len(keys) - 1, false, false}, + // even changing the power a little bit breaks the static validator + // the sigs are enough, but the validator hash is unknown + {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, + } + + for _, tc := range cases { + check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, + []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) + err := cert.Certify(check) + if tc.proper { + assert.Nil(err, "%+v", err) + assert.Equal(cert.LastHeight(), tc.height) + } else { + assert.NotNil(err) + if tc.changed { + assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) + } + } + } +} + +// TestDynamicUpdate makes sure we update safely and sanely +func TestDynamicUpdate(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + chainID := "test-dyno-up" + keys := lite.GenValKeys(5) + vals := keys.ToValidators(20, 0) + cert := lite.NewDynamicCertifier(chainID, vals, 40) + + // one valid block to give us a sense of time + h := int64(100) + good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), []byte("params"), []byte("results"), 0, len(keys)) + err := cert.Certify(good) + require.Nil(err, "%+v", err) + + // some new sets to try later + keys2 := keys.Extend(2) + keys3 := keys2.Extend(4) + + // we try to update with some blocks + cases := []struct { + keys lite.ValKeys + vals *types.ValidatorSet + height int64 + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect too much change error + }{ + // same validator set, well signed, of course it is okay + {keys, vals, h + 10, 0, len(keys), true, false}, + // same validator set, poorly signed, fails + {keys, vals, h + 20, 2, len(keys), false, false}, + + // shift the power a little, works if properly signed + {keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false}, + // but not on a poor signature + {keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false}, + // and not if it was in the past + {keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false}, + + // let's try to adjust to a whole new validator set (we have 5/7 of the votes) + {keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false}, + + // properly signed but too much change, not allowed (only 7/11 validators known) + {keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true}, + } + + for _, tc := range cases { + fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals, + []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) + err := cert.Update(fc) + if tc.proper { + assert.Nil(err, "%d: %+v", tc.height, err) + // we update last seen height + assert.Equal(cert.LastHeight(), tc.height) + // and we update the proper validators + assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash()) + } else { + assert.NotNil(err, "%d", tc.height) + // we don't update the height + assert.NotEqual(cert.LastHeight(), 
tc.height)
+			if tc.changed {
+				assert.True(errors.IsTooMuchChangeErr(err),
+					"%d: %+v", tc.height, err)
+			}
+		}
+	}
+}
diff --git a/lite/errors/errors.go b/lite/errors/errors.go
new file mode 100644
index 000000000..99e42a0bd
--- /dev/null
+++ b/lite/errors/errors.go
@@ -0,0 +1,92 @@
+package errors
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier")
+	errCommitNotFound    = fmt.Errorf("Commit not found by provider")
+	errTooMuchChange     = fmt.Errorf("Validators change too much to safely update")
+	errPastTime          = fmt.Errorf("Update older than certifier height")
+	errNoPathFound       = fmt.Errorf("Cannot find a path of validators")
+)
+
+// IsCommitNotFoundErr checks whether an error is due to missing data
+func IsCommitNotFoundErr(err error) bool {
+	return err != nil && (errors.Cause(err) == errCommitNotFound)
+}
+
+// ErrCommitNotFound indicates that the requested commit was not found.
+func ErrCommitNotFound() error {
+	return errors.WithStack(errCommitNotFound)
+}
+
+// IsValidatorsChangedErr checks whether an error is due
+// to a differing validator set.
+func IsValidatorsChangedErr(err error) bool {
+	return err != nil && (errors.Cause(err) == errValidatorsChanged)
+}
+
+// ErrValidatorsChanged indicates that the validator set was changed between two commits.
+func ErrValidatorsChanged() error {
+	return errors.WithStack(errValidatorsChanged)
+}
+
+// IsTooMuchChangeErr checks whether an error is due to too much change
+// between these validator sets.
+func IsTooMuchChangeErr(err error) bool {
+	return err != nil && (errors.Cause(err) == errTooMuchChange)
+}
+
+// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3.
+func ErrTooMuchChange() error {
+	return errors.WithStack(errTooMuchChange)
+}
+
+// IsPastTimeErr checks whether an error is due to an update older than
+// the certifier's current height.
+func IsPastTimeErr(err error) bool {
+	return err != nil && (errors.Cause(err) == errPastTime)
+}
+
+// ErrPastTime indicates that the update is older than the certifier height.
+func ErrPastTime() error {
+	return errors.WithStack(errPastTime)
+}
+
+// IsNoPathFoundErr checks whether an error is due to no path of
+// validators in provider from where we are to where we want to be
+func IsNoPathFoundErr(err error) bool {
+	return err != nil && (errors.Cause(err) == errNoPathFound)
+}
+
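+// Callers are expected to branch on an error's cause rather than comparing
+// messages. A short sketch (`err` assumed to come from a certifier update):
+//
+//	if IsTooMuchChangeErr(err) {
+//		// fall back to bisecting the update path over heights
+//	}
+
+// ErrNoPathFound indicates that no path of validators could be found
+// from the current height to the requested one.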
+func ErrNoPathFound() error {
+	return errors.WithStack(errNoPathFound)
+}
+
+//--------------------------------------------
+
+type errHeightMismatch struct {
+	h1, h2 int64
+}
+
+func (e errHeightMismatch) Error() string {
+	return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2)
+}
+
+// IsHeightMismatchErr checks whether an error is due to data from different blocks
+func IsHeightMismatchErr(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := errors.Cause(err).(errHeightMismatch)
+	return ok
+}
+
+// ErrHeightMismatch returns a height-mismatch error with a stack trace
+func ErrHeightMismatch(h1, h2 int64) error {
+	return errors.WithStack(errHeightMismatch{h1, h2})
+}
diff --git a/lite/errors/errors_test.go b/lite/errors/errors_test.go
new file mode 100644
index 000000000..479215e47
--- /dev/null
+++ b/lite/errors/errors_test.go
@@ -0,0 +1,18 @@
+package errors
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestErrorHeight(t *testing.T) {
+	e1 := ErrHeightMismatch(2, 3)
+	e1.Error()
+	assert.True(t, IsHeightMismatchErr(e1))
+
+	e2 := errors.New("foobar")
+	assert.False(t, IsHeightMismatchErr(e2))
+	assert.False(t, IsHeightMismatchErr(nil))
+}
diff --git a/lite/files/commit.go b/lite/files/commit.go
new file mode 100644
index 000000000..8a7e4721e
--- /dev/null
+++ b/lite/files/commit.go
@@ -0,0 +1,93 @@
+package files
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/pkg/errors"
+
+	"github.com/tendermint/tendermint/lite"
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+const (
+	// MaxFullCommitSize is the maximum number of bytes we will
+	// read in for a full commit to avoid excessive allocations
+	// in the deserializer
+	MaxFullCommitSize = 1024 * 1024
+)
+
+// SaveFullCommit exports the seed in binary / go-amino style
+func SaveFullCommit(fc lite.FullCommit, path string) error {
+	f, err := os.Create(path)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	defer f.Close()
+
+	_, err = cdc.MarshalBinaryWriter(f, fc)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	return nil
+}
+
+// SaveFullCommitJSON exports the seed in a json format
+func SaveFullCommitJSON(fc lite.FullCommit, path string) error {
+	f, err := os.Create(path)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	defer f.Close()
+	bz, err := cdc.MarshalJSON(fc)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	_, err = f.Write(bz)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	return nil
+}
+
+// LoadFullCommit loads the full commit from the file system.
+func LoadFullCommit(path string) (lite.FullCommit, error) {
+	var fc lite.FullCommit
+	f, err := os.Open(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fc, liteErr.ErrCommitNotFound()
+		}
+		return fc, errors.WithStack(err)
+	}
+	defer f.Close()
+
+	// cap the read at MaxFullCommitSize, per the constant's stated purpose
+	_, err = cdc.UnmarshalBinaryReader(f, &fc, MaxFullCommitSize)
+	if err != nil {
+		return fc, errors.WithStack(err)
+	}
+	return fc, nil
+}
+
+// LoadFullCommitJSON loads the commit from the file system in JSON format.
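+//
+// A round-trip sketch (hypothetical path, mirroring the save/load pairs above):
+//
+//	if err := SaveFullCommitJSON(fc, "/tmp/fc.json"); err == nil {
+//		fc2, _ := LoadFullCommitJSON("/tmp/fc.json")
+//		_ = fc2.Height()
+//	}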
+func LoadFullCommitJSON(path string) (lite.FullCommit, error) { + var fc lite.FullCommit + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return fc, liteErr.ErrCommitNotFound() + } + return fc, errors.WithStack(err) + } + defer f.Close() + + bz, err := ioutil.ReadAll(f) + if err != nil { + return fc, errors.WithStack(err) + } + err = cdc.UnmarshalJSON(bz, &fc) + if err != nil { + return fc, errors.WithStack(err) + } + return fc, nil +} diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go new file mode 100644 index 000000000..2891e5809 --- /dev/null +++ b/lite/files/commit_test.go @@ -0,0 +1,66 @@ +package files + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" + + "github.com/tendermint/tendermint/lite" +) + +func tmpFile() string { + suffix := cmn.RandStr(16) + return filepath.Join(os.TempDir(), "fc-test-"+suffix) +} + +func TestSerializeFullCommits(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // some constants + appHash := []byte("some crazy thing") + chainID := "ser-ial" + h := int64(25) + + // build a fc + keys := lite.GenValKeys(5) + vals := keys.ToValidators(10, 0) + fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + + require.Equal(h, fc.Height()) + require.Equal(vals.Hash(), fc.ValidatorsHash()) + + // try read/write with json + jfile := tmpFile() + defer os.Remove(jfile) + jseed, err := LoadFullCommitJSON(jfile) + assert.NotNil(err) + err = SaveFullCommitJSON(fc, jfile) + require.Nil(err) + jseed, err = LoadFullCommitJSON(jfile) + assert.Nil(err, "%+v", err) + assert.Equal(h, jseed.Height()) + assert.Equal(vals.Hash(), jseed.ValidatorsHash()) + + // try read/write with binary + bfile := tmpFile() + defer os.Remove(bfile) + bseed, err := LoadFullCommit(bfile) + assert.NotNil(err) + err = SaveFullCommit(fc, bfile) + require.Nil(err) + bseed, err = LoadFullCommit(bfile) + assert.Nil(err, "%+v", err) + assert.Equal(h, bseed.Height()) + assert.Equal(vals.Hash(), bseed.ValidatorsHash()) + + // make sure they don't read the other format (different) + _, err = LoadFullCommit(jfile) + assert.NotNil(err) + _, err = LoadFullCommitJSON(bfile) + assert.NotNil(err) +} diff --git a/lite/files/provider.go b/lite/files/provider.go new file mode 100644 index 000000000..327b0331a --- /dev/null +++ b/lite/files/provider.go @@ -0,0 +1,139 @@ +/* +Package files defines a Provider that stores all data in the filesystem + +We assume the same validator hash may be reused by many different +headers/Commits, and thus store it separately. This leaves us +with three issues: + + 1. Given a validator hash, retrieve the validator set if previously stored + 2. Given a block height, find the Commit with the highest height <= h + 3. 
Given a FullCommit, store it quickly to satisfy 1 and 2
+
+Note that we do not worry about caching, as that can be achieved by
+pairing this with a MemStoreProvider and CacheProvider from the lite package
+*/
+package files
+
+import (
+	"encoding/hex"
+	"fmt"
+	"math"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/pkg/errors"
+
+	"github.com/tendermint/tendermint/lite"
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+// nolint
+const (
+	Ext      = ".tsd"
+	ValDir   = "validators"
+	CheckDir = "checkpoints"
+	dirPerm  = os.FileMode(0755)
+	//filePerm = os.FileMode(0644)
+)
+
+type provider struct {
+	valDir   string
+	checkDir string
+}
+
+// NewProvider creates the parent dir and subdirs
+// for validators and checkpoints as needed
+func NewProvider(dir string) lite.Provider {
+	valDir := filepath.Join(dir, ValDir)
+	checkDir := filepath.Join(dir, CheckDir)
+	for _, d := range []string{valDir, checkDir} {
+		err := os.MkdirAll(d, dirPerm)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return &provider{valDir: valDir, checkDir: checkDir}
+}
+
+func (p *provider) encodeHash(hash []byte) string {
+	return hex.EncodeToString(hash) + Ext
+}
+
+func (p *provider) encodeHeight(h int64) string {
+	// pad to 12 digits so lexicographic order matches numeric order
+	return fmt.Sprintf("%012d%s", h, Ext)
+}
+
+// StoreCommit saves a full commit after it has been verified.
+func (p *provider) StoreCommit(fc lite.FullCommit) error {
+	// make sure the fc is self-consistent before saving
+	err := fc.ValidateBasic(fc.Commit.Header.ChainID)
+	if err != nil {
+		return err
+	}
+
+	paths := []string{
+		filepath.Join(p.checkDir, p.encodeHeight(fc.Height())),
+		filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)),
+	}
+	for _, path := range paths {
+		err := SaveFullCommit(fc, path)
+		// unknown error in creating or writing immediately breaks
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetByHeight returns the closest commit with height <= h.
+func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) {
+	// first we look for exact match, then search...
+	path := filepath.Join(p.checkDir, p.encodeHeight(h))
+	fc, err := LoadFullCommit(path)
+	if liteErr.IsCommitNotFoundErr(err) {
+		path, err = p.searchForHeight(h)
+		if err == nil {
+			fc, err = LoadFullCommit(path)
+		}
+	}
+	return fc, err
+}
+
+// LatestCommit returns the newest commit stored.
+func (p *provider) LatestCommit() (fc lite.FullCommit, err error) {
+	// Note to future: please update by 2077 to avoid rollover
+	return p.GetByHeight(math.MaxInt32 - 1)
+}
+
+// searchForHeight looks for a file with the highest height < h and
+// returns liteErr.ErrCommitNotFound() if there is none
+func (p *provider) searchForHeight(h int64) (string, error) {
+	d, err := os.Open(p.checkDir)
+	if err != nil {
+		return "", errors.WithStack(err)
+	}
+	files, err := d.Readdirnames(0)
+	d.Close()
+	if err != nil {
+		return "", errors.WithStack(err)
+	}
+
+	desired := p.encodeHeight(h)
+	sort.Strings(files)
+	i := sort.SearchStrings(files, desired)
+	if i == 0 {
+		return "", liteErr.ErrCommitNotFound()
+	}
+	found := files[i-1]
+	path := filepath.Join(p.checkDir, found)
+	return path, nil
+}
+
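+// The zero-padded height encoding keeps lexicographic order identical to
+// numeric order, which is what makes the sort.SearchStrings bisection in
+// searchForHeight valid. For example (hypothetical heights):
+//
+//	encodeHeight(7)    == "000000000007.tsd"
+//	encodeHeight(1024) == "000000001024.tsd"
+
+// GetByHash returns a commit exactly matching this validator hash.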
+func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { + path := filepath.Join(p.valDir, p.encodeHash(hash)) + return LoadFullCommit(path) +} diff --git a/lite/files/provider_test.go b/lite/files/provider_test.go new file mode 100644 index 000000000..5deebb1a2 --- /dev/null +++ b/lite/files/provider_test.go @@ -0,0 +1,96 @@ +package files_test + +import ( + "bytes" + "errors" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/lite/files" +) + +func checkEqual(stored, loaded lite.FullCommit, chainID string) error { + err := loaded.ValidateBasic(chainID) + if err != nil { + return err + } + if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { + return errors.New("Different block hashes") + } + return nil +} + +func TestFileProvider(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + dir, err := ioutil.TempDir("", "fileprovider-test") + assert.Nil(err) + defer os.RemoveAll(dir) + p := files.NewProvider(dir) + + chainID := "test-files" + appHash := []byte("some-data") + keys := lite.GenValKeys(5) + count := 10 + + // make a bunch of seeds... + seeds := make([]lite.FullCommit, count) + for i := 0; i < count; i++ { + // two seeds for each validator, to check how we handle dups + // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... + vals := keys.ToValidators(10, int64(count/2)) + h := int64(20 + 10*i) + check := keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + seeds[i] = lite.NewFullCommit(check, vals) + } + + // check provider is empty + seed, err := p.GetByHeight(20) + require.NotNil(err) + assert.True(liteErr.IsCommitNotFoundErr(err)) + + seed, err = p.GetByHash(seeds[3].ValidatorsHash()) + require.NotNil(err) + assert.True(liteErr.IsCommitNotFoundErr(err)) + + // now add them all to the provider + for _, s := range seeds { + err = p.StoreCommit(s) + require.Nil(err) + // and make sure we can get it back + s2, err := p.GetByHash(s.ValidatorsHash()) + assert.Nil(err) + err = checkEqual(s, s2, chainID) + assert.Nil(err) + // by height as well + s2, err = p.GetByHeight(s.Height()) + err = checkEqual(s, s2, chainID) + assert.Nil(err) + } + + // make sure we get the last hash if we overstep + seed, err = p.GetByHeight(5000) + if assert.Nil(err, "%+v", err) { + assert.Equal(seeds[count-1].Height(), seed.Height()) + err = checkEqual(seeds[count-1], seed, chainID) + assert.Nil(err) + } + + // and middle ones as well + seed, err = p.GetByHeight(47) + if assert.Nil(err, "%+v", err) { + // we only step by 10, so 40 must be the one below this + assert.EqualValues(40, seed.Height()) + } + + // and proper error for too low + _, err = p.GetByHeight(5) + assert.NotNil(err) + assert.True(liteErr.IsCommitNotFoundErr(err)) +} diff --git a/lite/files/wire.go b/lite/files/wire.go new file mode 100644 index 000000000..3a207744a --- /dev/null +++ b/lite/files/wire.go @@ -0,0 +1,12 @@ +package files + +import ( + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" +) + +var cdc = amino.NewCodec() + +func init() { + crypto.RegisterAmino(cdc) +} diff --git a/lite/helpers.go b/lite/helpers.go new file mode 100644 index 000000000..695f6fb9b --- /dev/null +++ b/lite/helpers.go @@ -0,0 +1,159 @@ +package lite + +import ( + "time" + + crypto "github.com/tendermint/tendermint/crypto" + + 
"github.com/tendermint/tendermint/types" +) + +// ValKeys is a helper for testing. +// +// It lets us simulate signing with many keys, either ed25519 or secp256k1. +// The main use case is to create a set, and call GenCommit +// to get properly signed header for testing. +// +// You can set different weights of validators each time you call +// ToValidators, and can optionally extend the validator set later +// with Extend or ExtendSecp +type ValKeys []crypto.PrivKey + +// GenValKeys produces an array of private keys to generate commits. +func GenValKeys(n int) ValKeys { + res := make(ValKeys, n) + for i := range res { + res[i] = crypto.GenPrivKeyEd25519() + } + return res +} + +// Change replaces the key at index i. +func (v ValKeys) Change(i int) ValKeys { + res := make(ValKeys, len(v)) + copy(res, v) + res[i] = crypto.GenPrivKeyEd25519() + return res +} + +// Extend adds n more keys (to remove, just take a slice). +func (v ValKeys) Extend(n int) ValKeys { + extra := GenValKeys(n) + return append(v, extra...) +} + +// GenSecpValKeys produces an array of secp256k1 private keys to generate commits. +func GenSecpValKeys(n int) ValKeys { + res := make(ValKeys, n) + for i := range res { + res[i] = crypto.GenPrivKeySecp256k1() + } + return res +} + +// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). +func (v ValKeys) ExtendSecp(n int) ValKeys { + extra := GenSecpValKeys(n) + return append(v, extra...) +} + +// ToValidators produces a list of validators from the set of keys +// The first key has weight `init` and it increases by `inc` every step +// so we can have all the same weight, or a simple linear distribution +// (should be enough for testing). +func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { + res := make([]*types.Validator, len(v)) + for i, k := range v { + res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) + } + return types.NewValidatorSet(res) +} + +// signHeader properly signs the header with all keys from first to last exclusive. +func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { + votes := make([]*types.Vote, len(v)) + + // we need this list to keep the ordering... 
+	vset := v.ToValidators(1, 0)
+
+	// fill in the votes we want
+	for i := first; i < last && i < len(v); i++ {
+		vote := makeVote(header, vset, v[i])
+		votes[vote.ValidatorIndex] = vote
+	}
+
+	res := &types.Commit{
+		BlockID:    types.BlockID{Hash: header.Hash()},
+		Precommits: votes,
+	}
+	return res
+}
+
+func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote {
+	addr := key.PubKey().Address()
+	idx, _ := vals.GetByAddress(addr)
+	vote := &types.Vote{
+		ValidatorAddress: addr,
+		ValidatorIndex:   idx,
+		Height:           header.Height,
+		Round:            1,
+		Timestamp:        time.Now().UTC(),
+		Type:             types.VoteTypePrecommit,
+		BlockID:          types.BlockID{Hash: header.Hash()},
+	}
+	// Sign it
+	signBytes := vote.SignBytes(header.ChainID)
+	// TODO Consider reworking makeVote API to return an error
+	sig, err := key.Sign(signBytes)
+	if err != nil {
+		panic(err)
+	}
+	vote.Signature = sig
+
+	return vote
+}
+
+// Silences warning that vals can also be merkle.Hashable
+// nolint: interfacer
+func genHeader(chainID string, height int64, txs types.Txs,
+	vals *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header {
+
+	return &types.Header{
+		ChainID:  chainID,
+		Height:   height,
+		Time:     time.Now(),
+		NumTxs:   int64(len(txs)),
+		TotalTxs: int64(len(txs)),
+		// LastBlockID
+		// LastCommitHash
+		ValidatorsHash:  vals.Hash(),
+		DataHash:        txs.Hash(),
+		AppHash:         appHash,
+		ConsensusHash:   consHash,
+		LastResultsHash: resHash,
+	}
+}
+
+// GenCommit calls genHeader and signHeader and combines them into a Commit.
+func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs,
+	vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) Commit {
+
+	header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash)
+	check := Commit{
+		Header: header,
+		Commit: v.signHeader(header, first, last),
+	}
+	return check
+}
+
+// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit.
+func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs,
+	vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit {
+
+	header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash)
+	commit := Commit{
+		Header: header,
+		Commit: v.signHeader(header, first, last),
+	}
+	return NewFullCommit(commit, vals)
+}
diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go
new file mode 100644
index 000000000..042bd08e3
--- /dev/null
+++ b/lite/inquiring_certifier.go
@@ -0,0 +1,163 @@
+package lite
+
+import (
+	"github.com/tendermint/tendermint/types"
+
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+var _ Certifier = (*InquiringCertifier)(nil)
+
+// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call
+// to Certify fails due to a change in its validator set, InquiringCertifier will try to find a
+// previous FullCommit which it can use to safely update the validator set. It uses a source
+// provider to obtain the needed FullCommits. It stores properly validated data on the local system.
+type InquiringCertifier struct {
+	cert *DynamicCertifier
+	// These are only properly validated data, from local system
+	trusted Provider
+	// This is a source of new info, like a node rpc, or other import method
+	Source Provider
+}
+
+// NewInquiringCertifier returns a new InquiringCertifier. It uses the trusted provider to store
+// validated data and the source provider to obtain missing FullCommits.
+//
+// Example: The trusted provider should be a CacheProvider, MemProvider or files.Provider. The
+// source provider should be a client.HTTPProvider.
+func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider,
+	source Provider) (*InquiringCertifier, error) {

+	// store the data in trusted
+	err := trusted.StoreCommit(fc)
+	if err != nil {
+		return nil, err
+	}
+
+	return &InquiringCertifier{
+		cert:    NewDynamicCertifier(chainID, fc.Validators, fc.Height()),
+		trusted: trusted,
+		Source:  source,
+	}, nil
+}
+
+// ChainID returns the chain id.
+// Implements Certifier.
+func (ic *InquiringCertifier) ChainID() string {
+	return ic.cert.ChainID()
+}
+
+// Validators returns the validator set.
+func (ic *InquiringCertifier) Validators() *types.ValidatorSet {
+	return ic.cert.cert.vSet
+}
+
+// LastHeight returns the last height.
+func (ic *InquiringCertifier) LastHeight() int64 {
+	return ic.cert.lastHeight
+}
+
+// Certify makes sure this checkpoint is valid.
+//
+// If the validators have changed since the last known time, it looks
+// for a path to prove the new validators.
+//
+// On success, it will store the checkpoint in the store for later viewing.
+// Implements Certifier.
+func (ic *InquiringCertifier) Certify(commit Commit) error {
+	err := ic.useClosestTrust(commit.Height())
+	if err != nil {
+		return err
+	}
+
+	err = ic.cert.Certify(commit)
+	if !liteErr.IsValidatorsChangedErr(err) {
+		return err
+	}
+	err = ic.updateToHash(commit.Header.ValidatorsHash)
+	if err != nil {
+		return err
+	}
+
+	err = ic.cert.Certify(commit)
+	if err != nil {
+		return err
+	}
+
+	// store the new checkpoint
+	return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators()))
+}
+
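+// A wiring sketch (hedged: hypothetical paths and address; `fc` is an initial
+// FullCommit whose validator hash was verified off-chain; GetCertifier in
+// lite/proxy wires the same pieces concretely):
+//
+//	trust := NewCacheProvider(NewMemStoreProvider(), files.NewProvider("/path"))
+//	source := certclient.NewHTTPProvider("tcp://localhost:26657")
+//	ic, err := NewInquiringCertifier("my-chain", fc, trust, source)
+
+// Update will verify if this is a valid change and update
+// the certifying validator set if safe to do so.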
+func (ic *InquiringCertifier) Update(fc FullCommit) error {
+	err := ic.useClosestTrust(fc.Height())
+	if err != nil {
+		return err
+	}
+
+	err = ic.cert.Update(fc)
+	if err == nil {
+		err = ic.trusted.StoreCommit(fc)
+	}
+	return err
+}
+
+func (ic *InquiringCertifier) useClosestTrust(h int64) error {
+	closest, err := ic.trusted.GetByHeight(h)
+	if err != nil {
+		return err
+	}
+
+	// if the best seed is not the one we currently use,
+	// let's just reset the dynamic certifier
+	if closest.Height() != ic.LastHeight() {
+		ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height())
+	}
+	return nil
+}
+
+// updateToHash gets the validator hash we want to update to
+// if IsTooMuchChangeErr, we try to find a path by binary search over height
+func (ic *InquiringCertifier) updateToHash(vhash []byte) error {
+	// try to get the match, and update
+	fc, err := ic.Source.GetByHash(vhash)
+	if err != nil {
+		return err
+	}
+	err = ic.cert.Update(fc)
+	// handle IsTooMuchChangeErr by using divide and conquer
+	if liteErr.IsTooMuchChangeErr(err) {
+		err = ic.updateToHeight(fc.Height())
+	}
+	return err
+}
+
+// updateToHeight will use divide-and-conquer to find a path to h
+func (ic *InquiringCertifier) updateToHeight(h int64) error {
+	// try to update to this height (with checks)
+	fc, err := ic.Source.GetByHeight(h)
+	if err != nil {
+		return err
+	}
+	start, end := ic.LastHeight(), fc.Height()
+	if end <= start {
+		return liteErr.ErrNoPathFound()
+	}
+	err = ic.Update(fc)
+
+	// we can handle IsTooMuchChangeErr specially
+	if !liteErr.IsTooMuchChangeErr(err) {
+		return err
+	}
+
+	// try to update to mid
+	mid := (start + end) / 2
+	err = ic.updateToHeight(mid)
+	if err != nil {
+		return err
+	}
+
+	// if we made it to mid, we recurse
+	return ic.updateToHeight(h)
+}
diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go
new file mode 100644
index 000000000..db8160bdc
--- /dev/null
+++ b/lite/inquiring_certifier_test.go
@@ -0,0 +1,173 @@
+// nolint: vetshadow
+package lite_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/lite"
+)
+
+func TestInquirerValidPath(t *testing.T) {
+	assert, require := assert.New(t), require.New(t)
+	trust := lite.NewMemStoreProvider()
+	source := lite.NewMemStoreProvider()
+
+	// set up the validators to generate test blocks
+	var vote int64 = 10
+	keys := lite.GenValKeys(5)
+
+	// construct a bunch of commits, each at a greater height and with one
+	// more validator than the last
+	chainID := "inquiry-test"
+	consHash := []byte("params")
+	resHash := []byte("results")
+	count := 50
+	commits := make([]lite.FullCommit, count)
+	for i := 0; i < count; i++ {
+		// extend the keys by 1 each time
+		keys = keys.Extend(1)
+		vals := keys.ToValidators(vote, 0)
+		h := int64(20 + 10*i)
+		appHash := []byte(fmt.Sprintf("h=%d", h))
+		commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
+			len(keys))
+	}
+
+	// initialize a certifier with the initial state
+	cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
+	require.Nil(err)
+
+	// this should fail validation....
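+	// (the source provider holds no commits yet, so the certifier cannot find
+	//  a path from commits[0] to the latest validator set)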
+	commit := commits[count-1].Commit
+	err = cert.Certify(commit)
+	require.NotNil(err)
+
+	// adding a few commits in the middle should be insufficient
+	for i := 10; i < 13; i++ {
+		err := source.StoreCommit(commits[i])
+		require.Nil(err)
+	}
+	err = cert.Certify(commit)
+	assert.NotNil(err)
+
+	// with more info, we succeed
+	for i := 0; i < count; i++ {
+		err := source.StoreCommit(commits[i])
+		require.Nil(err)
+	}
+	err = cert.Certify(commit)
+	assert.Nil(err, "%+v", err)
+}
+
+func TestInquirerMinimalPath(t *testing.T) {
+	assert, require := assert.New(t), require.New(t)
+	trust := lite.NewMemStoreProvider()
+	source := lite.NewMemStoreProvider()
+
+	// set up the validators to generate test blocks
+	var vote int64 = 10
+	keys := lite.GenValKeys(5)
+
+	// construct a bunch of commits, each at a greater height than the last
+	chainID := "minimal-path"
+	consHash := []byte("other-params")
+	count := 12
+	commits := make([]lite.FullCommit, count)
+	for i := 0; i < count; i++ {
+		// extend the validators, so we are just below 2/3
+		keys = keys.Extend(len(keys)/2 - 1)
+		vals := keys.ToValidators(vote, 0)
+		h := int64(5 + 10*i)
+		appHash := []byte(fmt.Sprintf("h=%d", h))
+		resHash := []byte(fmt.Sprintf("res=%d", h))
+		commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
+			len(keys))
+	}
+
+	// initialize a certifier with the initial state
+	cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
+
+	// this should fail validation....
+	commit := commits[count-1].Commit
+	err := cert.Certify(commit)
+	require.NotNil(err)
+
+	// adding a few commits in the middle should be insufficient
+	for i := 5; i < 8; i++ {
+		err := source.StoreCommit(commits[i])
+		require.Nil(err)
+	}
+	err = cert.Certify(commit)
+	assert.NotNil(err)
+
+	// with more info, we succeed
+	for i := 0; i < count; i++ {
+		err := source.StoreCommit(commits[i])
+		require.Nil(err)
+	}
+	err = cert.Certify(commit)
+	assert.Nil(err, "%+v", err)
+}
+
+func TestInquirerVerifyHistorical(t *testing.T) {
+	assert, require := assert.New(t), require.New(t)
+	trust := lite.NewMemStoreProvider()
+	source := lite.NewMemStoreProvider()
+
+	// set up the validators to generate test blocks
+	var vote int64 = 10
+	keys := lite.GenValKeys(5)
+
+	// construct a bunch of commits, each at a greater height and with one
+	// more validator than the last
+	chainID := "inquiry-test"
+	count := 10
+	consHash := []byte("special-params")
+	commits := make([]lite.FullCommit, count)
+	for i := 0; i < count; i++ {
+		// extend the keys by 1 each time
+		keys = keys.Extend(1)
+		vals := keys.ToValidators(vote, 0)
+		h := int64(20 + 10*i)
+		appHash := []byte(fmt.Sprintf("h=%d", h))
+		resHash := []byte(fmt.Sprintf("res=%d", h))
+		commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
+			len(keys))
+	}
+
+	// initialize a certifier with the initial state
+	cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
+
+	// store a few commits as trust
+	for _, i := range []int{2, 5} {
+		trust.StoreCommit(commits[i])
+	}
+
+	// let's see if we can jump forward using trusted commits
+	err := source.StoreCommit(commits[7])
+	require.Nil(err, "%+v", err)
+	check := commits[7].Commit
+	err = cert.Certify(check)
+	require.Nil(err, "%+v", err)
+	assert.Equal(check.Height(), cert.LastHeight())
+
+	// add access to all commits via untrusted source
+	for i := 0; i < count; i++ {
+		err := source.StoreCommit(commits[i])
+		require.Nil(err)
+	}
+
+	// try to check an unknown seed in the past
+	mid := commits[3].Commit
+	err = cert.Certify(mid)
+	require.Nil(err, "%+v", err)
+	assert.Equal(mid.Height(), cert.LastHeight())
+
+	// and jump all the way forward again
+	end := commits[count-1].Commit
+	err = cert.Certify(end)
+	require.Nil(err, "%+v", err)
+	assert.Equal(end.Height(), cert.LastHeight())
+}
diff --git a/lite/memprovider.go b/lite/memprovider.go
new file mode 100644
index 000000000..ac0d83215
--- /dev/null
+++ b/lite/memprovider.go
@@ -0,0 +1,152 @@
+package lite
+
+import (
+	"encoding/hex"
+	"sort"
+	"sync"
+
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+type memStoreProvider struct {
+	mtx sync.RWMutex
+	// byHeight is sorted by Height on demand (see the sorted flag below);
+	// we need to support range search (nil, h].
+	// btree would be more efficient for larger sets
+	byHeight fullCommits
+	byHash   map[string]FullCommit
+
+	sorted bool
+}
+
+// fullCommits just exists to allow easy sorting
+type fullCommits []FullCommit
+
+func (s fullCommits) Len() int      { return len(s) }
+func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s fullCommits) Less(i, j int) bool {
+	return s[i].Height() < s[j].Height()
+}
+
+// NewMemStoreProvider returns a new in-memory provider.
+func NewMemStoreProvider() Provider {
+	return &memStoreProvider{
+		byHeight: fullCommits{},
+		byHash:   map[string]FullCommit{},
+	}
+}
+
+func (m *memStoreProvider) encodeHash(hash []byte) string {
+	return hex.EncodeToString(hash)
+}
+
+// StoreCommit stores a FullCommit after verifying it.
+func (m *memStoreProvider) StoreCommit(fc FullCommit) error {
+	// make sure the fc is self-consistent before saving
+	err := fc.ValidateBasic(fc.Commit.Header.ChainID)
+	if err != nil {
+		return err
+	}
+
+	// store the valid fc
+	key := m.encodeHash(fc.ValidatorsHash())
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	m.byHash[key] = fc
+	m.byHeight = append(m.byHeight, fc)
+	m.sorted = false
+	return nil
+}
+
+// GetByHeight returns the stored FullCommit closest to height h (with height <= h),
+// or an error if none is found.
+func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) {
+	// By heuristics, GetByHeight with linear search is fast enough
+	// for about 50 entries, but after that it needs binary search.
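+	// (both variants return the greatest stored height <= h: e.g. with commits
+	//  at heights 10, 20 and 30, a query for 25 yields the commit at 20)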
+ // See https://github.com/tendermint/tendermint/pull/1043#issue-285188242 + m.mtx.RLock() + n := len(m.byHeight) + m.mtx.RUnlock() + + if n <= 50 { + return m.getByHeightLinearSearch(h) + } + return m.getByHeightBinarySearch(h) +} + +func (m *memStoreProvider) sortByHeightIfNecessaryLocked() { + if !m.sorted { + sort.Sort(m.byHeight) + m.sorted = true + } +} + +func (m *memStoreProvider) getByHeightLinearSearch(h int64) (FullCommit, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.sortByHeightIfNecessaryLocked() + // search from highest to lowest + for i := len(m.byHeight) - 1; i >= 0; i-- { + if fc := m.byHeight[i]; fc.Height() <= h { + return fc, nil + } + } + return FullCommit{}, liteErr.ErrCommitNotFound() +} + +func (m *memStoreProvider) getByHeightBinarySearch(h int64) (FullCommit, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.sortByHeightIfNecessaryLocked() + low, high := 0, len(m.byHeight)-1 + var mid int + var hmid int64 + var midFC FullCommit + // Our goal is to either find: + // * item ByHeight with the query + // * greatest height with a height <= query + for low <= high { + mid = int(uint(low+high) >> 1) // Avoid an overflow + midFC = m.byHeight[mid] + hmid = midFC.Height() + switch { + case hmid == h: + return midFC, nil + case hmid < h: + low = mid + 1 + case hmid > h: + high = mid - 1 + } + } + + if high >= 0 { + if highFC := m.byHeight[high]; highFC.Height() < h { + return highFC, nil + } + } + return FullCommit{}, liteErr.ErrCommitNotFound() +} + +// GetByHash returns the FullCommit for the hash or an error if the commit is not found. +func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + fc, ok := m.byHash[m.encodeHash(hash)] + if !ok { + return fc, liteErr.ErrCommitNotFound() + } + return fc, nil +} + +// LatestCommit returns the latest FullCommit or an error if no commits exist. +func (m *memStoreProvider) LatestCommit() (FullCommit, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + l := len(m.byHeight) + if l == 0 { + return FullCommit{}, liteErr.ErrCommitNotFound() + } + m.sortByHeightIfNecessaryLocked() + return m.byHeight[l-1], nil +} diff --git a/lite/performance_test.go b/lite/performance_test.go new file mode 100644 index 000000000..8cd522cbb --- /dev/null +++ b/lite/performance_test.go @@ -0,0 +1,365 @@ +package lite + +import ( + "fmt" + "math/rand" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + liteErr "github.com/tendermint/tendermint/lite/errors" +) + +func TestMemStoreProvidergetByHeightBinaryAndLinearSameResult(t *testing.T) { + p := NewMemStoreProvider().(*memStoreProvider) + + // Store a bunch of commits at specific heights + // and then ensure that: + // * getByHeightLinearSearch + // * getByHeightBinarySearch + // both return the exact same result + + // 1. Non-existent height commits + nonExistent := []int64{-1000, -1, 0, 1, 10, 11, 17, 31, 67, 1000, 1e9} + ensureNonExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, nonExistent) + ensureNonExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, nonExistent) + + // 2. Save some known height commits + knownHeights := []int64{0, 1, 7, 9, 12, 13, 18, 44, 23, 16, 1024, 100, 199, 1e9} + createAndStoreCommits(t, p, knownHeights) + + // 3. 
Now check if those heights are retrieved + ensureExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, knownHeights) + ensureExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, knownHeights) + + // 4. And now for the height probing to ensure that any height + // requested returns a fullCommit of height <= requestedHeight. + comparegetByHeightAlgorithms(t, p, 0, 0) + comparegetByHeightAlgorithms(t, p, 1, 1) + comparegetByHeightAlgorithms(t, p, 2, 1) + comparegetByHeightAlgorithms(t, p, 5, 1) + comparegetByHeightAlgorithms(t, p, 7, 7) + comparegetByHeightAlgorithms(t, p, 10, 9) + comparegetByHeightAlgorithms(t, p, 12, 12) + comparegetByHeightAlgorithms(t, p, 14, 13) + comparegetByHeightAlgorithms(t, p, 19, 18) + comparegetByHeightAlgorithms(t, p, 43, 23) + comparegetByHeightAlgorithms(t, p, 45, 44) + comparegetByHeightAlgorithms(t, p, 1025, 1024) + comparegetByHeightAlgorithms(t, p, 101, 100) + comparegetByHeightAlgorithms(t, p, 1e3, 199) + comparegetByHeightAlgorithms(t, p, 1e4, 1024) + comparegetByHeightAlgorithms(t, p, 1e9, 1e9) + comparegetByHeightAlgorithms(t, p, 1e9+1, 1e9) +} + +func createAndStoreCommits(t *testing.T, p Provider, heights []int64) { + chainID := "cache-best-height-binary-and-linear" + appHash := []byte("0xdeadbeef") + keys := GenValKeys(len(heights) / 2) + + for _, h := range heights { + vals := keys.ToValidators(10, int64(len(heights)/2)) + fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + err := p.StoreCommit(fc) + require.NoError(t, err, "StoreCommit height=%d", h) + } +} + +func comparegetByHeightAlgorithms(t *testing.T, p *memStoreProvider, ask, expect int64) { + algos := map[string]func(int64) (FullCommit, error){ + "getHeightByLinearSearch": p.getByHeightLinearSearch, + "getHeightByBinarySearch": p.getByHeightBinarySearch, + } + + for algo, fn := range algos { + fc, err := fn(ask) + // t.Logf("%s got=%v want=%d", algo, expect, fc.Height()) + require.Nil(t, err, "%s: %+v", algo, err) + if assert.Equal(t, expect, fc.Height()) { + err = p.StoreCommit(fc) + require.Nil(t, err, "%s: %+v", algo, err) + } + } +} + +var blankFullCommit FullCommit + +func ensureNonExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { + for i, qh := range data { + fc, err := fn(qh) + assert.NotNil(t, err, "#%d: %s: height=%d should return non-nil error", i, prefix, qh) + assert.Equal(t, fc, blankFullCommit, "#%d: %s: height=%d\ngot =%+v\nwant=%+v", i, prefix, qh, fc, blankFullCommit) + } +} + +func ensureExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { + for i, qh := range data { + fc, err := fn(qh) + assert.Nil(t, err, "#%d: %s: height=%d should not return an error: %v", i, prefix, qh, err) + assert.NotEqual(t, fc, blankFullCommit, "#%d: %s: height=%d got a blankCommit", i, prefix, qh) + } +} + +func BenchmarkGenCommit20(b *testing.B) { + keys := GenValKeys(20) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommit100(b *testing.B) { + keys := GenValKeys(100) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommitSec20(b *testing.B) { + keys := GenSecpValKeys(20) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommitSec100(b *testing.B) { + keys := GenSecpValKeys(100) + benchmarkGenCommit(b, keys) +} + +func benchmarkGenCommit(b *testing.B, keys ValKeys) { + chainID := fmt.Sprintf("bench-%d", len(keys)) + vals := keys.ToValidators(20, 10) + for i := 0; i < b.N; 
i++ { + h := int64(1 + i) + appHash := []byte(fmt.Sprintf("h=%d", h)) + resHash := []byte(fmt.Sprintf("res=%d", h)) + keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), resHash, 0, len(keys)) + } +} + +// this benchmarks generating one key +func BenchmarkGenValKeys(b *testing.B) { + keys := GenValKeys(20) + for i := 0; i < b.N; i++ { + keys = keys.Extend(1) + } +} + +// this benchmarks generating one key +func BenchmarkGenSecpValKeys(b *testing.B) { + keys := GenSecpValKeys(20) + for i := 0; i < b.N; i++ { + keys = keys.Extend(1) + } +} + +func BenchmarkToValidators20(b *testing.B) { + benchmarkToValidators(b, 20) +} + +func BenchmarkToValidators100(b *testing.B) { + benchmarkToValidators(b, 100) +} + +// this benchmarks constructing the validator set (.PubKey() * nodes) +func benchmarkToValidators(b *testing.B, nodes int) { + keys := GenValKeys(nodes) + for i := 1; i <= b.N; i++ { + keys.ToValidators(int64(2*i), int64(i)) + } +} + +func BenchmarkToValidatorsSec100(b *testing.B) { + benchmarkToValidatorsSec(b, 100) +} + +// this benchmarks constructing the validator set (.PubKey() * nodes) +func benchmarkToValidatorsSec(b *testing.B, nodes int) { + keys := GenSecpValKeys(nodes) + for i := 1; i <= b.N; i++ { + keys.ToValidators(int64(2*i), int64(i)) + } +} + +func BenchmarkCertifyCommit20(b *testing.B) { + keys := GenValKeys(20) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommit100(b *testing.B) { + keys := GenValKeys(100) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommitSec20(b *testing.B) { + keys := GenSecpValKeys(20) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommitSec100(b *testing.B) { + keys := GenSecpValKeys(100) + benchmarkCertifyCommit(b, keys) +} + +func benchmarkCertifyCommit(b *testing.B, keys ValKeys) { + chainID := "bench-certify" + vals := keys.ToValidators(20, 10) + cert := NewStaticCertifier(chainID, vals) + check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys)) + for i := 0; i < b.N; i++ { + err := cert.Certify(check) + if err != nil { + panic(err) + } + } + +} + +type algo bool + +const ( + linearSearch = true + binarySearch = false +) + +// Lazy load the commits +var fcs5, fcs50, fcs100, fcs500, fcs1000 []FullCommit +var h5, h50, h100, h500, h1000 []int64 +var commitsOnce sync.Once + +func lazyGenerateFullCommits(b *testing.B) { + b.Logf("Generating FullCommits") + commitsOnce.Do(func() { + fcs5, h5 = genFullCommits(nil, nil, 5) + b.Logf("Generated 5 FullCommits") + fcs50, h50 = genFullCommits(fcs5, h5, 50) + b.Logf("Generated 50 FullCommits") + fcs100, h100 = genFullCommits(fcs50, h50, 100) + b.Logf("Generated 100 FullCommits") + fcs500, h500 = genFullCommits(fcs100, h100, 500) + b.Logf("Generated 500 FullCommits") + fcs1000, h1000 = genFullCommits(fcs500, h500, 1000) + b.Logf("Generated 1000 FullCommits") + }) +} + +func BenchmarkMemStoreProviderGetByHeightLinearSearch5(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, linearSearch) +} + +func BenchmarkMemStoreProviderGetByHeightLinearSearch50(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, linearSearch) +} + +func BenchmarkMemStoreProviderGetByHeightLinearSearch100(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, linearSearch) +} + +func BenchmarkMemStoreProviderGetByHeightLinearSearch500(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, linearSearch) +} + +func 
BenchmarkMemStoreProviderGetByHeightLinearSearch1000(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, linearSearch) +} + +func BenchmarkMemStoreProviderGetByHeightBinarySearch5(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, binarySearch) +} + +func BenchmarkMemStoreProviderGetByHeightBinarySearch50(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, binarySearch) +} + +func BenchmarkMemStoreProviderGetByHeightBinarySearch100(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, binarySearch) +} + +func BenchmarkMemStoreProviderGetByHeightBinarySearch500(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, binarySearch) +} + +func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { + benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) +} + +var rng = rand.New(rand.NewSource(10)) + +func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { + lazyGenerateFullCommits(b) + + b.StopTimer() + mp := NewMemStoreProvider() + for i, fc := range fcs { + if err := mp.StoreCommit(fc); err != nil { + b.Fatalf("FullCommit #%d: err: %v", i, err) + } + } + qHeights := make([]int64, len(fHeights)) + copy(qHeights, fHeights) + // Append some non-existent heights to trigger the worst cases. + qHeights = append(qHeights, 19, -100, -10000, 1e7, -17, 31, -1e9) + + memP := mp.(*memStoreProvider) + searchFn := memP.getByHeightLinearSearch + if algo == binarySearch { // nolint + searchFn = memP.getByHeightBinarySearch + } + + hPerm := rng.Perm(len(qHeights)) + b.StartTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, j := range hPerm { + h := qHeights[j] + if _, err := searchFn(h); err != nil { + } + } + } + b.ReportAllocs() +} + +func genFullCommits(prevFC []FullCommit, prevH []int64, want int) ([]FullCommit, []int64) { + fcs := make([]FullCommit, len(prevFC)) + copy(fcs, prevFC) + heights := make([]int64, len(prevH)) + copy(heights, prevH) + + appHash := []byte("benchmarks") + chainID := "benchmarks-gen-full-commits" + n := want + keys := GenValKeys(2 + (n / 3)) + for i := 0; i < n; i++ { + vals := keys.ToValidators(10, int64(n/2)) + h := int64(20 + 10*i) + fcs = append(fcs, keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)) + heights = append(heights, h) + } + return fcs, heights +} + +func TestMemStoreProviderLatestCommitAlwaysUsesSorted(t *testing.T) { + p := NewMemStoreProvider().(*memStoreProvider) + // 1. With no commits yet stored, it should return ErrCommitNotFound + got, err := p.LatestCommit() + require.Equal(t, err.Error(), liteErr.ErrCommitNotFound().Error(), "should return ErrCommitNotFound()") + require.Equal(t, got, blankFullCommit, "With no fullcommits, it should return a blank FullCommit") + + // 2. Generate some full commits now and we'll add them unsorted. + genAndStoreCommitsOfHeight(t, p, 27, 100, 1, 12, 1000, 17, 91) + fc, err := p.LatestCommit() + require.Nil(t, err, "with commits saved no error expected") + require.NotEqual(t, fc, blankFullCommit, "with commits saved no blank FullCommit") + require.Equal(t, fc.Height(), int64(1000), "the latest commit i.e. 
the largest expected") +} + +func genAndStoreCommitsOfHeight(t *testing.T, p Provider, heights ...int64) { + n := len(heights) + appHash := []byte("tests") + chainID := "tests-gen-full-commits" + keys := GenValKeys(2 + (n / 3)) + for i := 0; i < n; i++ { + h := heights[i] + vals := keys.ToValidators(10, int64(n/2)) + fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + err := p.StoreCommit(fc) + require.NoError(t, err, "StoreCommit height=%d", h) + } +} diff --git a/lite/provider.go b/lite/provider.go new file mode 100644 index 000000000..22dc964a1 --- /dev/null +++ b/lite/provider.go @@ -0,0 +1,103 @@ +package lite + +// Provider is used to get more validators by other means. +// +// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.... +type Provider interface { + // StoreCommit saves a FullCommit after we have verified it, + // so we can query for it later. Important for updating our + // store of trusted commits. + StoreCommit(fc FullCommit) error + // GetByHeight returns the closest commit with height <= h. + GetByHeight(h int64) (FullCommit, error) + // GetByHash returns a commit exactly matching this validator hash. + GetByHash(hash []byte) (FullCommit, error) + // LatestCommit returns the newest commit stored. + LatestCommit() (FullCommit, error) +} + +// cacheProvider allows you to place one or more caches in front of a source +// Provider. It runs through them in order until a match is found. +// So you can keep a local cache, and check with the network if +// no data is there. +type cacheProvider struct { + Providers []Provider +} + +// NewCacheProvider returns a new provider which wraps multiple other providers. +func NewCacheProvider(providers ...Provider) Provider { + return cacheProvider{ + Providers: providers, + } +} + +// StoreCommit tries to add the seed to all providers. +// +// Aborts on first error it encounters (closest provider) +func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { + for _, p := range c.Providers { + err = p.StoreCommit(fc) + if err != nil { + break + } + } + return err +} + +// GetByHeight should return the closest possible match from all providers. +// +// The Cache is usually organized in order from cheapest call (memory) +// to most expensive calls (disk/network). However, since GetByHeight returns +// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would +// give us the exact match, a naive "stop at first non-error" would hide +// the actual desired results. +// +// Thus, we query each provider in order until we find an exact match +// or we finished querying them all. If at least one returned a non-error, +// then this returns the best match (minimum h-h'). +func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) { + for _, p := range c.Providers { + var tfc FullCommit + tfc, err = p.GetByHeight(h) + if err == nil { + if tfc.Height() > fc.Height() { + fc = tfc + } + if tfc.Height() == h { + break + } + } + } + // even if the last one had an error, if any was a match, this is good + if fc.Height() > 0 { + err = nil + } + return fc, err +} + +// GetByHash returns the FullCommit for the hash or an error if the commit is not found. +func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { + for _, p := range c.Providers { + fc, err = p.GetByHash(hash) + if err == nil { + break + } + } + return fc, err +} + +// LatestCommit returns the latest FullCommit or an error if no commit exists. 
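+//
+// A wiring sketch for cacheProvider itself (hypothetical directory; GetCertifier
+// in lite/proxy combines providers the same way):
+//
+//	trust := NewCacheProvider(NewMemStoreProvider(), files.NewProvider("/path"))
+//	fc, err := trust.LatestCommit() // newest commit across all providers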
+func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { + for _, p := range c.Providers { + var tfc FullCommit + tfc, err = p.LatestCommit() + if err == nil && tfc.Height() > fc.Height() { + fc = tfc + } + } + // even if the last one had an error, if any was a match, this is good + if fc.Height() > 0 { + err = nil + } + return fc, err +} diff --git a/lite/provider_test.go b/lite/provider_test.go new file mode 100644 index 000000000..77b5b1a85 --- /dev/null +++ b/lite/provider_test.go @@ -0,0 +1,149 @@ +// nolint: vetshadow +package lite_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" +) + +// missingProvider doesn't store anything, always a miss +// Designed as a mock for testing +type missingProvider struct{} + +// NewMissingProvider returns a provider which does not store anything and always misses. +func NewMissingProvider() lite.Provider { + return missingProvider{} +} + +func (missingProvider) StoreCommit(lite.FullCommit) error { return nil } +func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) { + return lite.FullCommit{}, liteErr.ErrCommitNotFound() +} +func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) { + return lite.FullCommit{}, liteErr.ErrCommitNotFound() +} +func (missingProvider) LatestCommit() (lite.FullCommit, error) { + return lite.FullCommit{}, liteErr.ErrCommitNotFound() +} + +func TestMemProvider(t *testing.T) { + p := lite.NewMemStoreProvider() + checkProvider(t, p, "test-mem", "empty") +} + +func TestCacheProvider(t *testing.T) { + p := lite.NewCacheProvider( + NewMissingProvider(), + lite.NewMemStoreProvider(), + NewMissingProvider(), + ) + checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") +} + +func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { + assert, require := assert.New(t), require.New(t) + appHash := []byte(app) + keys := lite.GenValKeys(5) + count := 10 + + // make a bunch of commits... + commits := make([]lite.FullCommit, count) + for i := 0; i < count; i++ { + // two commits for each validator, to check how we handle dups + // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... 
+		vals := keys.ToValidators(10, int64(count/2))
+		h := int64(20 + 10*i)
+		commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)
+	}
+
+	// check provider is empty
+	fc, err := p.GetByHeight(20)
+	require.NotNil(err)
+	assert.True(liteErr.IsCommitNotFoundErr(err))
+
+	fc, err = p.GetByHash(commits[3].ValidatorsHash())
+	require.NotNil(err)
+	assert.True(liteErr.IsCommitNotFoundErr(err))
+
+	// now add them all to the provider
+	for _, s := range commits {
+		err = p.StoreCommit(s)
+		require.Nil(err)
+		// and make sure we can get it back
+		s2, err := p.GetByHash(s.ValidatorsHash())
+		assert.Nil(err)
+		assert.Equal(s, s2)
+		// by height as well
+		s2, err = p.GetByHeight(s.Height())
+		assert.Nil(err)
+		assert.Equal(s, s2)
+	}
+
+	// make sure we get the last hash if we overstep
+	fc, err = p.GetByHeight(5000)
+	if assert.Nil(err) {
+		assert.Equal(commits[count-1].Height(), fc.Height())
+		assert.Equal(commits[count-1], fc)
+	}
+
+	// and middle ones as well
+	fc, err = p.GetByHeight(47)
+	if assert.Nil(err) {
+		// we only step by 10, so 40 must be the one below this
+		assert.EqualValues(40, fc.Height())
+	}
+}
+
+// checkGetHeight fetches a commit by height and, if it matches the expected
+// height, stores it back into the provider
+func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) {
+	fc, err := p.GetByHeight(ask)
+	require.Nil(t, err, "GetByHeight")
+	if assert.Equal(t, expect, fc.Height()) {
+		err = p.StoreCommit(fc)
+		require.Nil(t, err, "StoreCommit")
+	}
+}
+
+func TestCacheGetsBestHeight(t *testing.T) {
+	require := require.New(t)
+
+	// we will write data to the second level of the cache (p2),
+	// and then check what ends up cached in the first level (p)
+	p := lite.NewMemStoreProvider()
+	p2 := lite.NewMemStoreProvider()
+	cp := lite.NewCacheProvider(p, p2)
+
+	chainID := "cache-best-height"
+	appHash := []byte("01234567")
+	keys := lite.GenValKeys(5)
+	count := 10
+
+	// set a bunch of commits
+	for i := 0; i < count; i++ {
+		vals := keys.ToValidators(10, int64(count/2))
+		h := int64(10 * (i + 1))
+		fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)
+		err := p2.StoreCommit(fc)
+		require.NoError(err)
+	}
+
+	// let's get a few heights from the cache and set them properly
+	checkGetHeight(t, cp, 57, 50)
+	checkGetHeight(t, cp, 33, 30)
+
+	// make sure they are set in p as well (but nothing else)
+	checkGetHeight(t, p, 44, 30)
+	checkGetHeight(t, p, 50, 50)
+	checkGetHeight(t, p, 99, 50)
+
+	// now, query for a higher value
+	checkGetHeight(t, p2, 99, 90)
+	checkGetHeight(t, cp, 99, 90)
+}
diff --git a/lite/proxy/block.go b/lite/proxy/block.go
new file mode 100644
index 000000000..4cff9ee68
--- /dev/null
+++ b/lite/proxy/block.go
@@ -0,0 +1,49 @@
+package proxy
+
+import (
+	"bytes"
+
+	"github.com/pkg/errors"
+
+	"github.com/tendermint/tendermint/lite"
+	certerr "github.com/tendermint/tendermint/lite/errors"
+	"github.com/tendermint/tendermint/types"
+)
+
+func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error {
+	if meta == nil {
+		return errors.New("expecting a non-nil BlockMeta")
+	}
+	// TODO: check the BlockID??
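+	// (ValidateHeader below checks the height and compares the full header
+	//  hash against the certified commit's header)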
+ return ValidateHeader(meta.Header, check) +} + +func ValidateBlock(meta *types.Block, check lite.Commit) error { + if meta == nil { + return errors.New("expecting a non-nil Block") + } + err := ValidateHeader(meta.Header, check) + if err != nil { + return err + } + if !bytes.Equal(meta.Data.Hash(), meta.Header.DataHash) { + return errors.New("Data hash doesn't match header") + } + return nil +} + +func ValidateHeader(head *types.Header, check lite.Commit) error { + if head == nil { + return errors.New("expecting a non-nil Header") + } + // make sure they are for the same height (obvious fail) + if head.Height != check.Height() { + return certerr.ErrHeightMismatch(head.Height, check.Height()) + } + // check if they are equal by using hashes + chead := check.Header + if !bytes.Equal(head.Hash(), chead.Hash()) { + return errors.New("Headers don't match") + } + return nil +} diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go new file mode 100644 index 000000000..6e319dc0d --- /dev/null +++ b/lite/proxy/certifier.go @@ -0,0 +1,35 @@ +package proxy + +import ( + "github.com/tendermint/tendermint/lite" + certclient "github.com/tendermint/tendermint/lite/client" + "github.com/tendermint/tendermint/lite/files" +) + +func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) { + trust := lite.NewCacheProvider( + lite.NewMemStoreProvider(), + files.NewProvider(rootDir), + ) + + source := certclient.NewHTTPProvider(nodeAddr) + + // XXX: total insecure hack to avoid `init` + fc, err := source.LatestCommit() + /* XXX + // this gets the most recent verified commit + fc, err := trust.LatestCommit() + if certerr.IsCommitNotFoundErr(err) { + return nil, errors.New("Please run init first to establish a root of trust") + }*/ + if err != nil { + return nil, err + } + + cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source) + if err != nil { + return nil, err + } + + return cert, nil +} diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go new file mode 100644 index 000000000..5a2713e3c --- /dev/null +++ b/lite/proxy/errors.go @@ -0,0 +1,22 @@ +package proxy + +import ( + "fmt" + + "github.com/pkg/errors" +) + +//-------------------------------------------- + +var errNoData = fmt.Errorf("No data returned for query") + +// IsNoDataErr checks whether an error is due to a query returning empty data +func IsNoDataErr(err error) bool { + return errors.Cause(err) == errNoData +} + +func ErrNoData() error { + return errors.WithStack(errNoData) +} + +//-------------------------------------------- diff --git a/lite/proxy/errors_test.go b/lite/proxy/errors_test.go new file mode 100644 index 000000000..7f51be50f --- /dev/null +++ b/lite/proxy/errors_test.go @@ -0,0 +1,17 @@ +package proxy + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorNoData(t *testing.T) { + e1 := ErrNoData() + assert.True(t, IsNoDataErr(e1)) + + e2 := errors.New("foobar") + assert.False(t, IsNoDataErr(e2)) + assert.False(t, IsNoDataErr(nil)) +} diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go new file mode 100644 index 000000000..0294ddf68 --- /dev/null +++ b/lite/proxy/proxy.go @@ -0,0 +1,78 @@ +package proxy + +import ( + "net/http" + + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" + + rpcclient "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/rpc/core" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpc "github.com/tendermint/tendermint/rpc/lib/server" +) 
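+
+// A rough end-to-end usage sketch (hypothetical addresses; GetCertifier and
+// SecureClient live in certifier.go and wrapper.go of this package):
+//
+//	c := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")
+//	cert, err := GetCertifier("my-chain", rootDir, "tcp://localhost:26657")
+//	sc := SecureClient(c, cert)
+//	err = StartProxy(sc, ":8888", logger)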
+
+const (
+	wsEndpoint = "/websocket"
+)
+
+// StartProxy will start the websocket manager on the client,
+// set up the rpc routes to proxy via the given client,
+// and start up an http/rpc server on the address given by listenAddr (eg. :1234)
func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error {
+	err := c.Start()
+	if err != nil {
+		return err
+	}
+
+	cdc := amino.NewCodec()
+	ctypes.RegisterAmino(cdc)
+	r := RPCRoutes(c)
+
+	// build the handler...
+	mux := http.NewServeMux()
+	rpc.RegisterRPCFuncs(mux, r, cdc, logger)
+
+	wm := rpc.NewWebsocketManager(r, cdc, rpc.EventSubscriber(c))
+	wm.SetLogger(logger)
+	core.SetLogger(logger)
+	mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)
+
+	// TODO: limit max number of open connections rpc.Config{MaxOpenConnections: X}
+	_, err = rpc.StartHTTPServer(listenAddr, mux, logger, rpc.Config{})
+
+	return err
+}
+
+// RPCRoutes just routes everything to the given client, as if it were
+// a tendermint fullnode.
+//
+// If we want security, the client must implement it as a secure client.
+func RPCRoutes(c rpcclient.Client) map[string]*rpc.RPCFunc {
+
+	return map[string]*rpc.RPCFunc{
+		// Subscribe/unsubscribe are reserved for websocket events.
+		// We can just use the core tendermint impl, which uses the
+		// EventSwitch we registered in NewWebsocketManager above
+		"subscribe":   rpc.NewWSRPCFunc(core.Subscribe, "query"),
+		"unsubscribe": rpc.NewWSRPCFunc(core.Unsubscribe, "query"),
+
+		// info API
+		"status":     rpc.NewRPCFunc(c.Status, ""),
+		"blockchain": rpc.NewRPCFunc(c.BlockchainInfo, "minHeight,maxHeight"),
+		"genesis":    rpc.NewRPCFunc(c.Genesis, ""),
+		"block":      rpc.NewRPCFunc(c.Block, "height"),
+		"commit":     rpc.NewRPCFunc(c.Commit, "height"),
+		"tx":         rpc.NewRPCFunc(c.Tx, "hash,prove"),
+		"validators": rpc.NewRPCFunc(c.Validators, ""),
+
+		// broadcast API
+		"broadcast_tx_commit": rpc.NewRPCFunc(c.BroadcastTxCommit, "tx"),
+		"broadcast_tx_sync":   rpc.NewRPCFunc(c.BroadcastTxSync, "tx"),
+		"broadcast_tx_async":  rpc.NewRPCFunc(c.BroadcastTxAsync, "tx"),
+
+		// abci API
+		"abci_query": rpc.NewRPCFunc(c.ABCIQuery, "path,data,prove"),
+		"abci_info":  rpc.NewRPCFunc(c.ABCIInfo, ""),
+	}
+}
diff --git a/lite/proxy/query.go b/lite/proxy/query.go
new file mode 100644
index 000000000..0ca5be174
--- /dev/null
+++ b/lite/proxy/query.go
@@ -0,0 +1,152 @@
+package proxy
+
+import (
+	"github.com/pkg/errors"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/tendermint/tendermint/lite"
+	"github.com/tendermint/tendermint/lite/client"
+	certerr "github.com/tendermint/tendermint/lite/errors"
+	rpcclient "github.com/tendermint/tendermint/rpc/client"
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+)
+
+// KeyProof represents a proof of existence or absence of a single key.
+// Copied from iavl repo. TODO
+type KeyProof interface {
+	// Verify verifies the proof is valid. To verify absence,
+	// the value should be nil.
+	Verify(key, value, root []byte) error
+
+	// Root returns the root hash of the proof.
+	Root() []byte
+
+	// Serialize itself
+	Bytes() []byte
+}
+
+// GetWithProof will query the key on the given node, and verify it has
+// a valid proof, as defined by the certifier.
+//
+// If there is any error in checking, returns an error.
+// If val is non-empty, proof should be KeyExistsProof
+// If val is empty, proof should be KeyMissingProof
+func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client,
+	cert lite.Certifier) (
+	val cmn.HexBytes, height int64, proof KeyProof, err error) {
+
+	if reqHeight < 0 {
+		err = errors.Errorf("Height cannot be negative")
+		return
+	}
+
+	_resp, proof, err := GetWithProofOptions("/key", key,
+		rpcclient.ABCIQueryOptions{Height: int64(reqHeight)},
+		node, cert)
+	if _resp != nil {
+		resp := _resp.Response
+		val, height = resp.Value, resp.Height
+	}
+	return val, height, proof, err
+}
+
+// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions
+func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOptions,
+	node rpcclient.Client, cert lite.Certifier) (
+	*ctypes.ResultABCIQuery, KeyProof, error) {
+
+	_resp, err := node.ABCIQueryWithOptions(path, key, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	resp := _resp.Response
+
+	// make sure the response is error-free and carries a proper height
+	if resp.IsErr() {
+		err = errors.Errorf("Query error for key %X: %d", key, resp.Code)
+		return nil, nil, err
+	}
+	if len(resp.Key) == 0 || len(resp.Proof) == 0 {
+		return nil, nil, ErrNoData()
+	}
+	if resp.Height == 0 {
+		return nil, nil, errors.New("Height returned is zero")
+	}
+
+	// AppHash for height H is in header H+1, so we certify the commit at H+1
+	commit, err := GetCertifiedCommit(resp.Height+1, node, cert)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	_ = commit
+	return &ctypes.ResultABCIQuery{Response: resp}, nil, nil
+
+	/* // TODO refactor so iavl stuff is not in tendermint core
+	// https://github.com/tendermint/tendermint/issues/1183
+	if len(resp.Value) > 0 {
+		// The key was found, construct a proof of existence.
+		proof, err := iavl.ReadKeyProof(resp.Proof)
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "Error reading proof")
+		}
+
+		eproof, ok := proof.(*iavl.KeyExistsProof)
+		if !ok {
+			return nil, nil, errors.New("Expected KeyExistsProof for non-empty value")
+		}
+
+		// Validate the proof against the certified header to ensure data integrity.
+		err = eproof.Verify(resp.Key, resp.Value, commit.Header.AppHash)
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "Couldn't verify proof")
+		}
+		return &ctypes.ResultABCIQuery{Response: resp}, eproof, nil
+	}
+
+	// The key wasn't found, construct a proof of non-existence.
+	proof, err := iavl.ReadKeyProof(resp.Proof)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "Error reading proof")
+	}
+
+	aproof, ok := proof.(*iavl.KeyAbsentProof)
+	if !ok {
+		return nil, nil, errors.New("Expected KeyAbsentProof for empty Value")
+	}
+
+	// Validate the proof against the certified header to ensure data integrity.
+	err = aproof.Verify(resp.Key, nil, commit.Header.AppHash)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "Couldn't verify proof")
+	}
+	return &ctypes.ResultABCIQuery{Response: resp}, aproof, ErrNoData()
+	*/
+}
+
+// GetCertifiedCommit gets the signed header for a given height
+// and certifies it. Returns error if unable to get a proven header.
+func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) {
+
+	// FIXME: cannot use cert.GetByHeight for now, as it also requires
+	// Validators and will fail on querying tendermint for non-current height.
+	// When this is supported, we should use it instead...
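+	//
+	// The flow below: wait for the chain to reach height h, download the
+	// commit, check it is for the height we asked for, then run it through
+	// the certifier before returning it.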
+ rpcclient.WaitForHeight(node, h, nil) + cresp, err := node.Commit(&h) + if err != nil { + return lite.Commit{}, err + } + + commit := client.CommitFromResult(cresp) + // validate downloaded checkpoint with our request and trust store. + if commit.Height() != h { + return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height()) + } + + if err = cert.Certify(commit); err != nil { + return lite.Commit{}, err + } + + return commit, nil +} diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go new file mode 100644 index 000000000..38a43af2b --- /dev/null +++ b/lite/proxy/query_test.go @@ -0,0 +1,139 @@ +package proxy + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + + "github.com/tendermint/tendermint/lite" + certclient "github.com/tendermint/tendermint/lite/client" + nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/rpc/client" + rpctest "github.com/tendermint/tendermint/rpc/test" + "github.com/tendermint/tendermint/types" +) + +var node *nm.Node + +// TODO fix tests!! + +func TestMain(m *testing.M) { + app := kvstore.NewKVStoreApplication() + + node = rpctest.StartTendermint(app) + + code := m.Run() + + node.Stop() + node.Wait() + os.Exit(code) +} + +func kvstoreTx(k, v []byte) []byte { + return []byte(fmt.Sprintf("%s=%s", k, v)) +} + +func _TestAppProofs(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + cl := client.NewLocal(node) + client.WaitForHeight(cl, 1, nil) + + k := []byte("my-key") + v := []byte("my-value") + + tx := kvstoreTx(k, v) + br, err := cl.BroadcastTxCommit(tx) + require.NoError(err, "%+v", err) + require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) + require.EqualValues(0, br.DeliverTx.Code) + brh := br.Height + + // This sets up our trust on the node based on some past point. + source := certclient.NewProvider(cl) + seed, err := source.GetByHeight(brh - 2) + require.NoError(err, "%+v", err) + cert := lite.NewStaticCertifier("my-chain", seed.Validators) + + client.WaitForHeight(cl, 3, nil) + latest, err := source.LatestCommit() + require.NoError(err, "%+v", err) + rootHash := latest.Header.AppHash + + // verify a query before the tx block has no data (and valid non-exist proof) + bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) + fmt.Println(bs, height, proof, err) + require.NotNil(err) + require.True(IsNoDataErr(err), err.Error()) + require.Nil(bs) + + // but given that block it is good + bs, height, proof, err = GetWithProof(k, brh, cl, cert) + require.NoError(err, "%+v", err) + require.NotNil(proof) + require.True(height >= int64(latest.Header.Height)) + + // Alexis there is a bug here, somehow the above code gives us rootHash = nil + // and proof.Verify doesn't care, while proofNotExists.Verify fails. + // I am hacking this in to make it pass, but please investigate further. + rootHash = proof.Root() + + //err = wire.ReadBinaryBytes(bs, &data) + //require.NoError(err, "%+v", err) + assert.EqualValues(v, bs) + err = proof.Verify(k, bs, rootHash) + assert.NoError(err, "%+v", err) + + // Test non-existing key. 
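+	// An absence proof should verify against the same root hash, but must
+	// fail when checked against a key that does exist.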
+ missing := []byte("my-missing-key") + bs, _, proof, err = GetWithProof(missing, 0, cl, cert) + require.True(IsNoDataErr(err)) + require.Nil(bs) + require.NotNil(proof) + err = proof.Verify(missing, nil, rootHash) + assert.NoError(err, "%+v", err) + err = proof.Verify(k, nil, rootHash) + assert.Error(err) +} + +func _TestTxProofs(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + cl := client.NewLocal(node) + client.WaitForHeight(cl, 1, nil) + + tx := kvstoreTx([]byte("key-a"), []byte("value-a")) + br, err := cl.BroadcastTxCommit(tx) + require.NoError(err, "%+v", err) + require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) + require.EqualValues(0, br.DeliverTx.Code) + brh := br.Height + + source := certclient.NewProvider(cl) + seed, err := source.GetByHeight(brh - 2) + require.NoError(err, "%+v", err) + cert := lite.NewStaticCertifier("my-chain", seed.Validators) + + // First let's make sure a bogus transaction hash returns a valid non-existence proof. + key := types.Tx([]byte("bogus")).Hash() + res, err := cl.Tx(key, true) + require.NotNil(err) + require.Contains(err.Error(), "not found") + + // Now let's check with the real tx hash. + key = types.Tx(tx).Hash() + res, err = cl.Tx(key, true) + require.NoError(err, "%+v", err) + require.NotNil(res) + err = res.Proof.Validate(key) + assert.NoError(err, "%+v", err) + + commit, err := GetCertifiedCommit(br.Height, cl, cert) + require.Nil(err, "%+v", err) + require.Equal(res.Proof.RootHash, commit.Header.DataHash) +} diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go new file mode 100644 index 000000000..782a6aabb --- /dev/null +++ b/lite/proxy/validate_test.go @@ -0,0 +1,218 @@ +package proxy_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/lite" + "github.com/tendermint/tendermint/lite/proxy" + "github.com/tendermint/tendermint/types" +) + +var ( + deadBeefTxs = types.Txs{[]byte("DE"), []byte("AD"), []byte("BE"), []byte("EF")} + deadBeefHash = deadBeefTxs.Hash() + testTime1 = time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC) + testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC) +) + +var hdrHeight11 = &types.Header{ + Height: 11, + Time: testTime1, + ValidatorsHash: []byte("Tendermint"), +} + +func TestValidateBlock(t *testing.T) { + tests := []struct { + block *types.Block + commit lite.Commit + wantErr string + }{ + { + block: nil, wantErr: "non-nil Block", + }, + { + block: &types.Block{}, wantErr: "nil Header", + }, + { + block: &types.Block{Header: new(types.Header)}, + }, + + // Start Header.Height mismatch test + { + block: &types.Block{Header: &types.Header{Height: 10}}, + commit: lite.Commit{Header: &types.Header{Height: 11}}, + wantErr: "don't match - 10 vs 11", + }, + + { + block: &types.Block{Header: &types.Header{Height: 11}}, + commit: lite.Commit{Header: &types.Header{Height: 11}}, + }, + // End Header.Height mismatch test + + // Start Header.Hash mismatch test + { + block: &types.Block{Header: hdrHeight11}, + commit: lite.Commit{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", + }, + + { + block: &types.Block{Header: hdrHeight11}, + commit: lite.Commit{Header: hdrHeight11}, + }, + // End Header.Hash mismatch test + + // Start Header.Data hash mismatch test + { + block: &types.Block{ + Header: &types.Header{Height: 11}, + Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, + }, + commit: lite.Commit{ + Header: &types.Header{Height: 11}, + Commit: &types.Commit{BlockID: 
types.BlockID{Hash: []byte("0xDEADBEEF")}}, + }, + wantErr: "Data hash doesn't match header", + }, + { + block: &types.Block{ + Header: &types.Header{Height: 11, DataHash: deadBeefHash}, + Data: &types.Data{Txs: deadBeefTxs}, + }, + commit: lite.Commit{ + Header: &types.Header{Height: 11}, + Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, + }, + }, + // End Header.Data hash mismatch test + } + + for i, tt := range tests { + err := proxy.ValidateBlock(tt.block, tt.commit) + if tt.wantErr != "" { + if err == nil { + assert.FailNowf(t, "Unexpectedly passed", "#%d", i) + } else { + assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i) + } + continue + } + + assert.Nil(t, err, "#%d: expecting a nil error", i) + } +} + +func TestValidateBlockMeta(t *testing.T) { + tests := []struct { + meta *types.BlockMeta + commit lite.Commit + wantErr string + }{ + { + meta: nil, wantErr: "non-nil BlockMeta", + }, + { + meta: &types.BlockMeta{}, wantErr: "non-nil Header", + }, + { + meta: &types.BlockMeta{Header: new(types.Header)}, + }, + + // Start Header.Height mismatch test + { + meta: &types.BlockMeta{Header: &types.Header{Height: 10}}, + commit: lite.Commit{Header: &types.Header{Height: 11}}, + wantErr: "don't match - 10 vs 11", + }, + + { + meta: &types.BlockMeta{Header: &types.Header{Height: 11}}, + commit: lite.Commit{Header: &types.Header{Height: 11}}, + }, + // End Header.Height mismatch test + + // Start Headers don't match test + { + meta: &types.BlockMeta{Header: hdrHeight11}, + commit: lite.Commit{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", + }, + + { + meta: &types.BlockMeta{Header: hdrHeight11}, + commit: lite.Commit{Header: hdrHeight11}, + }, + + { + meta: &types.BlockMeta{ + Header: &types.Header{ + Height: 11, + ValidatorsHash: []byte("lite-test"), + // TODO: should be able to use empty time after Amino upgrade + Time: testTime1, + }, + }, + commit: lite.Commit{ + Header: &types.Header{Height: 11, DataHash: deadBeefHash}, + }, + wantErr: "Headers don't match", + }, + + { + meta: &types.BlockMeta{ + Header: &types.Header{ + Height: 11, DataHash: deadBeefHash, + ValidatorsHash: []byte("Tendermint"), + Time: testTime1, + }, + }, + commit: lite.Commit{ + Header: &types.Header{ + Height: 11, DataHash: deadBeefHash, + ValidatorsHash: []byte("Tendermint"), + Time: testTime2, + }, + Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, + }, + wantErr: "Headers don't match", + }, + + { + meta: &types.BlockMeta{ + Header: &types.Header{ + Height: 11, DataHash: deadBeefHash, + ValidatorsHash: []byte("Tendermint"), + Time: testTime2, + }, + }, + commit: lite.Commit{ + Header: &types.Header{ + Height: 11, DataHash: deadBeefHash, + ValidatorsHash: []byte("Tendermint-x"), + Time: testTime2, + }, + Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, + }, + wantErr: "Headers don't match", + }, + // End Headers don't match test + } + + for i, tt := range tests { + err := proxy.ValidateBlockMeta(tt.meta, tt.commit) + if tt.wantErr != "" { + if err == nil { + assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr) + } else { + assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i) + } + continue + } + + assert.Nil(t, err, "#%d: expecting a nil error", i) + } +} diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go new file mode 100644 index 000000000..f0eb6b41e --- /dev/null +++ b/lite/proxy/wrapper.go @@ -0,0 +1,187 @@ 
+package proxy
+
+import (
+	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/tendermint/tendermint/lite"
+	certclient "github.com/tendermint/tendermint/lite/client"
+	rpcclient "github.com/tendermint/tendermint/rpc/client"
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+)
+
+var _ rpcclient.Client = Wrapper{}
+
+// Wrapper wraps an rpcclient with a Certifier and double-checks any input that is
+// provable before passing it along. Allows you to make any rpcclient fully secure.
+type Wrapper struct {
+	rpcclient.Client
+	cert *lite.InquiringCertifier
+}
+
+// SecureClient uses a given certifier to wrap a connection to an untrusted
+// host and returns a cryptographically secure rpc client.
+//
+// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface.
+func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper {
+	wrap := Wrapper{c, cert}
+	// TODO: no longer possible as no more such interface exposed....
+	// if we wrap http client, then we can swap out the event switch to filter
+	// if hc, ok := c.(*rpcclient.HTTP); ok {
+	//	evt := hc.WSEvents.EventSwitch
+	//	hc.WSEvents.EventSwitch = WrappedSwitch{evt, wrap}
+	// }
+	return wrap
+}
+
+// ABCIQueryWithOptions exposes all options for the ABCI query and verifies the returned proof
+func (w Wrapper) ABCIQueryWithOptions(path string, data cmn.HexBytes,
+	opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+
+	res, _, err := GetWithProofOptions(path, data, opts, w.Client, w.cert)
+	return res, err
+}
+
+// ABCIQuery uses default options for the ABCI query and verifies the returned proof
+func (w Wrapper) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
+	return w.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions)
+}
+
+// Tx queries for a given tx and verifies the proof if it was requested
+func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
+	res, err := w.Client.Tx(hash, prove)
+	if !prove || err != nil {
+		return res, err
+	}
+	h := int64(res.Height)
+	check, err := GetCertifiedCommit(h, w.Client, w.cert)
+	if err != nil {
+		return res, err
+	}
+	err = res.Proof.Validate(check.Header.DataHash)
+	return res, err
+}
+
+// BlockchainInfo requests a list of headers and verifies them all...
+// Rather expensive.
+//
+// TODO: optimize this if used for anything needing performance
+func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+	r, err := w.Client.BlockchainInfo(minHeight, maxHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	// go and verify every blockmeta in the result....
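+	// NOTE: each iteration fetches and certifies one commit, so verifying a
+	// range of n headers costs n additional RPC round-trips.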
+ for _, meta := range r.BlockMetas { + // get a checkpoint to verify from + c, err := w.Commit(&meta.Header.Height) + if err != nil { + return nil, err + } + check := certclient.CommitFromResult(c) + err = ValidateBlockMeta(meta, check) + if err != nil { + return nil, err + } + } + + return r, nil +} + +// Block returns an entire block and verifies all signatures +func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { + r, err := w.Client.Block(height) + if err != nil { + return nil, err + } + // get a checkpoint to verify from + c, err := w.Commit(height) + if err != nil { + return nil, err + } + check := certclient.CommitFromResult(c) + + // now verify + err = ValidateBlockMeta(r.BlockMeta, check) + if err != nil { + return nil, err + } + err = ValidateBlock(r.Block, check) + if err != nil { + return nil, err + } + return r, nil +} + +// Commit downloads the Commit and certifies it with the lite. +// +// This is the foundation for all other verification in this module +func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { + rpcclient.WaitForHeight(w.Client, *height, nil) + r, err := w.Client.Commit(height) + // if we got it, then certify it + if err == nil { + check := certclient.CommitFromResult(r) + err = w.cert.Certify(check) + } + return r, err +} + +// // WrappedSwitch creates a websocket connection that auto-verifies any info +// // coming through before passing it along. +// // +// // Since the verification takes 1-2 rpc calls, this is obviously only for +// // relatively low-throughput situations that can tolerate a bit extra latency +// type WrappedSwitch struct { +// types.EventSwitch +// client rpcclient.Client +// } + +// // FireEvent verifies any block or header returned from the eventswitch +// func (s WrappedSwitch) FireEvent(event string, data events.EventData) { +// tm, ok := data.(types.TMEventData) +// if !ok { +// fmt.Printf("bad type %#v\n", data) +// return +// } + +// // check to validate it if possible, and drop if not valid +// switch t := tm.(type) { +// case types.EventDataNewBlockHeader: +// err := verifyHeader(s.client, t.Header) +// if err != nil { +// fmt.Printf("Invalid header: %#v\n", err) +// return +// } +// case types.EventDataNewBlock: +// err := verifyBlock(s.client, t.Block) +// if err != nil { +// fmt.Printf("Invalid block: %#v\n", err) +// return +// } +// // TODO: can we verify tx as well? 
anything else
+// 	}

+// 	// looks good, we fire it
+// 	s.EventSwitch.FireEvent(event, data)
+// }

+// func verifyHeader(c rpcclient.Client, head *types.Header) error {
+// 	// get a checkpoint to verify from
+// 	commit, err := c.Commit(&head.Height)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	check := certclient.CommitFromResult(commit)
+// 	return ValidateHeader(head, check)
+// }
+//
+// func verifyBlock(c rpcclient.Client, block *types.Block) error {
+// 	// get a checkpoint to verify from
+// 	commit, err := c.Commit(&block.Height)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	check := certclient.CommitFromResult(commit)
+// 	return ValidateBlock(block, check)
+// }
diff --git a/lite/static_certifier.go b/lite/static_certifier.go
new file mode 100644
index 000000000..1ec3b809a
--- /dev/null
+++ b/lite/static_certifier.go
@@ -0,0 +1,73 @@
+package lite
+
+import (
+	"bytes"
+
+	"github.com/pkg/errors"
+
+	"github.com/tendermint/tendermint/types"
+
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+var _ Certifier = (*StaticCertifier)(nil)
+
+// StaticCertifier assumes a static set of validators, set on
+// initialization, and checks commits against them.
+// The signatures on every header are checked for > 2/3 votes
+// against the known validator set upon Certify.
+//
+// Good for testing or really simple chains. A building block
+// to support real-world functionality.
+type StaticCertifier struct {
+	chainID string
+	vSet    *types.ValidatorSet
+	vhash   []byte
+}
+
+// NewStaticCertifier returns a new certifier with a static validator set.
+func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier {
+	return &StaticCertifier{
+		chainID: chainID,
+		vSet:    vals,
+	}
+}
+
+// ChainID returns the chain id.
+// Implements Certifier.
+func (sc *StaticCertifier) ChainID() string {
+	return sc.chainID
+}
+
+// Validators returns the validator set.
+func (sc *StaticCertifier) Validators() *types.ValidatorSet {
+	return sc.vSet
+}
+
+// Hash returns the hash of the validator set.
+func (sc *StaticCertifier) Hash() []byte {
+	if len(sc.vhash) == 0 {
+		sc.vhash = sc.vSet.Hash()
+	}
+	return sc.vhash
+}
+
+// Certify makes sure that the commit is valid.
+// Implements Certifier.
+func (sc *StaticCertifier) Certify(commit Commit) error {
+	// do basic sanity checks
+	err := commit.ValidateBasic(sc.chainID)
+	if err != nil {
+		return err
+	}
+
+	// make sure it has the same validator set we have (static means static)
+	if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) {
+		return liteErr.ErrValidatorsChanged()
+	}
+
+	// then make sure we have the proper signatures for this
+	err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID,
+		commit.Header.Height, commit.Commit)
+	return errors.WithStack(err)
+}
diff --git a/lite/static_certifier_test.go b/lite/static_certifier_test.go
new file mode 100644
index 000000000..03567daa6
--- /dev/null
+++ b/lite/static_certifier_test.go
@@ -0,0 +1,59 @@
+package lite_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/tendermint/tendermint/types"
+
+	"github.com/tendermint/tendermint/lite"
+	liteErr "github.com/tendermint/tendermint/lite/errors"
+)
+
+func TestStaticCert(t *testing.T) {
+	// assert, require := assert.New(t), require.New(t)
+	assert := assert.New(t)
+	// require := require.New(t)
+
+	keys := lite.GenValKeys(4)
+	// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
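+	// (total power is 20+30+40+50 = 140; the first three sum to 90, which is
+	// below 2/3 of 140, while the last three sum to 120, which is above it)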
+	vals := keys.ToValidators(20, 10)
+	// and a certifier based on our known set
+	chainID := "test-static"
+	cert := lite.NewStaticCertifier(chainID, vals)
+
+	cases := []struct {
+		keys        lite.ValKeys
+		vals        *types.ValidatorSet
+		height      int64
+		first, last int  // who actually signs
+		proper      bool // true -> expect no error
+		changed     bool // true -> expect validator change error
+	}{
+		// perfect, signed by everyone
+		{keys, vals, 1, 0, len(keys), true, false},
+		// skip little guy is okay
+		{keys, vals, 2, 1, len(keys), true, false},
+		// but not the big guy
+		{keys, vals, 3, 0, len(keys) - 1, false, false},
+		// even changing the power a little bit breaks the static validator
+		// the sigs are enough, but the validator hash is unknown
+		{keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true},
+	}
+
+	for _, tc := range cases {
+		check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals,
+			[]byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last)
+		err := cert.Certify(check)
+		if tc.proper {
+			assert.Nil(err, "%+v", err)
+		} else {
+			assert.NotNil(err)
+			if tc.changed {
+				assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err)
+			}
+		}
+	}
+
+}
diff --git a/mempool/mempool.go b/mempool/mempool.go
new file mode 100644
index 000000000..06852c9af
--- /dev/null
+++ b/mempool/mempool.go
@@ -0,0 +1,550 @@
+package mempool
+
+import (
+	"bytes"
+	"container/list"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/pkg/errors"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	"github.com/tendermint/tendermint/libs/clist"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+
+	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/proxy"
+	"github.com/tendermint/tendermint/types"
+)
+
+/*
+
+The mempool pushes new txs onto the proxyAppConn.
+It gets a stream of (req, res) tuples from the proxy.
+The mempool stores good txs in a concurrent linked-list.
+
+Multiple concurrent go-routines can traverse this linked-list
+safely by calling .NextWait() on each element.
+
+So we have several go-routines:
+1. Consensus calling Update() and Reap() synchronously
+2. Many mempool reactor's peer routines calling CheckTx()
+3. Many mempool reactor's peer routines traversing the txs linked list
+4. Another goroutine calling GarbageCollectTxs() periodically
+
+To manage these goroutines, there are three methods of locking.
+1. Mutations to the linked-list are protected by an internal mtx (CList is goroutine-safe)
+2. Mutations to the linked-list elements are atomic
+3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx
+
+Garbage collection of old elements from mempool.txs is handled via
+the DetachPrev() call, which makes old elements unreachable by the
+peer broadcastTxRoutine(), so they are automatically garbage collected.
+
+TODO: Better handle abci client errors. (make it automatically handle connection errors)
+
+*/
+
+var (
+	// ErrTxInCache is returned to the client if we saw tx earlier
+	ErrTxInCache = errors.New("Tx already exists in cache")
+
+	// ErrMempoolIsFull means Tendermint & an application can't handle that much load
+	ErrMempoolIsFull = errors.New("Mempool is full")
+)
+
+// TxID returns the hex encoded hash of the bytes as a types.Tx.
+func TxID(tx []byte) string { + return fmt.Sprintf("%X", types.Tx(tx).Hash()) +} + +// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus +// round. Transaction validity is checked using the CheckTx abci message before the transaction is +// added to the pool. The Mempool uses a concurrent list structure for storing transactions that +// can be efficiently accessed by multiple concurrent readers. +type Mempool struct { + config *cfg.MempoolConfig + + proxyMtx sync.Mutex + proxyAppConn proxy.AppConnMempool + txs *clist.CList // concurrent linked-list of good txs + counter int64 // simple incrementing counter + height int64 // the last block Update()'d to + rechecking int32 // for re-checking filtered txs on Update() + recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + notifiedTxsAvailable bool + txsAvailable chan int64 // fires the next height once for each height, when the mempool is not empty + + // Keep a cache of already-seen txs. + // This reduces the pressure on the proxyApp. + cache txCache + + // A log of mempool txs + wal *auto.AutoFile + + logger log.Logger + + metrics *Metrics +} + +// MempoolOption sets an optional parameter on the Mempool. +type MempoolOption func(*Mempool) + +// NewMempool returns a new Mempool with the given configuration and connection to an application. +func NewMempool( + config *cfg.MempoolConfig, + proxyAppConn proxy.AppConnMempool, + height int64, + options ...MempoolOption, +) *Mempool { + mempool := &Mempool{ + config: config, + proxyAppConn: proxyAppConn, + txs: clist.New(), + counter: 0, + height: height, + rechecking: 0, + recheckCursor: nil, + recheckEnd: nil, + logger: log.NewNopLogger(), + metrics: NopMetrics(), + } + if config.CacheSize > 0 { + mempool.cache = newMapTxCache(config.CacheSize) + } else { + mempool.cache = nopTxCache{} + } + proxyAppConn.SetResponseCallback(mempool.resCb) + for _, option := range options { + option(mempool) + } + return mempool +} + +// EnableTxsAvailable initializes the TxsAvailable channel, +// ensuring it will trigger once every height when transactions are available. +// NOTE: not thread safe - should only be called once, on startup +func (mem *Mempool) EnableTxsAvailable() { + mem.txsAvailable = make(chan int64, 1) +} + +// SetLogger sets the Logger. +func (mem *Mempool) SetLogger(l log.Logger) { + mem.logger = l +} + +// WithMetrics sets the metrics. +func WithMetrics(metrics *Metrics) MempoolOption { + return func(mem *Mempool) { mem.metrics = metrics } +} + +// CloseWAL closes and discards the underlying WAL file. +// Any further writes will not be relayed to disk. +func (mem *Mempool) CloseWAL() bool { + if mem == nil { + return false + } + + mem.proxyMtx.Lock() + defer mem.proxyMtx.Unlock() + + if mem.wal == nil { + return false + } + if err := mem.wal.Close(); err != nil && mem.logger != nil { + mem.logger.Error("Mempool.CloseWAL", "err", err) + } + mem.wal = nil + return true +} + +func (mem *Mempool) InitWAL() { + walDir := mem.config.WalDir() + if walDir != "" { + err := cmn.EnsureDir(walDir, 0700) + if err != nil { + cmn.PanicSanity(errors.Wrap(err, "Error ensuring Mempool wal dir")) + } + af, err := auto.OpenAutoFile(walDir + "/wal") + if err != nil { + cmn.PanicSanity(errors.Wrap(err, "Error opening Mempool wal file")) + } + mem.wal = af + } +} + +// Lock locks the mempool. The consensus must be able to hold lock to safely update. 
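+// A typical caller (sketch) brackets an Update:
+//
+//	mem.Lock()
+//	err := mem.Update(height, txs)
+//	mem.Unlock()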
+func (mem *Mempool) Lock() { + mem.proxyMtx.Lock() +} + +// Unlock unlocks the mempool. +func (mem *Mempool) Unlock() { + mem.proxyMtx.Unlock() +} + +// Size returns the number of transactions in the mempool. +func (mem *Mempool) Size() int { + return mem.txs.Len() +} + +// Flushes the mempool connection to ensure async resCb calls are done e.g. +// from CheckTx. +func (mem *Mempool) FlushAppConn() error { + return mem.proxyAppConn.FlushSync() +} + +// Flush removes all transactions from the mempool and cache +func (mem *Mempool) Flush() { + mem.proxyMtx.Lock() + defer mem.proxyMtx.Unlock() + + mem.cache.Reset() + + for e := mem.txs.Front(); e != nil; e = e.Next() { + mem.txs.Remove(e) + e.DetachPrev() + } +} + +// TxsFront returns the first transaction in the ordered list for peer +// goroutines to call .NextWait() on. +func (mem *Mempool) TxsFront() *clist.CElement { + return mem.txs.Front() +} + +// TxsWaitChan returns a channel to wait on transactions. It will be closed +// once the mempool is not empty (ie. the internal `mem.txs` has at least one +// element) +func (mem *Mempool) TxsWaitChan() <-chan struct{} { + return mem.txs.WaitChan() +} + +// CheckTx executes a new transaction against the application to determine its validity +// and whether it should be added to the mempool. +// It blocks if we're waiting on Update() or Reap(). +// cb: A callback from the CheckTx command. +// It gets called from another goroutine. +// CONTRACT: Either cb will get called, or err returned. +func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { + mem.proxyMtx.Lock() + defer mem.proxyMtx.Unlock() + + if mem.Size() >= mem.config.Size { + return ErrMempoolIsFull + } + + // CACHE + if !mem.cache.Push(tx) { + return ErrTxInCache + } + // END CACHE + + // WAL + if mem.wal != nil { + // TODO: Notify administrators when WAL fails + _, err := mem.wal.Write([]byte(tx)) + if err != nil { + mem.logger.Error("Error writing to WAL", "err", err) + } + _, err = mem.wal.Write([]byte("\n")) + if err != nil { + mem.logger.Error("Error writing to WAL", "err", err) + } + } + // END WAL + + // NOTE: proxyAppConn may error if tx buffer is full + if err = mem.proxyAppConn.Error(); err != nil { + return err + } + reqRes := mem.proxyAppConn.CheckTxAsync(tx) + if cb != nil { + reqRes.SetCallback(cb) + } + + return nil +} + +// ABCI callback function +func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { + if mem.recheckCursor == nil { + mem.resCbNormal(req, res) + } else { + mem.resCbRecheck(req, res) + } + mem.metrics.Size.Set(float64(mem.Size())) +} + +func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { + switch r := res.Value.(type) { + case *abci.Response_CheckTx: + tx := req.GetCheckTx().Tx + if r.CheckTx.Code == abci.CodeTypeOK { + mem.counter++ + memTx := &mempoolTx{ + counter: mem.counter, + height: mem.height, + tx: tx, + } + mem.txs.PushBack(memTx) + mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "total", mem.Size()) + mem.notifyTxsAvailable() + } else { + // ignore bad transaction + mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r) + + // remove from cache (it might be good later) + mem.cache.Remove(tx) + } + default: + // ignore other messages + } +} + +func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { + switch r := res.Value.(type) { + case *abci.Response_CheckTx: + memTx := mem.recheckCursor.Value.(*mempoolTx) + if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) { + 
cmn.PanicSanity(cmn.Fmt("Unexpected tx response from proxy during recheck\n"+
+				"Expected %X, got %X", memTx.tx, req.GetCheckTx().Tx))
+		}
+		if r.CheckTx.Code == abci.CodeTypeOK {
+			// Good, nothing to do.
+		} else {
+			// Tx became invalidated due to newly committed block.
+			mem.txs.Remove(mem.recheckCursor)
+			mem.recheckCursor.DetachPrev()
+
+			// remove from cache (it might be good later)
+			mem.cache.Remove(req.GetCheckTx().Tx)
+		}
+		if mem.recheckCursor == mem.recheckEnd {
+			mem.recheckCursor = nil
+		} else {
+			mem.recheckCursor = mem.recheckCursor.Next()
+		}
+		if mem.recheckCursor == nil {
+			// Done!
+			atomic.StoreInt32(&mem.rechecking, 0)
+			mem.logger.Info("Done rechecking txs")
+
+			// in case the recheck removed all txs
+			if mem.Size() > 0 {
+				mem.notifyTxsAvailable()
+			}
+		}
+	default:
+		// ignore other messages
+	}
+}
+
+// TxsAvailable returns a channel which fires once for every height,
+// and only when transactions are available in the mempool.
+// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
+func (mem *Mempool) TxsAvailable() <-chan int64 {
+	return mem.txsAvailable
+}
+
+func (mem *Mempool) notifyTxsAvailable() {
+	if mem.Size() == 0 {
+		panic("notified txs available but mempool is empty!")
+	}
+	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
+		// channel cap is 1, so this will send once
+		select {
+		case mem.txsAvailable <- mem.height + 1:
+		default:
+		}
+		mem.notifiedTxsAvailable = true
+	}
+}
+
+// Reap returns a list of transactions currently in the mempool.
+// If maxTxs is -1, there is no cap on the number of returned transactions.
+func (mem *Mempool) Reap(maxTxs int) types.Txs {
+	mem.proxyMtx.Lock()
+	defer mem.proxyMtx.Unlock()
+
+	for atomic.LoadInt32(&mem.rechecking) > 0 {
+		// TODO: Something better?
+		time.Sleep(time.Millisecond * 10)
+	}
+
+	txs := mem.collectTxs(maxTxs)
+	return txs
+}
+
+// maxTxs: -1 means uncapped, 0 means none
+func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
+	if maxTxs == 0 {
+		return []types.Tx{}
+	} else if maxTxs < 0 {
+		maxTxs = mem.txs.Len()
+	}
+	txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), maxTxs))
+	for e := mem.txs.Front(); e != nil && len(txs) < maxTxs; e = e.Next() {
+		memTx := e.Value.(*mempoolTx)
+		txs = append(txs, memTx.tx)
+	}
+	return txs
+}
+
+// Update informs the mempool that the given txs were committed and can be discarded.
+// NOTE: this should be called *after* block is committed by consensus.
+// NOTE: unsafe; Lock/Unlock must be managed by caller
+func (mem *Mempool) Update(height int64, txs types.Txs) error {
+	// First, create a lookup map of txns in new txs.
+	txsMap := make(map[string]struct{})
+	for _, tx := range txs {
+		txsMap[string(tx)] = struct{}{}
+	}
+
+	// Set height
+	mem.height = height
+	mem.notifiedTxsAvailable = false
+
+	// Remove transactions that are already in txs.
+	goodTxs := mem.filterTxs(txsMap)
+	// Recheck mempool txs if any txs were committed in the block
+	// NOTE/XXX: in some apps a tx could be invalidated due to EndBlock,
+	//	so we really still do need to recheck, but this is for debugging
+	if mem.config.Recheck && (mem.config.RecheckEmpty || len(goodTxs) > 0) {
+		mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height)
+		mem.recheckTxs(goodTxs)
+		// At this point, mem.txs are being rechecked.
+		// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
+		// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
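+		// (Reap, above, spins until mem.rechecking drops back to 0 for
+		// exactly this reason.)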
+	}
+	mem.metrics.Size.Set(float64(mem.Size()))
+	return nil
+}
+
+func (mem *Mempool) filterTxs(blockTxsMap map[string]struct{}) []types.Tx {
+	goodTxs := make([]types.Tx, 0, mem.txs.Len())
+	for e := mem.txs.Front(); e != nil; e = e.Next() {
+		memTx := e.Value.(*mempoolTx)
+		// Remove the tx if it's already in a block.
+		if _, ok := blockTxsMap[string(memTx.tx)]; ok {
+			// remove from clist
+			mem.txs.Remove(e)
+			e.DetachPrev()
+
+			// NOTE: we don't remove committed txs from the cache.
+			continue
+		}
+		// Good tx!
+		goodTxs = append(goodTxs, memTx.tx)
+	}
+	return goodTxs
+}
+
+// NOTE: pass in goodTxs because mem.txs can mutate concurrently.
+func (mem *Mempool) recheckTxs(goodTxs []types.Tx) {
+	if len(goodTxs) == 0 {
+		return
+	}
+	atomic.StoreInt32(&mem.rechecking, 1)
+	mem.recheckCursor = mem.txs.Front()
+	mem.recheckEnd = mem.txs.Back()
+
+	// Push txs to proxyAppConn
+	// NOTE: resCb() may be called concurrently.
+	for _, tx := range goodTxs {
+		mem.proxyAppConn.CheckTxAsync(tx)
+	}
+	mem.proxyAppConn.FlushAsync()
+}
+
+//--------------------------------------------------------------------------------
+
+// mempoolTx is a transaction that successfully ran CheckTx.
+type mempoolTx struct {
+	counter int64    // a simple incrementing counter
+	height  int64    // height at which this tx was validated
+	tx      types.Tx //
+}
+
+// Height returns the height for this transaction
+func (memTx *mempoolTx) Height() int64 {
+	return atomic.LoadInt64(&memTx.height)
+}
+
+//--------------------------------------------------------------------------------
+
+type txCache interface {
+	Reset()
+	Push(tx types.Tx) bool
+	Remove(tx types.Tx)
+}
+
+// mapTxCache maintains a cache of transactions.
+type mapTxCache struct {
+	mtx  sync.Mutex
+	size int
+	map_ map[string]struct{}
+	list *list.List // to remove oldest tx when cache gets too big
+}
+
+var _ txCache = (*mapTxCache)(nil)
+
+// newMapTxCache returns a new mapTxCache.
+func newMapTxCache(cacheSize int) *mapTxCache {
+	return &mapTxCache{
+		size: cacheSize,
+		map_: make(map[string]struct{}, cacheSize),
+		list: list.New(),
+	}
+}
+
+// Reset resets the cache to an empty state.
+func (cache *mapTxCache) Reset() {
+	cache.mtx.Lock()
+	cache.map_ = make(map[string]struct{}, cache.size)
+	cache.list.Init()
+	cache.mtx.Unlock()
+}
+
+// Push adds the given tx to the cache and returns true. It returns false if tx
+// is already in the cache.
+func (cache *mapTxCache) Push(tx types.Tx) bool {
+	cache.mtx.Lock()
+	defer cache.mtx.Unlock()
+
+	if _, exists := cache.map_[string(tx)]; exists {
+		return false
+	}
+
+	if cache.list.Len() >= cache.size {
+		popped := cache.list.Front()
+		poppedTx := popped.Value.(types.Tx)
+		// NOTE: the tx may have already been removed from the map
+		// but deleting a non-existent element is fine
+		delete(cache.map_, string(poppedTx))
+		cache.list.Remove(popped)
+	}
+	cache.map_[string(tx)] = struct{}{}
+	cache.list.PushBack(tx)
+	return true
+}
+
+// Remove removes the given tx from the cache.
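+// It is called when a tx is rejected by CheckTx or invalidated during a
+// recheck, so that the same tx may be resubmitted and accepted later.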
+func (cache *mapTxCache) Remove(tx types.Tx) {
+	cache.mtx.Lock()
+	delete(cache.map_, string(tx))
+	cache.mtx.Unlock()
+}
+
+type nopTxCache struct{}
+
+var _ txCache = (*nopTxCache)(nil)
+
+func (nopTxCache) Reset()             {}
+func (nopTxCache) Push(types.Tx) bool { return true }
+func (nopTxCache) Remove(types.Tx)    {}
diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go
new file mode 100644
index 000000000..1a91de4f9
--- /dev/null
+++ b/mempool/mempool_test.go
@@ -0,0 +1,286 @@
+package mempool
+
+import (
+	"crypto/md5"
+	"crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/tendermint/tendermint/abci/example/counter"
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	abci "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+
+	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/proxy"
+	"github.com/tendermint/tendermint/types"
+
+	"github.com/stretchr/testify/require"
+)
+
+func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
+	config := cfg.ResetTestRoot("mempool_test")
+
+	appConnMem, _ := cc.NewABCIClient()
+	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
+	err := appConnMem.Start()
+	if err != nil {
+		panic(err)
+	}
+	mempool := NewMempool(config.Mempool, appConnMem, 0)
+	mempool.SetLogger(log.TestingLogger())
+	return mempool
+}
+
+func ensureNoFire(t *testing.T, ch <-chan int64, timeoutMS int) {
+	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
+	select {
+	case <-ch:
+		t.Fatal("Expected not to fire")
+	case <-timer.C:
+	}
+}
+
+func ensureFire(t *testing.T, ch <-chan int64, timeoutMS int) {
+	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
+	select {
+	case <-ch:
+	case <-timer.C:
+		t.Fatal("Expected to fire")
+	}
+}
+
+func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
+	txs := make(types.Txs, count)
+	for i := 0; i < count; i++ {
+		txBytes := make([]byte, 20)
+		txs[i] = txBytes
+		_, err := rand.Read(txBytes)
+		if err != nil {
+			t.Error(err)
+		}
+		if err := mempool.CheckTx(txBytes, nil); err != nil {
+			t.Fatalf("Error after CheckTx: %v", err)
+		}
+	}
+	return txs
+}
+
+func TestTxsAvailable(t *testing.T) {
+	app := kvstore.NewKVStoreApplication()
+	cc := proxy.NewLocalClientCreator(app)
+	mempool := newMempoolWithApp(cc)
+	mempool.EnableTxsAvailable()
+
+	timeoutMS := 500
+
+	// with no txs, it shouldn't fire
+	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
+
+	// send a bunch of txs, it should only fire once
+	txs := checkTxs(t, mempool, 100)
+	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
+	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
+
+	// call update with half the txs.
+	// it should fire once now for the new height
+	// since there are still txs left
+	committedTxs, txs := txs[:50], txs[50:]
+	if err := mempool.Update(1, committedTxs); err != nil {
+		t.Error(err)
+	}
+	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
+	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
+
+	// send a bunch more txs. we already fired for this height so it shouldn't fire again
+	moreTxs := checkTxs(t, mempool, 50)
+	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
+
+	// now call update with all the txs. it should not fire as there are no txs left
+	committedTxs = append(txs, moreTxs...)
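+	// (the remaining 50 original txs plus the 50 new ones, 100 in total)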
+	if err := mempool.Update(2, committedTxs); err != nil {
+		t.Error(err)
+	}
+	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
+
+	// send a bunch more txs, it should only fire once
+	checkTxs(t, mempool, 100)
+	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
+	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
+}
+
+func TestSerialReap(t *testing.T) {
+	app := counter.NewCounterApplication(true)
+	app.SetOption(abci.RequestSetOption{"serial", "on"})
+	cc := proxy.NewLocalClientCreator(app)
+
+	mempool := newMempoolWithApp(cc)
+	appConnCon, _ := cc.NewABCIClient()
+	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
+	err := appConnCon.Start()
+	require.Nil(t, err)
+
+	cacheMap := make(map[string]struct{})
+	deliverTxsRange := func(start, end int) {
+		// Deliver some txs.
+		for i := start; i < end; i++ {
+
+			// This will succeed
+			txBytes := make([]byte, 8)
+			binary.BigEndian.PutUint64(txBytes, uint64(i))
+			err := mempool.CheckTx(txBytes, nil)
+			_, cached := cacheMap[string(txBytes)]
+			if cached {
+				require.NotNil(t, err, "expected error for cached tx")
+			} else {
+				require.Nil(t, err, "expected no err for uncached tx")
+			}
+			cacheMap[string(txBytes)] = struct{}{}
+
+			// Duplicates are cached and should return error
+			err = mempool.CheckTx(txBytes, nil)
+			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
+		}
+	}
+
+	reapCheck := func(exp int) {
+		txs := mempool.Reap(-1)
+		require.Equal(t, len(txs), exp, cmn.Fmt("Expected to reap %v txs but got %v", exp, len(txs)))
+	}
+
+	updateRange := func(start, end int) {
+		txs := make([]types.Tx, 0)
+		for i := start; i < end; i++ {
+			txBytes := make([]byte, 8)
+			binary.BigEndian.PutUint64(txBytes, uint64(i))
+			txs = append(txs, txBytes)
+		}
+		if err := mempool.Update(0, txs); err != nil {
+			t.Error(err)
+		}
+	}
+
+	commitRange := func(start, end int) {
+		// Deliver some txs.
+		for i := start; i < end; i++ {
+			txBytes := make([]byte, 8)
+			binary.BigEndian.PutUint64(txBytes, uint64(i))
+			res, err := appConnCon.DeliverTxSync(txBytes)
+			if err != nil {
+				t.Errorf("Client error committing tx: %v", err)
+			}
+			if res.IsErr() {
+				t.Errorf("Error committing tx. Code:%v result:%X log:%v",
+					res.Code, res.Data, res.Log)
+			}
+		}
+		res, err := appConnCon.CommitSync()
+		if err != nil {
+			t.Errorf("Client error committing: %v", err)
+		}
+		if len(res.Data) != 8 {
+			t.Errorf("Error committing. Hash:%X", res.Data)
+		}
+	}
+
+	//----------------------------------------
+
+	// Deliver some txs.
+	deliverTxsRange(0, 100)
+
+	// Reap the txs.
+	reapCheck(100)
+
+	// Reap again. We should get the same amount
+	reapCheck(100)
+
+	// Deliver 0 to 999, we should reap 900 new txs
+	// because 100 were already counted.
+	deliverTxsRange(0, 1000)
+
+	// Reap the txs.
+	reapCheck(1000)
+
+	// Reap again. We should get the same amount
+	reapCheck(1000)
+
+	// Commit from the consensus AppConn
+	commitRange(0, 500)
+	updateRange(0, 500)
+
+	// We should have 500 left.
+	reapCheck(500)
+
+	// Deliver 100 invalid txs and 100 valid txs
+	deliverTxsRange(900, 1100)
+
+	// We should have 600 now.
+	reapCheck(600)
+}
+
+func TestMempoolCloseWAL(t *testing.T) {
+	// 1. Create the temporary directory for mempool and WAL testing.
+	rootDir, err := ioutil.TempDir("", "mempool-test")
+	require.Nil(t, err, "expecting successful tmpdir creation")
+	defer os.RemoveAll(rootDir)
+
+	// 2. 
Ensure that it doesn't contain any elements -- Sanity check + m1, err := filepath.Glob(filepath.Join(rootDir, "*")) + require.Nil(t, err, "successful globbing expected") + require.Equal(t, 0, len(m1), "no matches yet") + + // 3. Create the mempool + wcfg := cfg.DefaultMempoolConfig() + wcfg.RootDir = rootDir + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + appConnMem, _ := cc.NewABCIClient() + mempool := NewMempool(wcfg, appConnMem, 10) + mempool.InitWAL() + + // 4. Ensure that the directory contains the WAL file + m2, err := filepath.Glob(filepath.Join(rootDir, "*")) + require.Nil(t, err, "successful globbing expected") + require.Equal(t, 1, len(m2), "expecting the wal match in") + + // 5. Write some contents to the WAL + mempool.CheckTx(types.Tx([]byte("foo")), nil) + walFilepath := mempool.wal.Path + sum1 := checksumFile(walFilepath, t) + + // 6. Sanity check to ensure that the written TX matches the expectation. + require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written") + + // 7. Invoke CloseWAL() and ensure it discards the + // WAL thus any other write won't go through. + require.True(t, mempool.CloseWAL(), "CloseWAL should CloseWAL") + mempool.CheckTx(types.Tx([]byte("bar")), nil) + sum2 := checksumFile(walFilepath, t) + require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded") + + // 8. Second CloseWAL should do nothing + require.False(t, mempool.CloseWAL(), "CloseWAL should CloseWAL") + + // 9. Sanity check to ensure that the WAL file still exists + m3, err := filepath.Glob(filepath.Join(rootDir, "*")) + require.Nil(t, err, "successful globbing expected") + require.Equal(t, 1, len(m3), "expecting the wal match in") +} + +func checksumIt(data []byte) string { + h := md5.New() + h.Write(data) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func checksumFile(p string, t *testing.T) string { + data, err := ioutil.ReadFile(p) + require.Nil(t, err, "expecting successful read of %q", p) + return checksumIt(data) +} diff --git a/mempool/metrics.go b/mempool/metrics.go new file mode 100644 index 000000000..f381678cb --- /dev/null +++ b/mempool/metrics.go @@ -0,0 +1,34 @@ +package mempool + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +// Metrics contains metrics exposed by this package. +// see MetricsProvider for descriptions. +type Metrics struct { + // Size of the mempool. + Size metrics.Gauge +} + +// PrometheusMetrics returns Metrics build using Prometheus client library. +func PrometheusMetrics() *Metrics { + return &Metrics{ + Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Subsystem: "mempool", + Name: "size", + Help: "Size of the mempool (number of uncommitted transactions).", + }, []string{}), + } +} + +// NopMetrics returns no-op Metrics. 
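+// A node would typically wire metrics in via the mempool option (sketch):
+//
+//	mem := NewMempool(cfg, appConnMem, height, WithMetrics(PrometheusMetrics()))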
+func NopMetrics() *Metrics { + return &Metrics{ + Size: discard.NewGauge(), + } +} diff --git a/mempool/reactor.go b/mempool/reactor.go new file mode 100644 index 000000000..96988be78 --- /dev/null +++ b/mempool/reactor.go @@ -0,0 +1,195 @@ +package mempool + +import ( + "fmt" + "reflect" + "time" + + amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +const ( + MempoolChannel = byte(0x30) + + maxMsgSize = 1048576 // 1MB TODO make it configurable + peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount +) + +// MempoolReactor handles mempool tx broadcasting amongst peers. +type MempoolReactor struct { + p2p.BaseReactor + config *cfg.MempoolConfig + Mempool *Mempool +} + +// NewMempoolReactor returns a new MempoolReactor with the given config and mempool. +func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReactor { + memR := &MempoolReactor{ + config: config, + Mempool: mempool, + } + memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR) + return memR +} + +// SetLogger sets the Logger on the reactor and the underlying Mempool. +func (memR *MempoolReactor) SetLogger(l log.Logger) { + memR.Logger = l + memR.Mempool.SetLogger(l) +} + +// OnStart implements p2p.BaseReactor. +func (memR *MempoolReactor) OnStart() error { + if !memR.config.Broadcast { + memR.Logger.Info("Tx broadcasting is disabled") + } + return nil +} + +// GetChannels implements Reactor. +// It returns the list of channels for this reactor. +func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ + { + ID: MempoolChannel, + Priority: 5, + }, + } +} + +// AddPeer implements Reactor. +// It starts a broadcast routine ensuring all txs are forwarded to the given peer. +func (memR *MempoolReactor) AddPeer(peer p2p.Peer) { + go memR.broadcastTxRoutine(peer) +} + +// RemovePeer implements Reactor. +func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) { + // broadcast routine checks if peer is gone and returns +} + +// Receive implements Reactor. +// It adds any received transactions to the mempool. +func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { + msg, err := decodeMsg(msgBytes) + if err != nil { + memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + memR.Switch.StopPeerForError(src, err) + return + } + memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) + + switch msg := msg.(type) { + case *TxMessage: + err := memR.Mempool.CheckTx(msg.Tx, nil) + if err != nil { + memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err) + } + // broadcasting happens from go routines per peer + default: + memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } +} + +// BroadcastTx is an alias for Mempool.CheckTx. Broadcasting itself happens in peer routines. +func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) error { + return memR.Mempool.CheckTx(tx, cb) +} + +// PeerState describes the state of a peer. +type PeerState interface { + GetHeight() int64 +} + +// Send new mempool txs to peer. 
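+// The loop below walks the mempool's CList: it waits for a tx to appear,
+// holds off while the peer is still catching up, sends the tx over
+// MempoolChannel, and then advances to the next element.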
+func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { + if !memR.config.Broadcast { + return + } + + var next *clist.CElement + for { + // This happens because the CElement we were looking at got garbage + // collected (removed). That is, .NextWait() returned nil. Go ahead and + // start from the beginning. + if next == nil { + select { + case <-memR.Mempool.TxsWaitChan(): // Wait until a tx is available + if next = memR.Mempool.TxsFront(); next == nil { + continue + } + case <-peer.Quit(): + return + case <-memR.Quit(): + return + } + } + + memTx := next.Value.(*mempoolTx) + // make sure the peer is up to date + height := memTx.Height() + if peerState_i := peer.Get(types.PeerStateKey); peerState_i != nil { + peerState := peerState_i.(PeerState) + peerHeight := peerState.GetHeight() + if peerHeight < height-1 { // Allow for a lag of 1 block + time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) + continue + } + } + // send memTx + msg := &TxMessage{Tx: memTx.tx} + success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) + if !success { + time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) + continue + } + + select { + case <-next.NextWaitChan(): + // see the start of the for loop for nil check + next = next.Next() + case <-peer.Quit(): + return + case <-memR.Quit(): + return + } + } +} + +//----------------------------------------------------------------------------- +// Messages + +// MempoolMessage is a message sent or received by the MempoolReactor. +type MempoolMessage interface{} + +func RegisterMempoolMessages(cdc *amino.Codec) { + cdc.RegisterInterface((*MempoolMessage)(nil), nil) + cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil) +} + +func decodeMsg(bz []byte) (msg MempoolMessage, err error) { + if len(bz) > maxMsgSize { + return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) + } + err = cdc.UnmarshalBinaryBare(bz, &msg) + return +} + +//------------------------------------- + +// TxMessage is a MempoolMessage containing a transaction. +type TxMessage struct { + Tx types.Tx +} + +// String returns a string representation of the TxMessage. +func (m *TxMessage) String() string { + return fmt.Sprintf("[TxMessage %v]", m.Tx) +} diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go new file mode 100644 index 000000000..b4362032a --- /dev/null +++ b/mempool/reactor_test.go @@ -0,0 +1,157 @@ +package mempool + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/go-kit/kit/log/term" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/libs/log" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +// mempoolLogger is a TestingLogger which uses a different +// color for each validator ("validator" key must exist). 
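+// This makes the interleaved output of the in-process nodes easier to tell apart.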
+func mempoolLogger() log.Logger {
+	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
+		for i := 0; i < len(keyvals)-1; i += 2 {
+			if keyvals[i] == "validator" {
+				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
+			}
+		}
+		return term.FgBgColor{}
+	})
+}
+
+// connect N mempool reactors through N switches
+func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor {
+	reactors := make([]*MempoolReactor, N)
+	logger := mempoolLogger()
+	for i := 0; i < N; i++ {
+		app := kvstore.NewKVStoreApplication()
+		cc := proxy.NewLocalClientCreator(app)
+		mempool := newMempoolWithApp(cc)
+
+		reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we don't start the consensus states
+		reactors[i].SetLogger(logger.With("validator", i))
+	}
+
+	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
+		s.AddReactor("MEMPOOL", reactors[i])
+		return s
+
+	}, p2p.Connect2Switches)
+	return reactors
+}
+
+// wait for all txs on all reactors
+func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) {
+	// wait for the txs in all mempools
+	wg := new(sync.WaitGroup)
+	for i := 0; i < len(reactors); i++ {
+		wg.Add(1)
+		go _waitForTxs(t, wg, txs, i, reactors)
+	}
+
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+
+	timer := time.After(TIMEOUT)
+	select {
+	case <-timer:
+		t.Fatal("Timed out waiting for txs")
+	case <-done:
+	}
+}
+
+// wait for all txs on a single mempool
+func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int, reactors []*MempoolReactor) {
+
+	mempool := reactors[reactorIdx].Mempool
+	for mempool.Size() != len(txs) {
+		time.Sleep(time.Millisecond * 100)
+	}
+
+	reapedTxs := mempool.Reap(len(txs))
+	for i, tx := range txs {
+		assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i]))
+	}
+	wg.Done()
+}
+
+const (
+	NUM_TXS = 1000
+	TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow
+)
+
+func TestReactorBroadcastTxMessage(t *testing.T) {
+	config := cfg.TestConfig()
+	const N = 4
+	reactors := makeAndConnectMempoolReactors(config, N)
+	defer func() {
+		for _, r := range reactors {
+			r.Stop()
+		}
+	}()
+
+	// send a bunch of txs to the first reactor's mempool
+	// and wait for them all to be received in the others
+	txs := checkTxs(t, reactors[0].Mempool, NUM_TXS)
+	waitForTxs(t, txs, reactors)
+}
+
+func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	config := cfg.TestConfig()
+	const N = 2
+	reactors := makeAndConnectMempoolReactors(config, N)
+	defer func() {
+		for _, r := range reactors {
+			r.Stop()
+		}
+	}()
+
+	// stop peer
+	sw := reactors[1].Switch
+	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))
+
+	// check that we are not leaking any goroutines
+	// i.e. broadcastTxRoutine finishes when peer is stopped
+	leaktest.CheckTimeout(t, 10*time.Second)()
+}
+
+func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	config := cfg.TestConfig()
+	const N = 2
+	reactors := makeAndConnectMempoolReactors(config, N)
+
+	// stop reactors
+	for _, r := range reactors {
+		r.Stop()
+	}
+
+	// check that we are not leaking any goroutines
+	// i.e.
broadcastTxRoutine finishes when reactor is stopped
+	leaktest.CheckTimeout(t, 10*time.Second)()
+}
diff --git a/mempool/wire.go b/mempool/wire.go
new file mode 100644
index 000000000..ed0897268
--- /dev/null
+++ b/mempool/wire.go
@@ -0,0 +1,11 @@
+package mempool
+
+import (
+	"github.com/tendermint/go-amino"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	RegisterMempoolMessages(cdc)
+}
diff --git a/networks/local/Makefile b/networks/local/Makefile
new file mode 100644
index 000000000..98517851d
--- /dev/null
+++ b/networks/local/Makefile
@@ -0,0 +1,7 @@
+# Makefile for the "localnode" docker image.
+
+all:
+	docker build --tag tendermint/localnode localnode
+
+.PHONY: all
+
diff --git a/networks/local/README.md b/networks/local/README.md
new file mode 100644
index 000000000..09a0b12cb
--- /dev/null
+++ b/networks/local/README.md
@@ -0,0 +1,79 @@
+# Local Cluster with Docker Compose
+
+## Requirements
+
+- [Install tendermint](/docs/install.md)
+- [Install docker](https://docs.docker.com/engine/installation/)
+- [Install docker-compose](https://docs.docker.com/compose/install/)
+
+## Build
+
+Build the `tendermint` binary and the `tendermint/localnode` docker image.
+
+Note that the binary will be mounted into the container, so it can be updated without
+rebuilding the image.
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+
+# Build the linux binary in ./build
+make build-linux
+
+# Build tendermint/localnode image
+make build-docker-localnode
+```
+
+
+## Run a testnet
+
+To start a 4-node testnet, run:
+
+```
+make localnet-start
+```
+
+This creates a 4-node network using the localnode image. Each node exposes
+its P2P and RPC endpoints to the host machine: ports 26656-26657 for node 0,
+26659-26660 for node 1, 26661-26662 for node 2, and 26663-26664 for node 3
+(the RPC servers are thus reachable on 26657, 26660, 26662, and 26664).
+
+To update the binary, just rebuild it and restart the nodes:
+
+```
+make build-linux
+make localnet-stop
+make localnet-start
+```
+
+## Configuration
+
+Running `make localnet-start` creates the files for a 4-node testnet in `./build` by calling the `tendermint testnet` command.
+
+The `./build` directory is mounted at the `/tendermint` mount point, which makes the binary and config files available inside the container.
+
+For instance, to create a single node testnet:
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+
+# Clear the build folder
+rm -rf ./build
+
+# Build binary
+make build-linux
+
+# Create configuration
+docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
+
+# Run the node
+docker run -v `pwd`/build:/tendermint tendermint/localnode
+
+```
+
+## Logging
+
+The log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved but printed to the screen.
+
+## Special binaries
+
+If you have multiple binaries with different names, you can specify which one to run with the `BINARY` environment variable. The path of the binary is relative to the attached volume.
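+
+For example, assuming a binary named `tendermint_my_test_version` has been
+copied into `./build` (the name is only an illustration), a node could be
+started with:
+
+```
+docker run -e BINARY=tendermint_my_test_version -v `pwd`/build:/tendermint tendermint/localnode
+```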
+
diff --git a/networks/local/localnode/Dockerfile b/networks/local/localnode/Dockerfile
new file mode 100644
index 000000000..3942cecd6
--- /dev/null
+++ b/networks/local/localnode/Dockerfile
@@ -0,0 +1,16 @@
+FROM alpine:3.7
+MAINTAINER Greg Szabo
+
+RUN apk update && \
+    apk upgrade && \
+    apk --no-cache add curl jq file
+
+VOLUME [ "/tendermint" ]
+WORKDIR /tendermint
+EXPOSE 26656 26657
+ENTRYPOINT ["/usr/bin/wrapper.sh"]
+CMD ["node", "--proxy_app", "kvstore"]
+STOPSIGNAL SIGTERM
+
+COPY wrapper.sh /usr/bin/wrapper.sh
+
diff --git a/networks/local/localnode/wrapper.sh b/networks/local/localnode/wrapper.sh
new file mode 100755
index 000000000..fe8031e66
--- /dev/null
+++ b/networks/local/localnode/wrapper.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env sh
+
+##
+## Input parameters
+##
+BINARY=/tendermint/${BINARY:-tendermint}
+ID=${ID:-0}
+LOG=${LOG:-tendermint.log}
+
+##
+## Assert linux binary
+##
+if ! [ -f "${BINARY}" ]; then
+	echo "The binary $(basename "${BINARY}") cannot be found. Please add the binary to the shared folder. If the binary is not named 'tendermint', set the BINARY environment variable, e.g.: -e BINARY=tendermint_my_test_version"
+	exit 1
+fi
+BINARY_CHECK="$(file "$BINARY" | grep 'ELF 64-bit LSB executable, x86-64')"
+if [ -z "${BINARY_CHECK}" ]; then
+	echo "Binary needs to be built for OS linux, ARCH amd64"
+	exit 1
+fi
+
+##
+## Run binary with all parameters
+##
+export TMHOME="/tendermint/node${ID}"
+
+if [ -d "`dirname ${TMHOME}/${LOG}`" ]; then
+  "$BINARY" "$@" | tee "${TMHOME}/${LOG}"
+else
+  "$BINARY" "$@"
+fi
+
+chmod -R 777 /tendermint
+
diff --git a/networks/remote/README.md b/networks/remote/README.md
new file mode 100644
index 000000000..2094fcc98
--- /dev/null
+++ b/networks/remote/README.md
@@ -0,0 +1,3 @@
+# Remote Cluster with Terraform and Ansible
+
+See the [docs](/docs/terraform-and-ansible.md)
diff --git a/networks/remote/ansible/.gitignore b/networks/remote/ansible/.gitignore
new file mode 100644
index 000000000..a8b42eb6e
--- /dev/null
+++ b/networks/remote/ansible/.gitignore
@@ -0,0 +1 @@
+*.retry
diff --git a/networks/remote/ansible/ansible.cfg b/networks/remote/ansible/ansible.cfg
new file mode 100644
index 000000000..045c1ea60
--- /dev/null
+++ b/networks/remote/ansible/ansible.cfg
@@ -0,0 +1,4 @@
+[defaults]
+retry_files_enabled = False
+host_key_checking = False
+
diff --git a/networks/remote/ansible/config.yml b/networks/remote/ansible/config.yml
new file mode 100644
index 000000000..7b772fb70
--- /dev/null
+++ b/networks/remote/ansible/config.yml
@@ -0,0 +1,18 @@
+---
+
+# Requires the BINARY and CONFIGDIR variables to be set.
+# N=4 hosts by default.
+
+- hosts: all
+  user: root
+  any_errors_fatal: true
+  gather_facts: yes
+  vars:
+    - service: tendermint
+    - N: 4
+  roles:
+    - stop
+    - config
+    - unsafe_reset
+    - start
+
diff --git a/networks/remote/ansible/install.yml b/networks/remote/ansible/install.yml
new file mode 100644
index 000000000..a57b4be44
--- /dev/null
+++ b/networks/remote/ansible/install.yml
@@ -0,0 +1,11 @@
+---
+
+- hosts: all
+  user: root
+  any_errors_fatal: true
+  gather_facts: no
+  vars:
+    - service: tendermint
+  roles:
+    - install
+
diff --git a/networks/remote/ansible/inventory/COPYING b/networks/remote/ansible/inventory/COPYING
new file mode 100644
index 000000000..10926e87f
--- /dev/null
+++ b/networks/remote/ansible/inventory/COPYING
@@ -0,0 +1,675 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. 
+ + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". 
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+.
+
diff --git a/networks/remote/ansible/inventory/digital_ocean.ini b/networks/remote/ansible/inventory/digital_ocean.ini
new file mode 100644
index 000000000..b809554b2
--- /dev/null
+++ b/networks/remote/ansible/inventory/digital_ocean.ini
@@ -0,0 +1,34 @@
+# Ansible DigitalOcean external inventory script settings
+#
+
+[digital_ocean]
+
+# The module needs your DigitalOcean API Token.
+# It may also be specified on the command line via --api-token
+# or via the environment variables DO_API_TOKEN or DO_API_KEY
+#
+#api_token = 123456abcdefg
+
+
+# API calls to DigitalOcean may be slow. For this reason, we cache the results
+# of an API call. Set this to the path you want cache files to be written to.
+# One file will be written to this directory:
+#   - ansible-digital_ocean.cache
+#
+cache_path = /tmp
+
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+#
+cache_max_age = 300
+
+# Use the private network IP address instead of the public when available.
+#
+use_private_network = False
+
+# Pass variables to every group, e.g.:
+#
+#   group_variables = { 'ansible_user': 'root' }
+#
+group_variables = {}
diff --git a/networks/remote/ansible/inventory/digital_ocean.py b/networks/remote/ansible/inventory/digital_ocean.py
new file mode 100755
index 000000000..24ba64370
--- /dev/null
+++ b/networks/remote/ansible/inventory/digital_ocean.py
@@ -0,0 +1,471 @@
+#!/usr/bin/env python
+
+'''
+DigitalOcean external inventory script
+======================================
+
+Generates Ansible inventory of DigitalOcean Droplets.
+
+In addition to the --list and --host options used by Ansible, there are options
+for generating JSON of other DigitalOcean data. This is useful when creating
+droplets. For example, --regions will return all the DigitalOcean Regions.
+This information can also be easily found in the cache file, whose default
+location is /tmp/ansible-digital_ocean.cache.
+
+The --pretty (-p) option pretty-prints the output for better human readability.
+
+----
+Although the cache stores all the information received from DigitalOcean,
+the cache is not used for current droplet information (in --list, --host,
+--all, and --droplets). This is so that accurate droplet information is always
+found. You can force this script to use the cache with --force-cache.
+
+----
+Configuration is read from `digital_ocean.ini`, then from environment variables,
+and then from command-line arguments.
+
+Most notably, the DigitalOcean API Token must be specified. It can be specified
+in the INI file or with the following environment variables:
+    export DO_API_TOKEN='abc123' or
+    export DO_API_KEY='abc123'
+
+Alternatively, it can be passed on the command-line with --api-token.
+ +If you specify DigitalOcean credentials in the INI file, a handy way to +get them into your environment (e.g., to use the digital_ocean module) +is to use the output of the --env option with export: + export $(digital_ocean.py --env) + +---- +The following groups are generated from --list: + - ID (droplet ID) + - NAME (droplet NAME) + - image_ID + - image_NAME + - distro_NAME (distribution NAME from image) + - region_NAME + - size_NAME + - status_STATUS + +For each host, the following variables are registered: + - do_backup_ids + - do_created_at + - do_disk + - do_features - list + - do_id + - do_image - object + - do_ip_address + - do_private_ip_address + - do_kernel - object + - do_locked + - do_memory + - do_name + - do_networks - object + - do_next_backup_window + - do_region - object + - do_size - object + - do_size_slug + - do_snapshot_ids - list + - do_status + - do_tags + - do_vcpus + - do_volume_ids + +----- +``` +usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] + [--droplets] [--regions] [--images] [--sizes] + [--ssh-keys] [--domains] [--pretty] + [--cache-path CACHE_PATH] + [--cache-max_age CACHE_MAX_AGE] + [--force-cache] + [--refresh-cache] + [--api-token API_TOKEN] + +Produce an Ansible Inventory file based on DigitalOcean credentials + +optional arguments: + -h, --help show this help message and exit + --list List all active Droplets as Ansible inventory + (default: True) + --host HOST Get all Ansible inventory variables about a specific + Droplet + --all List all DigitalOcean information as JSON + --droplets List Droplets as JSON + --regions List Regions as JSON + --images List Images as JSON + --sizes List Sizes as JSON + --ssh-keys List SSH keys as JSON + --domains List Domains as JSON + --pretty, -p Pretty-print results + --cache-path CACHE_PATH + Path to the cache files (default: .) + --cache-max_age CACHE_MAX_AGE + Maximum age of the cached items (default: 0) + --force-cache Only use data from the cache + --refresh-cache Force refresh of cache by making API requests to + DigitalOcean (default: False - use cache files) + --api-token API_TOKEN, -a API_TOKEN + DigitalOcean API Token +``` + +''' + +# (c) 2013, Evan Wies +# +# Inspired by the EC2 inventory plugin: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +###################################################################### + +import os +import sys +import re +import argparse +from time import time +import ConfigParser +import ast + +try: + import json +except ImportError: + import simplejson as json + +try: + from dopy.manager import DoManager +except ImportError as e: + sys.exit("failed=True msg='`dopy` library required for this script'") + + +class DigitalOceanInventory(object): + + ########################################################################### + # Main execution path + ########################################################################### + + def __init__(self): + ''' Main execution path ''' + + # DigitalOceanInventory data + self.data = {} # All DigitalOcean data + self.inventory = {} # Ansible Inventory + + # Define defaults + self.cache_path = '.' + self.cache_max_age = 0 + self.use_private_network = False + self.group_variables = {} + + # Read settings, environment variables, and CLI arguments + self.read_settings() + self.read_environment() + self.read_cli_args() + + # Verify credentials were set + if not hasattr(self, 'api_token'): + sys.stderr.write('''Could not find values for DigitalOcean api_token. +They must be specified via either ini file, command line argument (--api-token), +or environment variables (DO_API_TOKEN)\n''') + sys.exit(-1) + + # env command, show DigitalOcean credentials + if self.args.env: + print("DO_API_TOKEN=%s" % self.api_token) + sys.exit(0) + + # Manage cache + self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" + self.cache_refreshed = False + + if self.is_cache_valid(): + self.load_from_cache() + if len(self.data) == 0: + if self.args.force_cache: + sys.stderr.write('''Cache is empty and --force-cache was specified\n''') + sys.exit(-1) + + self.manager = DoManager(None, self.api_token, api_version=2) + + # Pick the json_data to print based on the CLI command + if self.args.droplets: + self.load_from_digital_ocean('droplets') + json_data = {'droplets': self.data['droplets']} + elif self.args.regions: + self.load_from_digital_ocean('regions') + json_data = {'regions': self.data['regions']} + elif self.args.images: + self.load_from_digital_ocean('images') + json_data = {'images': self.data['images']} + elif self.args.sizes: + self.load_from_digital_ocean('sizes') + json_data = {'sizes': self.data['sizes']} + elif self.args.ssh_keys: + self.load_from_digital_ocean('ssh_keys') + json_data = {'ssh_keys': self.data['ssh_keys']} + elif self.args.domains: + self.load_from_digital_ocean('domains') + json_data = {'domains': self.data['domains']} + elif self.args.all: + self.load_from_digital_ocean() + json_data = self.data + elif self.args.host: + json_data = self.load_droplet_variables_for_host() + else: # '--list' this is last to make it default + self.load_from_digital_ocean('droplets') + self.build_inventory() + json_data = self.inventory + + if self.cache_refreshed: + self.write_to_cache() + + if self.args.pretty: + print(json.dumps(json_data, sort_keys=True, indent=2)) + else: + print(json.dumps(json_data)) + # That's all she wrote... 
+ + ########################################################################### + # Script configuration + ########################################################################### + + def read_settings(self): + ''' Reads the settings from the digital_ocean.ini file ''' + config = ConfigParser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') + + # Credentials + if config.has_option('digital_ocean', 'api_token'): + self.api_token = config.get('digital_ocean', 'api_token') + + # Cache related + if config.has_option('digital_ocean', 'cache_path'): + self.cache_path = config.get('digital_ocean', 'cache_path') + if config.has_option('digital_ocean', 'cache_max_age'): + self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') + + # Private IP Address + if config.has_option('digital_ocean', 'use_private_network'): + self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') + + # Group variables + if config.has_option('digital_ocean', 'group_variables'): + self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) + + def read_environment(self): + ''' Reads the settings from environment variables ''' + # Setup credentials + if os.getenv("DO_API_TOKEN"): + self.api_token = os.getenv("DO_API_TOKEN") + if os.getenv("DO_API_KEY"): + self.api_token = os.getenv("DO_API_KEY") + + def read_cli_args(self): + ''' Command line argument processing ''' + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') + + parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') + parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') + + parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') + parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') + parser.add_argument('--regions', action='store_true', help='List Regions as JSON') + parser.add_argument('--images', action='store_true', help='List Images as JSON') + parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') + parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') + parser.add_argument('--domains', action='store_true', help='List Domains as JSON') + + parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') + + parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') + parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') + parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') + parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, + help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') + + parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') + parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') + + self.args = parser.parse_args() + + if self.args.api_token: + self.api_token = self.args.api_token + + # Make --list default if none of the other commands are specified + if (not self.args.droplets and not self.args.regions and + not self.args.images and not self.args.sizes and + not self.args.ssh_keys and not 
self.args.domains and + not self.args.all and not self.args.host): + self.args.list = True + + ########################################################################### + # Data Management + ########################################################################### + + def load_from_digital_ocean(self, resource=None): + '''Get JSON from DigitalOcean API''' + if self.args.force_cache and os.path.isfile(self.cache_filename): + return + # We always get fresh droplets + if self.is_cache_valid() and not (resource == 'droplets' or resource is None): + return + if self.args.refresh_cache: + resource = None + + if resource == 'droplets' or resource is None: + self.data['droplets'] = self.manager.all_active_droplets() + self.cache_refreshed = True + if resource == 'regions' or resource is None: + self.data['regions'] = self.manager.all_regions() + self.cache_refreshed = True + if resource == 'images' or resource is None: + self.data['images'] = self.manager.all_images(filter=None) + self.cache_refreshed = True + if resource == 'sizes' or resource is None: + self.data['sizes'] = self.manager.sizes() + self.cache_refreshed = True + if resource == 'ssh_keys' or resource is None: + self.data['ssh_keys'] = self.manager.all_ssh_keys() + self.cache_refreshed = True + if resource == 'domains' or resource is None: + self.data['domains'] = self.manager.all_domains() + self.cache_refreshed = True + + def build_inventory(self): + '''Build Ansible inventory of droplets''' + self.inventory = { + 'all': { + 'hosts': [], + 'vars': self.group_variables + }, + '_meta': {'hostvars': {}} + } + + # add all droplets by id and name + for droplet in self.data['droplets']: + # when using private_networking, the API reports the private one in "ip_address". + if 'private_networking' in droplet['features'] and not self.use_private_network: + for net in droplet['networks']['v4']: + if net['type'] == 'public': + dest = net['ip_address'] + else: + continue + else: + dest = droplet['ip_address'] + + self.inventory['all']['hosts'].append(dest) + + self.inventory[droplet['id']] = [dest] + self.inventory[droplet['name']] = [dest] + + # groups that are always present + for group in ('region_' + droplet['region']['slug'], + 'image_' + str(droplet['image']['id']), + 'size_' + droplet['size']['slug'], + 'distro_' + self.to_safe(droplet['image']['distribution']), + 'status_' + droplet['status']): + if group not in self.inventory: + self.inventory[group] = {'hosts': [], 'vars': {}} + self.inventory[group]['hosts'].append(dest) + + # groups that are not always present + for group in (droplet['image']['slug'], + droplet['image']['name']): + if group: + image = 'image_' + self.to_safe(group) + if image not in self.inventory: + self.inventory[image] = {'hosts': [], 'vars': {}} + self.inventory[image]['hosts'].append(dest) + + if droplet['tags']: + for tag in droplet['tags']: + if tag not in self.inventory: + self.inventory[tag] = {'hosts': [], 'vars': {}} + self.inventory[tag]['hosts'].append(dest) + + # hostvars + info = self.do_namespace(droplet) + self.inventory['_meta']['hostvars'][dest] = info + + def load_droplet_variables_for_host(self): + '''Generate a JSON response to a --host call''' + host = int(self.args.host) + droplet = self.manager.show_droplet(host) + info = self.do_namespace(droplet) + return {'droplet': info} + + ########################################################################### + # Cache Management + ########################################################################### + + def is_cache_valid(self): + ''' 
Determines whether the cache file has expired or is still valid '''
+        if os.path.isfile(self.cache_filename):
+            mod_time = os.path.getmtime(self.cache_filename)
+            current_time = time()
+            if (mod_time + self.cache_max_age) > current_time:
+                return True
+        return False
+
+    def load_from_cache(self):
+        ''' Reads the data from the cache file and assigns it to member variables as Python objects '''
+        try:
+            cache = open(self.cache_filename, 'r')
+            json_data = cache.read()
+            cache.close()
+            data = json.loads(json_data)
+        except IOError:
+            data = {'data': {}, 'inventory': {}}
+
+        self.data = data['data']
+        self.inventory = data['inventory']
+
+    def write_to_cache(self):
+        ''' Writes data in JSON format to a file '''
+        data = {'data': self.data, 'inventory': self.inventory}
+        json_data = json.dumps(data, sort_keys=True, indent=2)
+
+        cache = open(self.cache_filename, 'w')
+        cache.write(json_data)
+        cache.close()
+
+    ###########################################################################
+    # Utilities
+    ###########################################################################
+
+    def push(self, my_dict, key, element):
+        ''' Pushes an element onto a list that may not have been defined in the dict '''
+        if key in my_dict:
+            my_dict[key].append(element)
+        else:
+            my_dict[key] = [element]
+
+    def to_safe(self, word):
+        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
+        return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
+
+    def do_namespace(self, data):
+        ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace '''
+        info = {}
+        for k, v in data.items():
+            info['do_' + k] = v
+        return info
+
+
+###########################################################################
+# Run the script
+DigitalOceanInventory()
diff --git a/networks/remote/ansible/logzio.yml b/networks/remote/ansible/logzio.yml
new file mode 100644
index 000000000..53f637f2f
--- /dev/null
+++ b/networks/remote/ansible/logzio.yml
@@ -0,0 +1,14 @@
+---
+
+#Note: You need to add the LOGZIO_TOKEN variable with your API key.
Like this: ansible-playbook -e LOGZIO_TOKEN=ABCXYZ123456
+
+- hosts: all
+  user: root
+  any_errors_fatal: true
+  gather_facts: no
+  vars:
+    - service: tendermint
+    - JOURNALBEAT_BINARY: "{{lookup('env', 'GOPATH')}}/bin/journalbeat"
+  roles:
+    - logzio
+
diff --git a/networks/remote/ansible/reset.yml b/networks/remote/ansible/reset.yml
new file mode 100644
index 000000000..63b1733c7
--- /dev/null
+++ b/networks/remote/ansible/reset.yml
@@ -0,0 +1,14 @@
+---
+
+- hosts: all
+  user: root
+  any_errors_fatal: true
+  gather_facts: no
+  vars:
+    - service: tendermint
+  roles:
+    - stop
+    - unsafe_reset
+    - start
+
+
diff --git a/networks/remote/ansible/restart.yml b/networks/remote/ansible/restart.yml
new file mode 100644
index 000000000..71d4bc66d
--- /dev/null
+++ b/networks/remote/ansible/restart.yml
@@ -0,0 +1,12 @@
+---
+
+- hosts: all
+  user: root
+  any_errors_fatal: true
+  gather_facts: no
+  vars:
+    - service: tendermint
+  roles:
+    - stop
+    - start
+
diff --git a/networks/remote/ansible/roles/config/tasks/main.yml b/networks/remote/ansible/roles/config/tasks/main.yml
new file mode 100644
index 000000000..a51098caa
--- /dev/null
+++ b/networks/remote/ansible/roles/config/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+
+- name: Copy binary
+  copy:
+    src: "{{BINARY}}"
+    dest: /usr/bin
+    mode: 0755
+
+- name: Copy config
+  when: item <= N and ansible_hostname == 'sentrynet-node' ~ item
+  copy:
+    src: "{{CONFIGDIR}}/node{{item}}/"
+    dest: "/home/{{service}}/.{{service}}/"
+    owner: "{{service}}"
+    group: "{{service}}"
+  loop: [ 0, 1, 2, 3, 4, 5, 6, 7 ]
+
diff --git a/networks/remote/ansible/roles/install/handlers/main.yml b/networks/remote/ansible/roles/install/handlers/main.yml
new file mode 100644
index 000000000..16afbb618
--- /dev/null
+++ b/networks/remote/ansible/roles/install/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+
+- name: reload services
+  systemd: "name={{service}} daemon_reload=yes enabled=yes"
+
diff --git a/networks/remote/ansible/roles/install/tasks/main.yml b/networks/remote/ansible/roles/install/tasks/main.yml
new file mode 100644
index 000000000..9e5a7524a
--- /dev/null
+++ b/networks/remote/ansible/roles/install/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+
+- name: Create service group
+  group: "name={{service}}"
+
+- name: Create service user
+  user: "name={{service}} group={{service}} home=/home/{{service}}"
+
+- name: Change user folder to more permissive
+  file: "path=/home/{{service}} mode=0755"
+
+- name: Create service
+  template: "src=systemd.service.j2 dest=/etc/systemd/system/{{service}}.service"
+  notify: reload services
+
diff --git a/networks/remote/ansible/roles/install/templates/systemd.service.j2 b/networks/remote/ansible/roles/install/templates/systemd.service.j2
new file mode 100644
index 000000000..17b3de4d1
--- /dev/null
+++ b/networks/remote/ansible/roles/install/templates/systemd.service.j2
@@ -0,0 +1,17 @@
+[Unit]
+Description={{service}}
+Requires=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+User={{service}}
+Group={{service}}
+PermissionsStartOnly=true
+ExecStart=/usr/bin/tendermint node --proxy_app=kvstore
+ExecReload=/bin/kill -HUP $MAINPID
+KillSignal=SIGTERM
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/networks/remote/ansible/roles/logzio/files/journalbeat.service b/networks/remote/ansible/roles/logzio/files/journalbeat.service
new file mode 100644
index 000000000..3cb66a454
--- /dev/null
+++ b/networks/remote/ansible/roles/logzio/files/journalbeat.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=journalbeat
+# Requires= propagates activation, deactivation and activation failures.
+Requires=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+ExecStart=/usr/bin/journalbeat -c /etc/journalbeat/journalbeat.yml -path.home /usr/share/journalbeat -path.config /etc/journalbeat -path.data /var/lib/journalbeat -path.logs /var/log/journalbeat
+
+[Install]
+WantedBy=multi-user.target
+
+
diff --git a/networks/remote/ansible/roles/logzio/handlers/main.yml b/networks/remote/ansible/roles/logzio/handlers/main.yml
new file mode 100644
index 000000000..0b371fc51
--- /dev/null
+++ b/networks/remote/ansible/roles/logzio/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: reload daemon
+  command: "systemctl daemon-reload"
+
+- name: restart journalbeat
+  service: name=journalbeat state=restarted
+
diff --git a/networks/remote/ansible/roles/logzio/tasks/main.yml b/networks/remote/ansible/roles/logzio/tasks/main.yml
new file mode 100644
index 000000000..ab3976f22
--- /dev/null
+++ b/networks/remote/ansible/roles/logzio/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+
+- name: Copy journalbeat binary
+  copy: src="{{JOURNALBEAT_BINARY}}" dest=/usr/bin/journalbeat mode=0755
+  notify: restart journalbeat
+
+- name: Create folders
+  file: "path={{item}} state=directory recurse=yes"
+  with_items:
+    - /etc/journalbeat
+    - /etc/pki/tls/certs
+    - /usr/share/journalbeat
+    - /var/log/journalbeat
+
+- name: Copy journalbeat config
+  template: src=journalbeat.yml.j2 dest=/etc/journalbeat/journalbeat.yml mode=0600
+  notify: restart journalbeat
+
+- name: Get server certificate for Logz.io
+  get_url: "url=https://raw.githubusercontent.com/logzio/public-certificates/master/COMODORSADomainValidationSecureServerCA.crt force=yes dest=/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt"
+
+- name: Copy journalbeat service config
+  copy: src=journalbeat.service dest=/etc/systemd/system/journalbeat.service
+  notify:
+    - reload daemon
+    - restart journalbeat
+
diff --git a/networks/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 b/networks/remote/ansible/roles/logzio/templates/journalbeat.yml.j2
new file mode 100644
index 000000000..a421ec8a5
--- /dev/null
+++ b/networks/remote/ansible/roles/logzio/templates/journalbeat.yml.j2
@@ -0,0 +1,342 @@
+#======================== Journalbeat Configuration ============================
+
+journalbeat:
+  # What position in journald to seek to at start up
+  # options: cursor, tail, head (defaults to tail)
+  #seek_position: tail
+
+  # If seek_position is set to cursor and seeking to cursor fails
+  # fall back to this method. If set to none it will exit
+  # options: tail, head, none (defaults to tail)
+  #cursor_seek_fallback: tail
+
+  # Store the cursor of the successfully published events
+  #write_cursor_state: true
+
+  # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state")
+  #cursor_state_file: .journalbeat-cursor-state
+
+  # How frequently should we save the cursor to disk (defaults to 5s)
+  #cursor_flush_period: 5s
+
+  # Path to the file to store the queue of events pending (defaults to ".journalbeat-pending-queue")
+  #pending_queue.file: .journalbeat-pending-queue
+
+  # How frequently should we save the queue to disk (defaults to 1s).
+  # Pending queue represents the WAL of events queued to be published
+  # or being published and waiting for acknowledgement. In case of a
+  # regular restart of journalbeat all the events not yet acknowledged
+  # will be flushed to disk during the shutdown.
+  # In case of disaster journalbeat most probably won't get a chance to shut
+  # down gracefully, so this flush period option also serves as a backup
+  # creation frequency option.
+  #pending_queue.flush_period: 1s
+
+  # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message"
+  # (defaults to false)
+  #clean_field_names: false
+
+  # All journal entries are strings by default. You can try to convert them to numbers.
+  # (defaults to false)
+  #convert_to_numbers: false
+
+  # Store all the fields of the Systemd Journal entry under this field
+  # Can be almost any string suitable to be a field name of an ElasticSearch document.
+  # Dots can be used to create nested fields.
+  # Two exceptions:
+  # - no repeated dots;
+  # - no trailing dots, e.g. "journal..field_name." will fail
+  # (defaults to "" hence stores on the upper level of the event)
+  #move_metadata_to_field: ""
+
+  # Specific units to monitor.
+  units: ["{{service}}.service"]
+
+  # Specify Journal paths to open. You can pass an array of paths to Systemd Journal paths.
+  # If you want to open a Journal from a directory, just pass an array consisting of one element
+  # representing the path. See: https://www.freedesktop.org/software/systemd/man/sd_journal_open.html
+  # By default this setting is empty, thus journalbeat will attempt to find all journal files automatically
+  #journal_paths: ["/var/log/journal"]
+
+  #default_type: journal
+
+#================================ General ======================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name: journalbeat
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+tags: ["{{service}}"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+fields:
+  logzio_codec: plain
+  token: {{LOGZIO_TOKEN}}
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+fields_under_root: true
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# The internal queue size for bulk events in the processing pipeline.
+# Do not modify this value.
+#bulk_queue_size: 0
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Processors ===================================
+
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external metadata. This section defines a list of
+# processors that are applied one by one and the first one receives the initial
+# event:
+#
+# event -> filter1 -> event1 -> filter2 -> event2 ...
+#
+# The supported processors are drop_fields, drop_event, include_fields, and
+# add_cloud_metadata.
+#
+# For example, you can use the following processors to keep the fields that
+# contain CPU load percentages, but remove the fields that contain CPU ticks
+# values:
+#
+processors:
+#- include_fields:
+#    fields: ["cpu"]
+- drop_fields:
+    fields: ["beat.name", "beat.version", "logzio_codec", "SYSLOG_IDENTIFIER", "SYSLOG_FACILITY", "PRIORITY"]
+#
+# The following example drops the events that have the HTTP response code 200:
+#
+#processors:
+#- drop_event:
+#    when:
+#       equals:
+#           http.code: 200
+#
+# The following example enriches each event with metadata from the cloud
+# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
+#
+#processors:
+#- add_cloud_metadata:
+#
+
+#================================ Outputs ======================================
+
+# Configure what outputs to use when sending the data collected by the beat.
+# Multiple outputs may be used.
+
+#----------------------------- Logstash output ---------------------------------
+output.logstash:
+  # Boolean flag to enable or disable the output module.
+  enabled: true
+
+  # The Logstash hosts
+  hosts: ["listener.logz.io:5015"]
+
+  # Number of workers per Logstash host.
+  #worker: 1
+
+  # Set gzip compression level.
+  #compression_level: 3
+
+  # Optionally load balance the events between the Logstash hosts
+  #loadbalance: true
+
+  # Number of batches to be sent asynchronously to logstash while processing
+  # new batches.
+  #pipelining: 0
+
+  # Optional index name. The default index name is set to the name of the beat
+  # in all lowercase.
+  #index: 'beatname'
+
+  # SOCKS5 proxy server URL
+  #proxy_url: socks5://user:password@socks5-server:2233
+
+  # Resolve names locally when using a proxy server. Defaults to false.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  ssl.certificate_authorities: ["/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+#------------------------------- File output -----------------------------------
+#output.file:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Path to the directory where to save the generated files. The option is
+  # mandatory.
+  #path: "/tmp/beatname"
+
+  # Name of the generated files. The default is `beatname` and it generates
+  # files: `beatname`, `beatname.1`, `beatname.2`, etc.
+  #filename: beatname
+
+  # Maximum size in kilobytes of each file. When this size is reached, and on
+  # every beatname restart, the files are rotated.
The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Pretty print json event + #pretty: false + +#================================= Paths ====================================== + +# The home path for the beatname installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the beatname installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the beatname installation. This is the default base path +# for all the files in which beatname needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a beatname installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag. +#dashboards.enabled: false + +# The URL from where to download the dashboards archive. By default this URL +# has a value which is computed based on the Beat name and version. For released +# versions, this URL points to the dashboard archive on the artifacts.elastic.co +# website. +#dashboards.url: + +# The directory from where to read the dashboards. It is used instead of the URL +# when it has a value. +#dashboards.directory: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the URL when it has a value. +#dashboards.file: + +# If this option is enabled, the snapshot URL is used instead of the default URL. +#dashboards.snapshot: false + +# The URL from where to download the snapshot version of the dashboards. By default +# this has a value which is computed based on the Beat name and version. +#dashboards.snapshot_url + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#dashboards.beat: beatname + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. 
Example: testbeat-*
+#dashboards.index:
+
+#================================ Logging ======================================
+# There are three options for the log output: syslog, file, stderr.
+# Under Windows systems, the log files are by default sent to the file output,
+# under all other systems by default to syslog.
+
+# Sets log level. The default log level is info.
+# Available log levels are: critical, error, warning, info, debug
+#logging.level: info
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are "beat", "publish", "service"
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true
+
+# If enabled, beatname periodically logs its internal metrics that have changed
+# in the last period. For each metric that changed, the delta from the value at
+# the beginning of the period is logged. Also, the total values for
+# all non-zero internal metrics are logged on shutdown. The default is true.
+#logging.metrics.enabled: true
+
+# The period after which to log the internal metrics. The default is 30s.
+#logging.metrics.period: 30s
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/beatname
+
+  # The name of the files where the logs are written to.
+  #name: beatname
+
+  # Configure log file size limit. If limit is reached, log file will be
+  # automatically rotated
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+ #keepfiles: 7 diff --git a/networks/remote/ansible/roles/start/tasks/main.yml b/networks/remote/ansible/roles/start/tasks/main.yml new file mode 100644 index 000000000..6bc611c91 --- /dev/null +++ b/networks/remote/ansible/roles/start/tasks/main.yml @@ -0,0 +1,5 @@ +--- + +- name: start service + service: "name={{service}} state=started" + diff --git a/networks/remote/ansible/roles/status/tasks/main.yml b/networks/remote/ansible/roles/status/tasks/main.yml new file mode 100644 index 000000000..50170c746 --- /dev/null +++ b/networks/remote/ansible/roles/status/tasks/main.yml @@ -0,0 +1,10 @@ +--- + +- name: application service status + command: "service {{service}} status" + changed_when: false + register: status + +- name: Result + debug: var=status.stdout_lines + diff --git a/networks/remote/ansible/roles/stop/tasks/main.yml b/networks/remote/ansible/roles/stop/tasks/main.yml new file mode 100644 index 000000000..7db356f22 --- /dev/null +++ b/networks/remote/ansible/roles/stop/tasks/main.yml @@ -0,0 +1,5 @@ +--- + +- name: stop service + service: "name={{service}} state=stopped" + diff --git a/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml b/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml new file mode 100644 index 000000000..6ac1ec55a --- /dev/null +++ b/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml @@ -0,0 +1,4 @@ +- command: "{{service}} unsafe_reset_all {{ (service != 'tendermint') | ternary('node','') }} --home /home/{{service}}/.{{service}}" + become_user: "{{service}}" + become: yes + diff --git a/networks/remote/ansible/start.yml b/networks/remote/ansible/start.yml new file mode 100644 index 000000000..2be07dc73 --- /dev/null +++ b/networks/remote/ansible/start.yml @@ -0,0 +1,11 @@ +--- + +- hosts: all + user: root + any_errors_fatal: true + gather_facts: no + vars: + - service: tendermint + roles: + - start + diff --git a/networks/remote/ansible/status.yml b/networks/remote/ansible/status.yml new file mode 100644 index 000000000..a1721b87b --- /dev/null +++ b/networks/remote/ansible/status.yml @@ -0,0 +1,11 @@ +--- + +- hosts: all + user: root + any_errors_fatal: true + gather_facts: no + vars: + - service: tendermint + roles: + - status + diff --git a/networks/remote/ansible/stop.yml b/networks/remote/ansible/stop.yml new file mode 100644 index 000000000..abc6031d5 --- /dev/null +++ b/networks/remote/ansible/stop.yml @@ -0,0 +1,11 @@ +--- + +- hosts: all + user: root + any_errors_fatal: true + gather_facts: no + vars: + - service: tendermint + roles: + - stop + diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh new file mode 100644 index 000000000..1624711f9 --- /dev/null +++ b/networks/remote/integration.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash + +# XXX: this script is intended to be run from a fresh Digital Ocean droplet + +# NOTE: you must set this manually now +echo "export DO_API_TOKEN=\"yourToken\"" >> ~/.profile + +sudo apt-get update -y +sudo apt-get upgrade -y +sudo apt-get install -y jq unzip python-pip software-properties-common make + +# get and unpack golang +curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz +tar -xvf go1.10.linux-amd64.tar.gz + +## move binary and add to path +mv go /usr/local +echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile + +## create the goApps directory, set GOPATH, and put it on PATH +mkdir goApps +echo "export GOPATH=/root/goApps" >> ~/.profile +echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile + +source ~/.profile + +## get the code and move 
into repo
+REPO=github.com/tendermint/tendermint
+go get $REPO
+cd $GOPATH/src/$REPO
+
+## build
+git checkout zach/ansible
+make get_tools
+make get_vendor_deps
+make build
+
+# generate an ssh key
+ssh-keygen -f $HOME/.ssh/id_rsa -t rsa -N ''
+echo "export SSH_KEY_FILE=\"\$HOME/.ssh/id_rsa.pub\"" >> ~/.profile
+source ~/.profile
+
+# install terraform
+wget https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip
+unzip terraform_0.11.7_linux_amd64.zip -d /usr/bin/
+
+# install ansible
+sudo apt-get update -y
+sudo apt-add-repository ppa:ansible/ansible -y
+sudo apt-get update -y
+sudo apt-get install ansible -y
+
+# required by ansible
+pip install dopy
+
+# the next two commands are directory sensitive
+cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform
+
+terraform init
+terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" -auto-approve
+
+# let the droplets boot
+sleep 60
+
+# get the IPs
+ip0=`terraform output -json public_ips | jq '.value[0]'`
+ip1=`terraform output -json public_ips | jq '.value[1]'`
+ip2=`terraform output -json public_ips | jq '.value[2]'`
+ip3=`terraform output -json public_ips | jq '.value[3]'`
+
+# to remove quotes
+strip() {
+  opt=$1
+  temp="${opt%\"}"
+  temp="${temp#\"}"
+  echo $temp
+}
+
+ip0=$(strip $ip0)
+ip1=$(strip $ip1)
+ip2=$(strip $ip2)
+ip3=$(strip $ip3)
+
+# all the ansible commands are also directory specific
+cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible
+
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
+
+sleep 10
+
+# get each node's ID, then populate the ansible file
+id0=`curl $ip0:26657/status | jq .result.node_info.id`
+id1=`curl $ip1:26657/status | jq .result.node_info.id`
+id2=`curl $ip2:26657/status | jq .result.node_info.id`
+id3=`curl $ip3:26657/status | jq .result.node_info.id`
+
+id0=$(strip $id0)
+id1=$(strip $id1)
+id2=$(strip $id2)
+id3=$(strip $id3)
+
+# remove the file we'll re-write with the new info
+old_ansible_file=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/roles/install/templates/systemd.service.j2
+rm $old_ansible_file
+
+# need to populate the `--p2p.persistent_peers` flag
+echo "[Unit]
+Description={{service}}
+Requires=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+User={{service}}
+Group={{service}}
+PermissionsStartOnly=true
+ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=$id0@$ip0:26656,$id1@$ip1:26656,$id2@$ip2:26656,$id3@$ip3:26656
+ExecReload=/bin/kill -HUP \$MAINPID
+KillSignal=SIGTERM
+
+[Install]
+WantedBy=multi-user.target
+" >> $old_ansible_file
+
+# now, we can re-run the install command
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
+
+# and finally restart it all
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet restart.yml
+
+echo "congratulations, your testnet is now running :)"
diff --git a/networks/remote/terraform/.gitignore b/networks/remote/terraform/.gitignore
new file mode 100644
index 000000000..0cc2d499a
--- /dev/null
+++ b/networks/remote/terraform/.gitignore
@@ -0,0 +1,4 @@
+.terraform
+terraform.tfstate
+terraform.tfstate.backup
+terraform.tfstate.d
diff --git a/networks/remote/terraform/cluster/main.tf
b/networks/remote/terraform/cluster/main.tf new file mode 100644 index 000000000..98ab37cee --- /dev/null +++ b/networks/remote/terraform/cluster/main.tf @@ -0,0 +1,28 @@ +resource "digitalocean_tag" "cluster" { + name = "${var.name}" +} + +resource "digitalocean_ssh_key" "cluster" { + name = "${var.name}" + public_key = "${file(var.ssh_key)}" +} + +resource "digitalocean_droplet" "cluster" { + name = "${var.name}-node${count.index}" + image = "centos-7-x64" + size = "${var.instance_size}" + region = "${element(var.regions, count.index)}" + ssh_keys = ["${digitalocean_ssh_key.cluster.id}"] + count = "${var.servers}" + tags = ["${digitalocean_tag.cluster.id}"] + + lifecycle = { + prevent_destroy = false + } + + connection { + timeout = "30s" + } + +} + diff --git a/networks/remote/terraform/cluster/outputs.tf b/networks/remote/terraform/cluster/outputs.tf new file mode 100644 index 000000000..78291b6a9 --- /dev/null +++ b/networks/remote/terraform/cluster/outputs.tf @@ -0,0 +1,15 @@ +// The cluster name +output "name" { + value = "${var.name}" +} + +// The list of cluster instance IDs +output "instances" { + value = ["${digitalocean_droplet.cluster.*.id}"] +} + +// The list of cluster instance public IPs +output "public_ips" { + value = ["${digitalocean_droplet.cluster.*.ipv4_address}"] +} + diff --git a/networks/remote/terraform/cluster/variables.tf b/networks/remote/terraform/cluster/variables.tf new file mode 100644 index 000000000..1b6a70072 --- /dev/null +++ b/networks/remote/terraform/cluster/variables.tf @@ -0,0 +1,25 @@ +variable "name" { + description = "The cluster name, e.g cdn" +} + +variable "regions" { + description = "Regions to launch in" + type = "list" + default = ["AMS3", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"] +} + +variable "ssh_key" { + description = "SSH key filename to copy to the nodes" + type = "string" +} + +variable "instance_size" { + description = "The instance size to use" + default = "2gb" +} + +variable "servers" { + description = "Desired instance count" + default = 4 +} + diff --git a/networks/remote/terraform/main.tf b/networks/remote/terraform/main.tf new file mode 100644 index 000000000..a768ee13a --- /dev/null +++ b/networks/remote/terraform/main.tf @@ -0,0 +1,37 @@ +#Terraform Configuration + +variable "DO_API_TOKEN" { + description = "DigitalOcean Access Token" +} + +variable "TESTNET_NAME" { + description = "Name of the testnet" + default = "sentrynet" +} + +variable "SSH_KEY_FILE" { + description = "SSH public key file to be used on the nodes" + type = "string" +} + +variable "SERVERS" { + description = "Number of nodes in testnet" + default = "4" +} + +provider "digitalocean" { + token = "${var.DO_API_TOKEN}" +} + +module "cluster" { + source = "./cluster" + name = "${var.TESTNET_NAME}" + ssh_key = "${var.SSH_KEY_FILE}" + servers = "${var.SERVERS}" +} + + +output "public_ips" { + value = "${module.cluster.public_ips}" +} + diff --git a/node/id.go b/node/id.go new file mode 100644 index 000000000..5100597c6 --- /dev/null +++ b/node/id.go @@ -0,0 +1,35 @@ +package node + +import ( + "time" + + "github.com/tendermint/tendermint/crypto" +) + +type NodeID struct { + Name string + PubKey crypto.PubKey +} + +type PrivNodeID struct { + NodeID + PrivKey crypto.PrivKey +} + +type NodeGreeting struct { + NodeID + Version string + ChainID string + Message string + Time time.Time +} + +type SignedNodeGreeting struct { + NodeGreeting + Signature crypto.Signature +} + +func (pnid *PrivNodeID) SignGreeting() *SignedNodeGreeting { + //greeting := 
NodeGreeting{} + return nil +} diff --git a/node/node.go b/node/node.go new file mode 100644 index 000000000..9f6428ec1 --- /dev/null +++ b/node/node.go @@ -0,0 +1,749 @@ +package node + +import ( + "bytes" + "context" + "errors" + "fmt" + "net" + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + + bc "github.com/tendermint/tendermint/blockchain" + cfg "github.com/tendermint/tendermint/config" + cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/evidence" + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/pex" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + rpccore "github.com/tendermint/tendermint/rpc/core" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + grpccore "github.com/tendermint/tendermint/rpc/grpc" + rpc "github.com/tendermint/tendermint/rpc/lib" + rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/state/txindex/kv" + "github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" + + _ "net/http/pprof" +) + +//------------------------------------------------------------------------------ + +// DBContext specifies config information for loading a new DB. +type DBContext struct { + ID string + Config *cfg.Config +} + +// DBProvider takes a DBContext and returns an instantiated DB. +type DBProvider func(*DBContext) (dbm.DB, error) + +// DefaultDBProvider returns a database using the DBBackend and DBDir +// specified in the ctx.Config. +func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { + dbType := dbm.DBBackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil +} + +// GenesisDocProvider returns a GenesisDoc. +// It allows the GenesisDoc to be pulled from sources other than the +// filesystem, for instance from a distributed key-value store cluster. +type GenesisDocProvider func() (*types.GenesisDoc, error) + +// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads +// the GenesisDoc from the config.GenesisFile() on the filesystem. +func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider { + return func() (*types.GenesisDoc, error) { + return types.GenesisDocFromFile(config.GenesisFile()) + } +} + +// NodeProvider takes a config and a logger and returns a ready to go Node. +type NodeProvider func(*cfg.Config, log.Logger) (*Node, error) + +// DefaultNewNode returns a Tendermint node with default settings for the +// PrivValidator, ClientCreator, GenesisDoc, and DBProvider. +// It implements NodeProvider. 
+func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { + return NewNode(config, + privval.LoadOrGenFilePV(config.PrivValidatorFile()), + proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), + DefaultGenesisDocProviderFunc(config), + DefaultDBProvider, + DefaultMetricsProvider, + logger, + ) +} + +// MetricsProvider returns a consensus, p2p and mempool Metrics. +type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) + +// DefaultMetricsProvider returns consensus, p2p and mempool Metrics build +// using Prometheus client library. +func DefaultMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) { + return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics() +} + +// NopMetricsProvider returns consensus, p2p and mempool Metrics as no-op. +func NopMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) { + return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics() +} + +//------------------------------------------------------------------------------ + +// Node is the highest level interface to a full Tendermint node. +// It includes all configuration information and running services. +type Node struct { + cmn.BaseService + + // config + config *cfg.Config + genesisDoc *types.GenesisDoc // initial validator set + privValidator types.PrivValidator // local node's validator key + + // network + sw *p2p.Switch // p2p connections + addrBook pex.AddrBook // known peers + + // services + eventBus *types.EventBus // pub/sub for services + stateDB dbm.DB + blockStore *bc.BlockStore // store the blockchain to disk + bcReactor *bc.BlockchainReactor // for fast-syncing + mempoolReactor *mempl.MempoolReactor // for gossipping transactions + consensusState *cs.ConsensusState // latest consensus state + consensusReactor *cs.ConsensusReactor // for participating in the consensus + evidencePool *evidence.EvidencePool // tracking evidence + proxyApp proxy.AppConns // connection to the application + rpcListeners []net.Listener // rpc servers + txIndexer txindex.TxIndexer + indexerService *txindex.IndexerService + prometheusSrv *http.Server +} + +// NewNode returns a new, ready to go, Tendermint Node. +func NewNode(config *cfg.Config, + privValidator types.PrivValidator, + clientCreator proxy.ClientCreator, + genesisDocProvider GenesisDocProvider, + dbProvider DBProvider, + metricsProvider MetricsProvider, + logger log.Logger) (*Node, error) { + + // Get BlockStore + blockStoreDB, err := dbProvider(&DBContext{"blockstore", config}) + if err != nil { + return nil, err + } + blockStore := bc.NewBlockStore(blockStoreDB) + + // Get State + stateDB, err := dbProvider(&DBContext{"state", config}) + if err != nil { + return nil, err + } + + // Get genesis doc + // TODO: move to state package? + genDoc, err := loadGenesisDoc(stateDB) + if err != nil { + genDoc, err = genesisDocProvider() + if err != nil { + return nil, err + } + // save genesis doc to prevent a certain class of user errors (e.g. when it + // was changed, accidentally or not). Also good for audit trail. 
+ saveGenesisDoc(stateDB, genDoc) + } + + state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + if err != nil { + return nil, err + } + + // Create the proxyApp, which manages connections (consensus, mempool, query) + // and sync tendermint and the app by performing a handshake + // and replaying any necessary blocks + consensusLogger := logger.With("module", "consensus") + handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) + handshaker.SetLogger(consensusLogger) + proxyApp := proxy.NewAppConns(clientCreator, handshaker) + proxyApp.SetLogger(logger.With("module", "proxy")) + if err := proxyApp.Start(); err != nil { + return nil, fmt.Errorf("Error starting proxy app connections: %v", err) + } + + // reload the state (it may have been updated by the handshake) + state = sm.LoadState(stateDB) + + // If an address is provided, listen on the socket for a + // connection from an external signing process. + if config.PrivValidatorListenAddr != "" { + var ( + // TODO: persist this key so external signer + // can actually authenticate us + privKey = crypto.GenPrivKeyEd25519() + pvsc = privval.NewSocketPV( + logger.With("module", "privval"), + config.PrivValidatorListenAddr, + privKey, + ) + ) + + if err := pvsc.Start(); err != nil { + return nil, fmt.Errorf("Error starting private validator client: %v", err) + } + + privValidator = pvsc + } + + // Decide whether to fast-sync or not + // We don't fast-sync when the only validator is us. + fastSync := config.FastSync + if state.Validators.Size() == 1 { + addr, _ := state.Validators.GetByIndex(0) + if bytes.Equal(privValidator.GetAddress(), addr) { + fastSync = false + } + } + + // Log whether this node is a validator or an observer + if state.Validators.HasAddress(privValidator.GetAddress()) { + consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey()) + } else { + consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey()) + } + + // metrics + var ( + csMetrics *cs.Metrics + p2pMetrics *p2p.Metrics + memplMetrics *mempl.Metrics + ) + if config.Instrumentation.Prometheus { + csMetrics, p2pMetrics, memplMetrics = metricsProvider() + } else { + csMetrics, p2pMetrics, memplMetrics = NopMetricsProvider() + } + + // Make MempoolReactor + mempoolLogger := logger.With("module", "mempool") + mempool := mempl.NewMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempl.WithMetrics(memplMetrics), + ) + mempool.SetLogger(mempoolLogger) + mempool.InitWAL() // no need to have the mempool wal during tests + mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool) + mempoolReactor.SetLogger(mempoolLogger) + + if config.Consensus.WaitForTxs() { + mempool.EnableTxsAvailable() + } + + // Make Evidence Reactor + evidenceDB, err := dbProvider(&DBContext{"evidence", config}) + if err != nil { + return nil, err + } + evidenceLogger := logger.With("module", "evidence") + evidenceStore := evidence.NewEvidenceStore(evidenceDB) + evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore) + evidencePool.SetLogger(evidenceLogger) + evidenceReactor := evidence.NewEvidenceReactor(evidencePool) + evidenceReactor.SetLogger(evidenceLogger) + + blockExecLogger := logger.With("module", "state") + // make block executor for consensus and blockchain reactors to execute blocks + blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool) + + // Make 
BlockchainReactor + bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + bcReactor.SetLogger(logger.With("module", "blockchain")) + + // Make ConsensusReactor + consensusState := cs.NewConsensusState( + config.Consensus, + state.Copy(), + blockExec, + blockStore, + mempool, + evidencePool, + cs.WithMetrics(csMetrics), + ) + consensusState.SetLogger(consensusLogger) + if privValidator != nil { + consensusState.SetPrivValidator(privValidator) + } + consensusReactor := cs.NewConsensusReactor(consensusState, fastSync) + consensusReactor.SetLogger(consensusLogger) + + p2pLogger := logger.With("module", "p2p") + + sw := p2p.NewSwitch(config.P2P, p2p.WithMetrics(p2pMetrics)) + sw.SetLogger(p2pLogger) + sw.AddReactor("MEMPOOL", mempoolReactor) + sw.AddReactor("BLOCKCHAIN", bcReactor) + sw.AddReactor("CONSENSUS", consensusReactor) + sw.AddReactor("EVIDENCE", evidenceReactor) + + // Optionally, start the pex reactor + // + // TODO: + // + // We need to set Seeds and PersistentPeers on the switch, + // since it needs to be able to use these (and their DNS names) + // even if the PEX is off. We can include the DNS name in the NetAddress, + // but it would still be nice to have a clear list of the current "PersistentPeers" + // somewhere that we can return with net_info. + // + // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. + // Note we currently use the addrBook regardless at least for AddOurAddress + addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) + addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) + if config.P2P.PexReactor { + // TODO persistent peers ? so we can have their DNS addrs saved + pexReactor := pex.NewPEXReactor(addrBook, + &pex.PEXReactorConfig{ + Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "), + SeedMode: config.P2P.SeedMode, + PrivatePeerIDs: cmn.SplitAndTrim(config.P2P.PrivatePeerIDs, ",", " ")}) + pexReactor.SetLogger(p2pLogger) + sw.AddReactor("PEX", pexReactor) + } + + sw.SetAddrBook(addrBook) + + // Filter peers by addr or pubkey with an ABCI query. + // If the query return code is OK, add peer. 
+ // XXX: Query format subject to change + if config.FilterPeers { + // NOTE: addr is ip:port + sw.SetAddrFilter(func(addr net.Addr) error { + resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())}) + if err != nil { + return err + } + if resQuery.IsErr() { + return fmt.Errorf("Error querying abci app: %v", resQuery) + } + return nil + }) + sw.SetIDFilter(func(id p2p.ID) error { + resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/id/%s", id)}) + if err != nil { + return err + } + if resQuery.IsErr() { + return fmt.Errorf("Error querying abci app: %v", resQuery) + } + return nil + }) + } + + eventBus := types.NewEventBus() + eventBus.SetLogger(logger.With("module", "events")) + + // services which will be publishing and/or subscribing for messages (events) + // consensusReactor will set it on consensusState and blockExecutor + consensusReactor.SetEventBus(eventBus) + + // Transaction indexing + var txIndexer txindex.TxIndexer + switch config.TxIndex.Indexer { + case "kv": + store, err := dbProvider(&DBContext{"tx_index", config}) + if err != nil { + return nil, err + } + if config.TxIndex.IndexTags != "" { + txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " "))) + } else if config.TxIndex.IndexAllTags { + txIndexer = kv.NewTxIndex(store, kv.IndexAllTags()) + } else { + txIndexer = kv.NewTxIndex(store) + } + default: + txIndexer = &null.TxIndex{} + } + + indexerService := txindex.NewIndexerService(txIndexer, eventBus) + indexerService.SetLogger(logger.With("module", "txindex")) + + // run the profile server + profileHost := config.ProfListenAddress + if profileHost != "" { + go func() { + logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil)) + }() + } + + node := &Node{ + config: config, + genesisDoc: genDoc, + privValidator: privValidator, + + sw: sw, + addrBook: addrBook, + + stateDB: stateDB, + blockStore: blockStore, + bcReactor: bcReactor, + mempoolReactor: mempoolReactor, + consensusState: consensusState, + consensusReactor: consensusReactor, + evidencePool: evidencePool, + proxyApp: proxyApp, + txIndexer: txIndexer, + indexerService: indexerService, + eventBus: eventBus, + } + node.BaseService = *cmn.NewBaseService(logger, "Node", node) + return node, nil +} + +// OnStart starts the Node. It implements cmn.Service. +func (n *Node) OnStart() error { + err := n.eventBus.Start() + if err != nil { + return err + } + + // Create & add listener + l := p2p.NewDefaultListener( + n.config.P2P.ListenAddress, + n.config.P2P.ExternalAddress, + n.config.P2P.UPNP, + n.Logger.With("module", "p2p")) + n.sw.AddListener(l) + + // Generate node PrivKey + // TODO: pass in like privValidator + nodeKey, err := p2p.LoadOrGenNodeKey(n.config.NodeKeyFile()) + if err != nil { + return err + } + n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile()) + + nodeInfo := n.makeNodeInfo(nodeKey.ID()) + n.sw.SetNodeInfo(nodeInfo) + n.sw.SetNodeKey(nodeKey) + + // Add ourselves to addrbook to prevent dialing ourselves + n.addrBook.AddOurAddress(nodeInfo.NetAddress()) + + // Start the RPC server before the P2P server + // so we can eg. 
receive txs for the first block + if n.config.RPC.ListenAddress != "" { + listeners, err := n.startRPC() + if err != nil { + return err + } + n.rpcListeners = listeners + } + + if n.config.Instrumentation.Prometheus { + n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) + } + + // Start the switch (the P2P server). + err = n.sw.Start() + if err != nil { + return err + } + + // Always connect to persistent peers + if n.config.P2P.PersistentPeers != "" { + err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true) + if err != nil { + return err + } + } + + // start tx indexer + return n.indexerService.Start() +} + +// OnStop stops the Node. It implements cmn.Service. +func (n *Node) OnStop() { + n.BaseService.OnStop() + + n.Logger.Info("Stopping Node") + // TODO: gracefully disconnect from peers. + n.sw.Stop() + + for _, l := range n.rpcListeners { + n.Logger.Info("Closing rpc listener", "listener", l) + if err := l.Close(); err != nil { + n.Logger.Error("Error closing listener", "listener", l, "err", err) + } + } + + n.eventBus.Stop() + n.indexerService.Stop() + + if pvsc, ok := n.privValidator.(*privval.SocketPV); ok { + if err := pvsc.Stop(); err != nil { + n.Logger.Error("Error stopping priv validator socket client", "err", err) + } + } + + if n.prometheusSrv != nil { + if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout: + n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) + } + } +} + +// RunForever waits for an interrupt signal and stops the node. +func (n *Node) RunForever() { + // Sleep forever and then... + cmn.TrapSignal(func() { + n.Stop() + }) +} + +// AddListener adds a listener to accept inbound peer connections. +// It should be called before starting the Node. 
+// The first listener is the primary listener (in NodeInfo) +func (n *Node) AddListener(l p2p.Listener) { + n.sw.AddListener(l) +} + +// ConfigureRPC sets all variables in rpccore so they will serve +// rpc calls from this node +func (n *Node) ConfigureRPC() { + rpccore.SetStateDB(n.stateDB) + rpccore.SetBlockStore(n.blockStore) + rpccore.SetConsensusState(n.consensusState) + rpccore.SetMempool(n.mempoolReactor.Mempool) + rpccore.SetEvidencePool(n.evidencePool) + rpccore.SetSwitch(n.sw) + rpccore.SetPubKey(n.privValidator.GetPubKey()) + rpccore.SetGenesisDoc(n.genesisDoc) + rpccore.SetAddrBook(n.addrBook) + rpccore.SetProxyAppQuery(n.proxyApp.Query()) + rpccore.SetTxIndexer(n.txIndexer) + rpccore.SetConsensusReactor(n.consensusReactor) + rpccore.SetEventBus(n.eventBus) + rpccore.SetLogger(n.Logger.With("module", "rpc")) +} + +func (n *Node) startRPC() ([]net.Listener, error) { + n.ConfigureRPC() + listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ") + coreCodec := amino.NewCodec() + ctypes.RegisterAmino(coreCodec) + + if n.config.RPC.Unsafe { + rpccore.AddUnsafeRoutes() + } + + // we may expose the rpc over both a unix and tcp socket + listeners := make([]net.Listener, len(listenAddrs)) + for i, listenAddr := range listenAddrs { + mux := http.NewServeMux() + rpcLogger := n.Logger.With("module", "rpc-server") + wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus)) + wm.SetLogger(rpcLogger.With("protocol", "websocket")) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger) + listener, err := rpcserver.StartHTTPServer( + listenAddr, + mux, + rpcLogger, + rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections}, + ) + if err != nil { + return nil, err + } + listeners[i] = listener + } + + // we expose a simplified api over grpc for convenience to app devs + grpcListenAddr := n.config.RPC.GRPCListenAddress + if grpcListenAddr != "" { + listener, err := grpccore.StartGRPCServer( + grpcListenAddr, + grpccore.Config{ + MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections, + }, + ) + if err != nil { + return nil, err + } + listeners = append(listeners, listener) + } + + return listeners, nil +} + +// startPrometheusServer starts a Prometheus HTTP server, listening for metrics +// collectors on addr. +func (n *Node) startPrometheusServer(addr string) *http.Server { + srv := &http.Server{ + Addr: addr, + Handler: promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, promhttp.HandlerFor( + prometheus.DefaultGatherer, + promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, + ), + ), + } + go func() { + if err := srv.ListenAndServe(); err != http.ErrServerClosed { + // Error starting or closing listener: + n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err) + } + }() + return srv +} + +// Switch returns the Node's Switch. +func (n *Node) Switch() *p2p.Switch { + return n.sw +} + +// BlockStore returns the Node's BlockStore. +func (n *Node) BlockStore() *bc.BlockStore { + return n.blockStore +} + +// ConsensusState returns the Node's ConsensusState. +func (n *Node) ConsensusState() *cs.ConsensusState { + return n.consensusState +} + +// ConsensusReactor returns the Node's ConsensusReactor. +func (n *Node) ConsensusReactor() *cs.ConsensusReactor { + return n.consensusReactor +} + +// MempoolReactor returns the Node's MempoolReactor. 
+func (n *Node) MempoolReactor() *mempl.MempoolReactor { + return n.mempoolReactor +} + +// EvidencePool returns the Node's EvidencePool. +func (n *Node) EvidencePool() *evidence.EvidencePool { + return n.evidencePool +} + +// EventBus returns the Node's EventBus. +func (n *Node) EventBus() *types.EventBus { + return n.eventBus +} + +// PrivValidator returns the Node's PrivValidator. +// XXX: for convenience only! +func (n *Node) PrivValidator() types.PrivValidator { + return n.privValidator +} + +// GenesisDoc returns the Node's GenesisDoc. +func (n *Node) GenesisDoc() *types.GenesisDoc { + return n.genesisDoc +} + +// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. +func (n *Node) ProxyApp() proxy.AppConns { + return n.proxyApp +} + +func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo { + txIndexerStatus := "on" + if _, ok := n.txIndexer.(*null.TxIndex); ok { + txIndexerStatus = "off" + } + nodeInfo := p2p.NodeInfo{ + ID: nodeID, + Network: n.genesisDoc.ChainID, + Version: version.Version, + Channels: []byte{ + bc.BlockchainChannel, + cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, + mempl.MempoolChannel, + evidence.EvidenceChannel, + }, + Moniker: n.config.Moniker, + Other: []string{ + cmn.Fmt("amino_version=%v", amino.Version), + cmn.Fmt("p2p_version=%v", p2p.Version), + cmn.Fmt("consensus_version=%v", cs.Version), + cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version), + cmn.Fmt("tx_index=%v", txIndexerStatus), + }, + } + + if n.config.P2P.PexReactor { + nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) + } + + rpcListenAddr := n.config.RPC.ListenAddress + nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr)) + + if !n.sw.IsListening() { + return nodeInfo + } + + p2pListener := n.sw.Listeners()[0] + p2pHost := p2pListener.ExternalAddressHost() + p2pPort := p2pListener.ExternalAddress().Port + nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort) + + return nodeInfo +} + +//------------------------------------------------------------------------------ + +// NodeInfo returns the Node's Info from the Switch. 
+func (n *Node) NodeInfo() p2p.NodeInfo { + return n.sw.NodeInfo() +} + +//------------------------------------------------------------------------------ + +var ( + genesisDocKey = []byte("genesisDoc") +) + +// panics if failed to unmarshal bytes +func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { + bytes := db.Get(genesisDocKey) + if len(bytes) == 0 { + return nil, errors.New("Genesis doc not found") + } + var genDoc *types.GenesisDoc + err := cdc.UnmarshalJSON(bytes, &genDoc) + if err != nil { + cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes)) + } + return genDoc, nil +} + +// panics if failed to marshal the given genesis document +func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { + bytes, err := cdc.MarshalJSON(genDoc) + if err != nil { + cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) + } + db.SetSync(genesisDocKey, bytes) +} diff --git a/node/node_test.go b/node/node_test.go new file mode 100644 index 000000000..80f6f02c2 --- /dev/null +++ b/node/node_test.go @@ -0,0 +1,48 @@ +package node + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/libs/log" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/types" +) + +func TestNodeStartStop(t *testing.T) { + config := cfg.ResetTestRoot("node_node_test") + + // create & start node + n, err := DefaultNewNode(config, log.TestingLogger()) + assert.NoError(t, err, "expected no err on DefaultNewNode") + err1 := n.Start() + if err1 != nil { + t.Error(err1) + } + t.Logf("Started node %v", n.sw.NodeInfo()) + + // wait for the node to produce a block + blockCh := make(chan interface{}) + err = n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock, blockCh) + assert.NoError(t, err) + select { + case <-blockCh: + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for the node to produce a block") + } + + // stop the node + go func() { + n.Stop() + }() + + select { + case <-n.Quit(): + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for shutdown") + } +} diff --git a/node/wire.go b/node/wire.go new file mode 100644 index 000000000..8b3ae8950 --- /dev/null +++ b/node/wire.go @@ -0,0 +1,12 @@ +package node + +import ( + amino "github.com/tendermint/go-amino" + crypto "github.com/tendermint/tendermint/crypto" +) + +var cdc = amino.NewCodec() + +func init() { + crypto.RegisterAmino(cdc) +} diff --git a/p2p/README.md b/p2p/README.md new file mode 100644 index 000000000..819a5056b --- /dev/null +++ b/p2p/README.md @@ -0,0 +1,11 @@ +# p2p + +The p2p package provides an abstraction around peer-to-peer communication. 
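+
+For a feel of the API, here is a minimal reactor sketch. The `EchoReactor`
+name and channel ID `0x40` are made up for illustration; real reactors embed
+`BaseReactor` (see `base_reactor.go`) and are registered with the node's
+Switch:
+
+```go
+package myreactor
+
+import (
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/conn"
+)
+
+// EchoReactor logs every message it receives.
+type EchoReactor struct {
+	p2p.BaseReactor // provides Start/Stop and no-op defaults
+}
+
+func NewEchoReactor() *EchoReactor {
+	r := &EchoReactor{}
+	r.BaseReactor = *p2p.NewBaseReactor("EchoReactor", r)
+	return r
+}
+
+// GetChannels declares the channels this reactor speaks on.
+func (r *EchoReactor) GetChannels() []*conn.ChannelDescriptor {
+	return []*conn.ChannelDescriptor{{ID: 0x40, Priority: 1}}
+}
+
+// Receive is called by the switch with raw bytes sent by a peer.
+func (r *EchoReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
+	r.Logger.Info("received", "peer", peer.ID(), "len", len(msgBytes))
+}
+```
+
+The embedded `BaseReactor` supplies no-op defaults for `AddPeer`, `RemovePeer`,
+and the rest of the `Reactor` interface.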
+
+Docs:
+
+- [Connection](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/connection.md) for details on how connections and multiplexing work
+- [Peer](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/peer.md) for details on peer ID, handshakes, and peer exchange
+- [Node](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/node.md) for details about different types of nodes and how they should work
+- [Pex](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md) for details on peer discovery and exchange
+- [Config](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/config.md) for details on the config options
diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go
new file mode 100644
index 000000000..da1296da0
--- /dev/null
+++ b/p2p/base_reactor.go
@@ -0,0 +1,53 @@
+package p2p
+
+import (
+ "github.com/tendermint/tendermint/p2p/conn"
+ cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+type Reactor interface {
+ cmn.Service // Start, Stop
+
+ // SetSwitch allows setting a switch.
+ SetSwitch(*Switch)
+
+ // GetChannels returns the list of channel descriptors.
+ GetChannels() []*conn.ChannelDescriptor
+
+ // AddPeer is called by the switch when a new peer is added.
+ AddPeer(peer Peer)
+
+ // RemovePeer is called by the switch when the peer is stopped (due to error
+ // or other reason).
+ RemovePeer(peer Peer, reason interface{})
+
+ // Receive is called when msgBytes is received from peer.
+ //
+ // NOTE reactor can not keep msgBytes around after Receive completes without
+ // copying.
+ //
+ // CONTRACT: msgBytes are not nil.
+ Receive(chID byte, peer Peer, msgBytes []byte)
+}
+
+//--------------------------------------
+
+type BaseReactor struct {
+ cmn.BaseService // Provides Start, Stop, .Quit
+ Switch *Switch
+}
+
+func NewBaseReactor(name string, impl Reactor) *BaseReactor {
+ return &BaseReactor{
+ BaseService: *cmn.NewBaseService(nil, name, impl),
+ Switch: nil,
+ }
+}
+
+func (br *BaseReactor) SetSwitch(sw *Switch) {
+ br.Switch = sw
+}
+func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
+func (*BaseReactor) AddPeer(peer Peer) {}
+func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
+func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
diff --git a/p2p/conn/conn_go110.go b/p2p/conn/conn_go110.go
new file mode 100644
index 000000000..682188101
--- /dev/null
+++ b/p2p/conn/conn_go110.go
@@ -0,0 +1,15 @@
+// +build go1.10
+
+package conn
+
+// Go1.10 has a proper net.Conn implementation that
+// has the SetDeadline method implemented as per
+// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
+// lest we run into problems like
+// https://github.com/tendermint/tendermint/issues/851
+
+import "net"
+
+func NetPipe() (net.Conn, net.Conn) {
+ return net.Pipe()
+}
diff --git a/p2p/conn/conn_notgo110.go b/p2p/conn/conn_notgo110.go
new file mode 100644
index 000000000..ed642eb54
--- /dev/null
+++ b/p2p/conn/conn_notgo110.go
@@ -0,0 +1,32 @@
+// +build !go1.10
+
+package conn
+
+import (
+ "net"
+ "time"
+)
+
+// Only Go1.10 has a proper net.Conn implementation that
+// has the SetDeadline method implemented as per
+// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
+// lest we run into problems like
+// https://github.com/tendermint/tendermint/issues/851
+// so for Go versions < 1.10 we use our custom net.Conn creator,
+// whose SetDeadline is a no-op rather than returning an `unimplemented` error.
+// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04
+// we ignored errors from SetDeadline anyway, so swallowing them here is consistent.
+type pipe struct {
+ net.Conn
+}
+
+func (p *pipe) SetDeadline(t time.Time) error {
+ return nil
+}
+
+func NetPipe() (net.Conn, net.Conn) {
+ p1, p2 := net.Pipe()
+ return &pipe{p1}, &pipe{p2}
+}
+
+var _ net.Conn = (*pipe)(nil)
diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go
new file mode 100644
index 000000000..9672e0117
--- /dev/null
+++ b/p2p/conn/connection.go
@@ -0,0 +1,794 @@
+package conn
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "reflect"
+ "sync/atomic"
+ "time"
+
+ amino "github.com/tendermint/go-amino"
+ cmn "github.com/tendermint/tendermint/libs/common"
+ flow "github.com/tendermint/tendermint/libs/flowrate"
+ "github.com/tendermint/tendermint/libs/log"
+)
+
+const (
+ defaultMaxPacketMsgPayloadSize = 1024
+
+ numBatchPacketMsgs = 10
+ minReadBufferSize = 1024
+ minWriteBufferSize = 65536
+ updateStats = 2 * time.Second
+
+ // some of these defaults are written in the user config
+ // flushThrottle, sendRate, recvRate
+ // TODO: remove values present in config
+ defaultFlushThrottle = 100 * time.Millisecond
+
+ defaultSendQueueCapacity = 1
+ defaultRecvBufferCapacity = 4096
+ defaultRecvMessageCapacity = 22020096 // 21MB
+ defaultSendRate = int64(512000) // 500KB/s
+ defaultRecvRate = int64(512000) // 500KB/s
+ defaultSendTimeout = 10 * time.Second
+ defaultPingInterval = 60 * time.Second
+ defaultPongTimeout = 45 * time.Second
+)
+
+type receiveCbFunc func(chID byte, msgBytes []byte)
+type errorCbFunc func(interface{})
+
+/*
+Each peer has one `MConnection` (multiplex connection) instance.
+
+__multiplex__ *noun* a system or signal involving simultaneous transmission of
+several messages along a single channel of communication.
+
+Each `MConnection` handles message transmission on multiple abstract communication
+`Channel`s. Each channel has a globally unique byte id.
+The byte id and the relative priorities of each `Channel` are configured upon
+initialization of the connection.
+
+There are two methods for sending messages:
+ func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
+ func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
+
+`Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
+successfully queued for the channel with the given id byte `chID`, or until the
+request times out. The bytes are wrapped in PacketMsg packets, which are
+serialized using go-amino.
+
+`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
+channel's queue is full.
+
+Inbound message bytes are handled with an onReceive callback function.
+*/
+type MConnection struct {
+ cmn.BaseService
+
+ conn net.Conn
+ bufConnReader *bufio.Reader
+ bufConnWriter *bufio.Writer
+ sendMonitor *flow.Monitor
+ recvMonitor *flow.Monitor
+ send chan struct{}
+ pong chan struct{}
+ channels []*Channel
+ channelsIdx map[byte]*Channel
+ onReceive receiveCbFunc
+ onError errorCbFunc
+ errored uint32
+ config MConnConfig
+
+ quit chan struct{}
+ flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
+ pingTimer *cmn.RepeatTimer // send pings periodically + + // close conn if pong is not received in pongTimeout + pongTimer *time.Timer + pongTimeoutCh chan bool // true - timeout, false - peer sent pong + + chStatsTimer *cmn.RepeatTimer // update channel stats periodically + + created time.Time // time of creation + + _maxPacketMsgSize int +} + +// MConnConfig is a MConnection configuration. +type MConnConfig struct { + SendRate int64 `mapstructure:"send_rate"` + RecvRate int64 `mapstructure:"recv_rate"` + + // Maximum payload size + MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` + + // Interval to flush writes (throttled) + FlushThrottle time.Duration `mapstructure:"flush_throttle"` + + // Interval to send pings + PingInterval time.Duration `mapstructure:"ping_interval"` + + // Maximum wait time for pongs + PongTimeout time.Duration `mapstructure:"pong_timeout"` +} + +// DefaultMConnConfig returns the default config. +func DefaultMConnConfig() MConnConfig { + return MConnConfig{ + SendRate: defaultSendRate, + RecvRate: defaultRecvRate, + MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize, + FlushThrottle: defaultFlushThrottle, + PingInterval: defaultPingInterval, + PongTimeout: defaultPongTimeout, + } +} + +// NewMConnection wraps net.Conn and creates multiplex connection +func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection { + return NewMConnectionWithConfig( + conn, + chDescs, + onReceive, + onError, + DefaultMConnConfig()) +} + +// NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config +func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config MConnConfig) *MConnection { + if config.PongTimeout >= config.PingInterval { + panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)") + } + + mconn := &MConnection{ + conn: conn, + bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), + bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), + sendMonitor: flow.New(0, 0), + recvMonitor: flow.New(0, 0), + send: make(chan struct{}, 1), + pong: make(chan struct{}, 1), + onReceive: onReceive, + onError: onError, + config: config, + } + + // Create channels + var channelsIdx = map[byte]*Channel{} + var channels = []*Channel{} + + for _, desc := range chDescs { + channel := newChannel(mconn, *desc) + channelsIdx[channel.desc.ID] = channel + channels = append(channels, channel) + } + mconn.channels = channels + mconn.channelsIdx = channelsIdx + + mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn) + + // maxPacketMsgSize() is a bit heavy, so call just once + mconn._maxPacketMsgSize = mconn.maxPacketMsgSize() + + return mconn +} + +func (c *MConnection) SetLogger(l log.Logger) { + c.BaseService.SetLogger(l) + for _, ch := range c.channels { + ch.SetLogger(l) + } +} + +// OnStart implements BaseService +func (c *MConnection) OnStart() error { + if err := c.BaseService.OnStart(); err != nil { + return err + } + c.quit = make(chan struct{}) + c.flushTimer = cmn.NewThrottleTimer("flush", c.config.FlushThrottle) + c.pingTimer = cmn.NewRepeatTimer("ping", c.config.PingInterval) + c.pongTimeoutCh = make(chan bool, 1) + c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateStats) + go c.sendRoutine() + go c.recvRoutine() + return nil +} + +// OnStop implements BaseService +func (c *MConnection) OnStop() { + c.BaseService.OnStop() + 
c.flushTimer.Stop() + c.pingTimer.Stop() + c.chStatsTimer.Stop() + if c.quit != nil { + close(c.quit) + } + c.conn.Close() // nolint: errcheck + + // We can't close pong safely here because + // recvRoutine may write to it after we've stopped. + // Though it doesn't need to get closed at all, + // we close it @ recvRoutine. +} + +func (c *MConnection) String() string { + return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr()) +} + +func (c *MConnection) flush() { + c.Logger.Debug("Flush", "conn", c) + err := c.bufConnWriter.Flush() + if err != nil { + c.Logger.Error("MConnection flush failed", "err", err) + } +} + +// Catch panics, usually caused by remote disconnects. +func (c *MConnection) _recover() { + if r := recover(); r != nil { + err := cmn.ErrorWrap(r, "recovered panic in MConnection") + c.stopForError(err) + } +} + +func (c *MConnection) stopForError(r interface{}) { + c.Stop() + if atomic.CompareAndSwapUint32(&c.errored, 0, 1) { + if c.onError != nil { + c.onError(r) + } + } +} + +// Queues a message to be sent to channel. +func (c *MConnection) Send(chID byte, msgBytes []byte) bool { + if !c.IsRunning() { + return false + } + + c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) + + // Send message to channel. + channel, ok := c.channelsIdx[chID] + if !ok { + c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) + return false + } + + success := channel.sendBytes(msgBytes) + if success { + // Wake up sendRoutine if necessary + select { + case c.send <- struct{}{}: + default: + } + } else { + c.Logger.Error("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) + } + return success +} + +// Queues a message to be sent to channel. +// Nonblocking, returns true if successful. +func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool { + if !c.IsRunning() { + return false + } + + c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) + + // Send message to channel. + channel, ok := c.channelsIdx[chID] + if !ok { + c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) + return false + } + + ok = channel.trySendBytes(msgBytes) + if ok { + // Wake up sendRoutine if necessary + select { + case c.send <- struct{}{}: + default: + } + } + + return ok +} + +// CanSend returns true if you can send more data onto the chID, false +// otherwise. Use only as a heuristic. +func (c *MConnection) CanSend(chID byte) bool { + if !c.IsRunning() { + return false + } + + channel, ok := c.channelsIdx[chID] + if !ok { + c.Logger.Error(cmn.Fmt("Unknown channel %X", chID)) + return false + } + return channel.canSend() +} + +// sendRoutine polls for packets to send from channels. +func (c *MConnection) sendRoutine() { + defer c._recover() + +FOR_LOOP: + for { + var _n int64 + var err error + SELECTION: + select { + case <-c.flushTimer.Ch: + // NOTE: flushTimer.Set() must be called every time + // something is written to .bufConnWriter. 
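+ // (sendPacketMsg arranges this by calling c.flushTimer.Set() after each packet write.)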
+ c.flush() + case <-c.chStatsTimer.Chan(): + for _, channel := range c.channels { + channel.updateStats() + } + case <-c.pingTimer.Chan(): + c.Logger.Debug("Send Ping") + _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPing{}) + if err != nil { + break SELECTION + } + c.sendMonitor.Update(int(_n)) + c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout) + c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() { + select { + case c.pongTimeoutCh <- true: + default: + } + }) + c.flush() + case timeout := <-c.pongTimeoutCh: + if timeout { + c.Logger.Debug("Pong timeout") + err = errors.New("pong timeout") + } else { + c.stopPongTimer() + } + case <-c.pong: + c.Logger.Debug("Send Pong") + _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPong{}) + if err != nil { + break SELECTION + } + c.sendMonitor.Update(int(_n)) + c.flush() + case <-c.quit: + break FOR_LOOP + case <-c.send: + // Send some PacketMsgs + eof := c.sendSomePacketMsgs() + if !eof { + // Keep sendRoutine awake. + select { + case c.send <- struct{}{}: + default: + } + } + } + + if !c.IsRunning() { + break FOR_LOOP + } + if err != nil { + c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err) + c.stopForError(err) + break FOR_LOOP + } + } + + // Cleanup + c.stopPongTimer() +} + +// Returns true if messages from channels were exhausted. +// Blocks in accordance to .sendMonitor throttling. +func (c *MConnection) sendSomePacketMsgs() bool { + // Block until .sendMonitor says we can write. + // Once we're ready we send more than we asked for, + // but amortized it should even out. + c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) + + // Now send some PacketMsgs. + for i := 0; i < numBatchPacketMsgs; i++ { + if c.sendPacketMsg() { + return true + } + } + return false +} + +// Returns true if messages from channels were exhausted. +func (c *MConnection) sendPacketMsg() bool { + // Choose a channel to create a PacketMsg from. + // The chosen channel will be the one whose recentlySent/priority is the least. + var leastRatio float32 = math.MaxFloat32 + var leastChannel *Channel + for _, channel := range c.channels { + // If nothing to send, skip this channel + if !channel.isSendPending() { + continue + } + // Get ratio, and keep track of lowest ratio. + ratio := float32(channel.recentlySent) / float32(channel.desc.Priority) + if ratio < leastRatio { + leastRatio = ratio + leastChannel = channel + } + } + + // Nothing to send? + if leastChannel == nil { + return true + } + // c.Logger.Info("Found a msgPacket to send") + + // Make & send a PacketMsg from this channel + _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter) + if err != nil { + c.Logger.Error("Failed to write PacketMsg", "err", err) + c.stopForError(err) + return true + } + c.sendMonitor.Update(int(_n)) + c.flushTimer.Set() + return false +} + +// recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer. +// After a whole message has been assembled, it's pushed to onReceive(). +// Blocks depending on how the connection is throttled. +// Otherwise, it never blocks. +func (c *MConnection) recvRoutine() { + defer c._recover() + +FOR_LOOP: + for { + // Block until .recvMonitor says we can read. 
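+ // The flowrate monitor throttles reads to roughly the configured RecvRate.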
+ c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true) + + // Peek into bufConnReader for debugging + /* + if numBytes := c.bufConnReader.Buffered(); numBytes > 0 { + bz, err := c.bufConnReader.Peek(cmn.MinInt(numBytes, 100)) + if err == nil { + // return + } else { + c.Logger.Debug("Error peeking connection buffer", "err", err) + // return nil + } + c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz) + } + */ + + // Read packet type + var packet Packet + var _n int64 + var err error + _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) + c.recvMonitor.Update(int(_n)) + if err != nil { + if c.IsRunning() { + c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) + c.stopForError(err) + } + break FOR_LOOP + } + + // Read more depending on packet type. + switch pkt := packet.(type) { + case PacketPing: + // TODO: prevent abuse, as they cause flush()'s. + // https://github.com/tendermint/tendermint/issues/1190 + c.Logger.Debug("Receive Ping") + select { + case c.pong <- struct{}{}: + default: + // never block + } + case PacketPong: + c.Logger.Debug("Receive Pong") + select { + case c.pongTimeoutCh <- false: + default: + // never block + } + case PacketMsg: + channel, ok := c.channelsIdx[pkt.ChannelID] + if !ok || channel == nil { + err := fmt.Errorf("Unknown channel %X", pkt.ChannelID) + c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(err) + break FOR_LOOP + } + + msgBytes, err := channel.recvPacketMsg(pkt) + if err != nil { + if c.IsRunning() { + c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(err) + } + break FOR_LOOP + } + if msgBytes != nil { + c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes)) + // NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine + c.onReceive(pkt.ChannelID, msgBytes) + } + default: + err := fmt.Errorf("Unknown message type %v", reflect.TypeOf(packet)) + c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(err) + break FOR_LOOP + } + } + + // Cleanup + close(c.pong) + for range c.pong { + // Drain + } +} + +// not goroutine-safe +func (c *MConnection) stopPongTimer() { + if c.pongTimer != nil { + _ = c.pongTimer.Stop() + c.pongTimer = nil + } +} + +// maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead +// of amino encoding. 
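+// It is computed once in NewMConnectionWithConfig and cached in _maxPacketMsgSize.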
+func (c *MConnection) maxPacketMsgSize() int { + return len(cdc.MustMarshalBinary(PacketMsg{ + ChannelID: 0x01, + EOF: 1, + Bytes: make([]byte, c.config.MaxPacketMsgPayloadSize), + })) + 10 // leave room for changes in amino +} + +type ConnectionStatus struct { + Duration time.Duration + SendMonitor flow.Status + RecvMonitor flow.Status + Channels []ChannelStatus +} + +type ChannelStatus struct { + ID byte + SendQueueCapacity int + SendQueueSize int + Priority int + RecentlySent int64 +} + +func (c *MConnection) Status() ConnectionStatus { + var status ConnectionStatus + status.Duration = time.Since(c.created) + status.SendMonitor = c.sendMonitor.Status() + status.RecvMonitor = c.recvMonitor.Status() + status.Channels = make([]ChannelStatus, len(c.channels)) + for i, channel := range c.channels { + status.Channels[i] = ChannelStatus{ + ID: channel.desc.ID, + SendQueueCapacity: cap(channel.sendQueue), + SendQueueSize: int(channel.sendQueueSize), // TODO use atomic + Priority: channel.desc.Priority, + RecentlySent: channel.recentlySent, + } + } + return status +} + +//----------------------------------------------------------------------------- + +type ChannelDescriptor struct { + ID byte + Priority int + SendQueueCapacity int + RecvBufferCapacity int + RecvMessageCapacity int +} + +func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { + if chDesc.SendQueueCapacity == 0 { + chDesc.SendQueueCapacity = defaultSendQueueCapacity + } + if chDesc.RecvBufferCapacity == 0 { + chDesc.RecvBufferCapacity = defaultRecvBufferCapacity + } + if chDesc.RecvMessageCapacity == 0 { + chDesc.RecvMessageCapacity = defaultRecvMessageCapacity + } + filled = chDesc + return +} + +// TODO: lowercase. +// NOTE: not goroutine-safe. +type Channel struct { + conn *MConnection + desc ChannelDescriptor + sendQueue chan []byte + sendQueueSize int32 // atomic. + recving []byte + sending []byte + recentlySent int64 // exponential moving average + + maxPacketMsgPayloadSize int + + Logger log.Logger +} + +func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { + desc = desc.FillDefaults() + if desc.Priority <= 0 { + cmn.PanicSanity("Channel default priority must be a positive integer") + } + return &Channel{ + conn: conn, + desc: desc, + sendQueue: make(chan []byte, desc.SendQueueCapacity), + recving: make([]byte, 0, desc.RecvBufferCapacity), + maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize, + } +} + +func (ch *Channel) SetLogger(l log.Logger) { + ch.Logger = l +} + +// Queues message to send to this channel. +// Goroutine-safe +// Times out (and returns false) after defaultSendTimeout +func (ch *Channel) sendBytes(bytes []byte) bool { + select { + case ch.sendQueue <- bytes: + atomic.AddInt32(&ch.sendQueueSize, 1) + return true + case <-time.After(defaultSendTimeout): + return false + } +} + +// Queues message to send to this channel. +// Nonblocking, returns true if successful. +// Goroutine-safe +func (ch *Channel) trySendBytes(bytes []byte) bool { + select { + case ch.sendQueue <- bytes: + atomic.AddInt32(&ch.sendQueueSize, 1) + return true + default: + return false + } +} + +// Goroutine-safe +func (ch *Channel) loadSendQueueSize() (size int) { + return int(atomic.LoadInt32(&ch.sendQueueSize)) +} + +// Goroutine-safe +// Use only as a heuristic. +func (ch *Channel) canSend() bool { + return ch.loadSendQueueSize() < defaultSendQueueCapacity +} + +// Returns true if any PacketMsgs are pending to be sent. 
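+// As a side effect, it may pop the next message off sendQueue into ch.sending.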
+// Call before calling nextPacketMsg() +// Goroutine-safe +func (ch *Channel) isSendPending() bool { + if len(ch.sending) == 0 { + if len(ch.sendQueue) == 0 { + return false + } + ch.sending = <-ch.sendQueue + } + return true +} + +// Creates a new PacketMsg to send. +// Not goroutine-safe +func (ch *Channel) nextPacketMsg() PacketMsg { + packet := PacketMsg{} + packet.ChannelID = byte(ch.desc.ID) + maxSize := ch.maxPacketMsgPayloadSize + packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))] + if len(ch.sending) <= maxSize { + packet.EOF = byte(0x01) + ch.sending = nil + atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize + } else { + packet.EOF = byte(0x00) + ch.sending = ch.sending[cmn.MinInt(maxSize, len(ch.sending)):] + } + return packet +} + +// Writes next PacketMsg to w and updates c.recentlySent. +// Not goroutine-safe +func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) { + var packet = ch.nextPacketMsg() + n, err = cdc.MarshalBinaryWriter(w, packet) + ch.recentlySent += n + return +} + +// Handles incoming PacketMsgs. It returns a message bytes if message is +// complete. NOTE message bytes may change on next call to recvPacketMsg. +// Not goroutine-safe +func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) { + ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) + var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes) + if recvCap < recvReceived { + return nil, fmt.Errorf("Received message exceeds available capacity: %v < %v", recvCap, recvReceived) + } + ch.recving = append(ch.recving, packet.Bytes...) + if packet.EOF == byte(0x01) { + msgBytes := ch.recving + + // clear the slice without re-allocating. + // http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go + // suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes, + // at which point the recving slice stops being used and should be garbage collected + ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity) + return msgBytes, nil + } + return nil, nil +} + +// Call this periodically to update stats for throttling purposes. +// Not goroutine-safe +func (ch *Channel) updateStats() { + // Exponential decay of stats. + // TODO: optimize. + ch.recentlySent = int64(float64(ch.recentlySent) * 0.8) +} + +//---------------------------------------- +// Packet + +type Packet interface { + AssertIsPacket() +} + +func RegisterPacket(cdc *amino.Codec) { + cdc.RegisterInterface((*Packet)(nil), nil) + cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil) + cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil) + cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil) +} + +func (_ PacketPing) AssertIsPacket() {} +func (_ PacketPong) AssertIsPacket() {} +func (_ PacketMsg) AssertIsPacket() {} + +type PacketPing struct { +} + +type PacketPong struct { +} + +type PacketMsg struct { + ChannelID byte + EOF byte // 1 means message ends here. 
+ Bytes []byte +} + +func (mp PacketMsg) String() string { + return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF) +} diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go new file mode 100644 index 000000000..19e05fbc7 --- /dev/null +++ b/p2p/conn/connection_test.go @@ -0,0 +1,492 @@ +package conn + +import ( + "bytes" + "net" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" +) + +const maxPingPongPacketSize = 1024 // bytes + +func createTestMConnection(conn net.Conn) *MConnection { + onReceive := func(chID byte, msgBytes []byte) { + } + onError := func(r interface{}) { + } + c := createMConnectionWithCallbacks(conn, onReceive, onError) + c.SetLogger(log.TestingLogger()) + return c +} + +func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *MConnection { + cfg := DefaultMConnConfig() + cfg.PingInterval = 90 * time.Millisecond + cfg.PongTimeout = 45 * time.Millisecond + chDescs := []*ChannelDescriptor{&ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}} + c := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg) + c.SetLogger(log.TestingLogger()) + return c +} + +func TestMConnectionSend(t *testing.T) { + server, client := NetPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck + + mconn := createTestMConnection(client) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + msg := []byte("Ant-Man") + assert.True(t, mconn.Send(0x01, msg)) + // Note: subsequent Send/TrySend calls could pass because we are reading from + // the send queue in a separate goroutine. 
+ _, err = server.Read(make([]byte, len(msg))) + if err != nil { + t.Error(err) + } + assert.True(t, mconn.CanSend(0x01)) + + msg = []byte("Spider-Man") + assert.True(t, mconn.TrySend(0x01, msg)) + _, err = server.Read(make([]byte, len(msg))) + if err != nil { + t.Error(err) + } + + assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown") + assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown") +} + +func TestMConnectionReceive(t *testing.T) { + server, client := NetPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck + + receivedCh := make(chan []byte) + errorsCh := make(chan interface{}) + onReceive := func(chID byte, msgBytes []byte) { + receivedCh <- msgBytes + } + onError := func(r interface{}) { + errorsCh <- r + } + mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) + err := mconn1.Start() + require.Nil(t, err) + defer mconn1.Stop() + + mconn2 := createTestMConnection(server) + err = mconn2.Start() + require.Nil(t, err) + defer mconn2.Stop() + + msg := []byte("Cyclops") + assert.True(t, mconn2.Send(0x01, msg)) + + select { + case receivedBytes := <-receivedCh: + assert.Equal(t, []byte(msg), receivedBytes) + case err := <-errorsCh: + t.Fatalf("Expected %s, got %+v", msg, err) + case <-time.After(500 * time.Millisecond): + t.Fatalf("Did not receive %s message in 500ms", msg) + } +} + +func TestMConnectionStatus(t *testing.T) { + server, client := NetPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck + + mconn := createTestMConnection(client) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + status := mconn.Status() + assert.NotNil(t, status) + assert.Zero(t, status.Channels[0].SendQueueSize) +} + +func TestMConnectionPongTimeoutResultsInError(t *testing.T) { + server, client := net.Pipe() + defer server.Close() + defer client.Close() + + receivedCh := make(chan []byte) + errorsCh := make(chan interface{}) + onReceive := func(chID byte, msgBytes []byte) { + receivedCh <- msgBytes + } + onError := func(r interface{}) { + errorsCh <- r + } + mconn := createMConnectionWithCallbacks(client, onReceive, onError) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + serverGotPing := make(chan struct{}) + go func() { + // read ping + var pkt PacketPing + _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + assert.Nil(t, err) + serverGotPing <- struct{}{} + }() + <-serverGotPing + + pongTimerExpired := mconn.config.PongTimeout + 20*time.Millisecond + select { + case msgBytes := <-receivedCh: + t.Fatalf("Expected error, but got %v", msgBytes) + case err := <-errorsCh: + assert.NotNil(t, err) + case <-time.After(pongTimerExpired): + t.Fatalf("Expected to receive error after %v", pongTimerExpired) + } +} + +func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { + server, client := net.Pipe() + defer server.Close() + defer client.Close() + + receivedCh := make(chan []byte) + errorsCh := make(chan interface{}) + onReceive := func(chID byte, msgBytes []byte) { + receivedCh <- msgBytes + } + onError := func(r interface{}) { + errorsCh <- r + } + mconn := createMConnectionWithCallbacks(client, onReceive, onError) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + // sending 3 pongs in a row (abuse) + _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + require.Nil(t, err) + _, err = 
server.Write(cdc.MustMarshalBinary(PacketPong{})) + require.Nil(t, err) + _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + require.Nil(t, err) + + serverGotPing := make(chan struct{}) + go func() { + // read ping (one byte) + var packet, err = Packet(nil), error(nil) + _, err = cdc.UnmarshalBinaryReader(server, &packet, maxPingPongPacketSize) + require.Nil(t, err) + serverGotPing <- struct{}{} + // respond with pong + _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + require.Nil(t, err) + }() + <-serverGotPing + + pongTimerExpired := mconn.config.PongTimeout + 20*time.Millisecond + select { + case msgBytes := <-receivedCh: + t.Fatalf("Expected no data, but got %v", msgBytes) + case err := <-errorsCh: + t.Fatalf("Expected no error, but got %v", err) + case <-time.After(pongTimerExpired): + assert.True(t, mconn.IsRunning()) + } +} + +func TestMConnectionMultiplePings(t *testing.T) { + server, client := net.Pipe() + defer server.Close() + defer client.Close() + + receivedCh := make(chan []byte) + errorsCh := make(chan interface{}) + onReceive := func(chID byte, msgBytes []byte) { + receivedCh <- msgBytes + } + onError := func(r interface{}) { + errorsCh <- r + } + mconn := createMConnectionWithCallbacks(client, onReceive, onError) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + // sending 3 pings in a row (abuse) + // see https://github.com/tendermint/tendermint/issues/1190 + _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + require.Nil(t, err) + var pkt PacketPong + _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + require.Nil(t, err) + _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + require.Nil(t, err) + _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + require.Nil(t, err) + _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + require.Nil(t, err) + _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + require.Nil(t, err) + + assert.True(t, mconn.IsRunning()) +} + +func TestMConnectionPingPongs(t *testing.T) { + // check that we are not leaking any go-routines + defer leaktest.CheckTimeout(t, 10*time.Second)() + + server, client := net.Pipe() + + defer server.Close() + defer client.Close() + + receivedCh := make(chan []byte) + errorsCh := make(chan interface{}) + onReceive := func(chID byte, msgBytes []byte) { + receivedCh <- msgBytes + } + onError := func(r interface{}) { + errorsCh <- r + } + mconn := createMConnectionWithCallbacks(client, onReceive, onError) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + serverGotPing := make(chan struct{}) + go func() { + // read ping + var pkt PacketPing + _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + require.Nil(t, err) + serverGotPing <- struct{}{} + // respond with pong + _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + require.Nil(t, err) + + time.Sleep(mconn.config.PingInterval) + + // read ping + _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + require.Nil(t, err) + // respond with pong + _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + require.Nil(t, err) + }() + <-serverGotPing + + pongTimerExpired := (mconn.config.PongTimeout + 20*time.Millisecond) * 2 + select { + case msgBytes := <-receivedCh: + t.Fatalf("Expected no data, but got %v", msgBytes) + case err := <-errorsCh: + t.Fatalf("Expected no error, but got %v", err) + case <-time.After(2 * pongTimerExpired): + assert.True(t, mconn.IsRunning()) + } 
+} + +func TestMConnectionStopsAndReturnsError(t *testing.T) { + server, client := NetPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck + + receivedCh := make(chan []byte) + errorsCh := make(chan interface{}) + onReceive := func(chID byte, msgBytes []byte) { + receivedCh <- msgBytes + } + onError := func(r interface{}) { + errorsCh <- r + } + mconn := createMConnectionWithCallbacks(client, onReceive, onError) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + if err := client.Close(); err != nil { + t.Error(err) + } + + select { + case receivedBytes := <-receivedCh: + t.Fatalf("Expected error, got %v", receivedBytes) + case err := <-errorsCh: + assert.NotNil(t, err) + assert.False(t, mconn.IsRunning()) + case <-time.After(500 * time.Millisecond): + t.Fatal("Did not receive error in 500ms") + } +} + +func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) { + server, client := NetPipe() + + onReceive := func(chID byte, msgBytes []byte) {} + onError := func(r interface{}) {} + + // create client conn with two channels + chDescs := []*ChannelDescriptor{ + {ID: 0x01, Priority: 1, SendQueueCapacity: 1}, + {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, + } + mconnClient := NewMConnection(client, chDescs, onReceive, onError) + mconnClient.SetLogger(log.TestingLogger().With("module", "client")) + err := mconnClient.Start() + require.Nil(t, err) + + // create server conn with 1 channel + // it fires on chOnErr when there's an error + serverLogger := log.TestingLogger().With("module", "server") + onError = func(r interface{}) { + chOnErr <- struct{}{} + } + mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) + mconnServer.SetLogger(serverLogger) + err = mconnServer.Start() + require.Nil(t, err) + return mconnClient, mconnServer +} + +func expectSend(ch chan struct{}) bool { + after := time.After(time.Second * 5) + select { + case <-ch: + return true + case <-after: + return false + } +} + +func TestMConnectionReadErrorBadEncoding(t *testing.T) { + chOnErr := make(chan struct{}) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + client := mconnClient.conn + + // send badly encoded msgPacket + bz := cdc.MustMarshalBinary(PacketMsg{}) + bz[4] += 0x01 // Invalid prefix bytes. + + // Write it. + _, err := client.Write(bz) + assert.Nil(t, err) + assert.True(t, expectSend(chOnErr), "badly encoded msgPacket") +} + +func TestMConnectionReadErrorUnknownChannel(t *testing.T) { + chOnErr := make(chan struct{}) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + msg := []byte("Ant-Man") + + // fail to send msg on channel unknown by client + assert.False(t, mconnClient.Send(0x03, msg)) + + // send msg on channel unknown by the server. 
+ // should cause an error + assert.True(t, mconnClient.Send(0x02, msg)) + assert.True(t, expectSend(chOnErr), "unknown channel") +} + +func TestMConnectionReadErrorLongMessage(t *testing.T) { + chOnErr := make(chan struct{}) + chOnRcv := make(chan struct{}) + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + mconnServer.onReceive = func(chID byte, msgBytes []byte) { + chOnRcv <- struct{}{} + } + + client := mconnClient.conn + + // send msg thats just right + var err error + var buf = new(bytes.Buffer) + var packet = PacketMsg{ + ChannelID: 0x01, + EOF: 1, + Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), + } + _, err = cdc.MarshalBinaryWriter(buf, packet) + assert.Nil(t, err) + _, err = client.Write(buf.Bytes()) + assert.Nil(t, err) + assert.True(t, expectSend(chOnRcv), "msg just right") + assert.False(t, expectSend(chOnErr), "msg just right") + + // send msg thats too long + buf = new(bytes.Buffer) + packet = PacketMsg{ + ChannelID: 0x01, + EOF: 1, + Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+100), + } + _, err = cdc.MarshalBinaryWriter(buf, packet) + assert.Nil(t, err) + _, err = client.Write(buf.Bytes()) + assert.NotNil(t, err) + assert.False(t, expectSend(chOnRcv), "msg too long") + assert.True(t, expectSend(chOnErr), "msg too long") +} + +func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { + chOnErr := make(chan struct{}) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + // send msg with unknown msg type + err := error(nil) + err = amino.EncodeUvarint(mconnClient.conn, 4) + assert.Nil(t, err) + _, err = mconnClient.conn.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF}) + assert.Nil(t, err) + assert.True(t, expectSend(chOnErr), "unknown msg type") +} + +func TestMConnectionTrySend(t *testing.T) { + server, client := NetPipe() + defer server.Close() + defer client.Close() + + mconn := createTestMConnection(client) + err := mconn.Start() + require.Nil(t, err) + defer mconn.Stop() + + msg := []byte("Semicolon-Woman") + resultCh := make(chan string, 2) + assert.True(t, mconn.TrySend(0x01, msg)) + server.Read(make([]byte, len(msg))) + assert.True(t, mconn.CanSend(0x01)) + assert.True(t, mconn.TrySend(0x01, msg)) + assert.False(t, mconn.CanSend(0x01)) + go func() { + mconn.TrySend(0x01, msg) + resultCh <- "TrySend" + }() + assert.False(t, mconn.CanSend(0x01)) + assert.False(t, mconn.TrySend(0x01, msg)) + assert.Equal(t, "TrySend", <-resultCh) +} diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go new file mode 100644 index 000000000..a2cbe008d --- /dev/null +++ b/p2p/conn/secret_connection.go @@ -0,0 +1,352 @@ +// Uses nacl's secret_box to encrypt a net.Conn. +// It is (meant to be) an implementation of the STS protocol. +// Note we do not (yet) assume that a remote peer's pubkey +// is known ahead of time, and thus we are technically +// still vulnerable to MITM. (TODO!) 
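+// (Checking the authenticated pubkey against an expected peer ID is left to the caller.)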
+// See docs/sts-final.pdf for more info
+package conn

+import (
+ "bytes"
+ crand "crypto/rand"
+ "crypto/sha256"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+ "time"
+
+ "golang.org/x/crypto/nacl/box"
+ "golang.org/x/crypto/nacl/secretbox"
+ "golang.org/x/crypto/ripemd160"
+
+ "github.com/tendermint/tendermint/crypto"
+ cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// 4 + 1024 == 1028 total frame size
+const dataLenSize = 4
+const dataMaxSize = 1024
+const totalFrameSize = dataMaxSize + dataLenSize
+const sealedFrameSize = totalFrameSize + secretbox.Overhead
+
+// Implements net.Conn
+type SecretConnection struct {
+ conn io.ReadWriteCloser
+ recvBuffer []byte
+ recvNonce *[24]byte
+ sendNonce *[24]byte
+ remPubKey crypto.PubKey
+ shrSecret *[32]byte // shared secret
+}
+
+// Performs handshake and returns a new authenticated SecretConnection.
+// Returns nil if there is an error in the handshake.
+// Caller should call conn.Close()
+// See docs/sts-final.pdf for more information.
+func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
+
+ locPubKey := locPrivKey.PubKey()
+
+ // Generate ephemeral keys for perfect forward secrecy.
+ locEphPub, locEphPriv := genEphKeys()
+
+ // Write local ephemeral pubkey and receive one too.
+ // NOTE: every 32-byte string is accepted as a Curve25519 public key
+ // (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf)
+ remEphPub, err := shareEphPubKey(conn, locEphPub)
+ if err != nil {
+ return nil, err
+ }
+
+ // Compute common shared secret.
+ shrSecret := computeSharedSecret(remEphPub, locEphPriv)
+
+ // Sort by lexical order.
+ loEphPub, hiEphPub := sort32(locEphPub, remEphPub)
+
+ // Check if the local ephemeral public key
+ // was the least, lexicographically sorted.
+ locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:])
+
+ // Generate nonces to use for secretbox.
+ recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locIsLeast)
+
+ // Generate common challenge to sign.
+ challenge := genChallenge(loEphPub, hiEphPub)
+
+ // Construct SecretConnection.
+ sc := &SecretConnection{
+ conn: conn,
+ recvBuffer: nil,
+ recvNonce: recvNonce,
+ sendNonce: sendNonce,
+ shrSecret: shrSecret,
+ }
+
+ // Sign the challenge bytes for authentication.
+ locSignature := signChallenge(challenge, locPrivKey)
+
+ // Share (in secret) each other's pubkey & challenge signature
+ authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
+ if err != nil {
+ return nil, err
+ }
+ remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig
+ if !remPubKey.VerifyBytes(challenge[:], remSignature) {
+ return nil, errors.New("Challenge verification failed")
+ }
+
+ // We've authorized.
+ sc.remPubKey = remPubKey
+ return sc, nil
+}
+
+// Returns authenticated remote pubkey
+func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
+ return sc.remPubKey
+}
+
+// Writes encrypted frames of `sealedFrameSize`
+// CONTRACT: data smaller than dataMaxSize is written atomically.
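+// Larger writes are split into frames of dataMaxSize bytes, each sealed separately.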
+func (sc *SecretConnection) Write(data []byte) (n int, err error) {
+ for 0 < len(data) {
+ var frame = make([]byte, totalFrameSize)
+ var chunk []byte
+ if dataMaxSize < len(data) {
+ chunk = data[:dataMaxSize]
+ data = data[dataMaxSize:]
+ } else {
+ chunk = data
+ data = nil
+ }
+ chunkLength := len(chunk)
+ binary.BigEndian.PutUint32(frame, uint32(chunkLength))
+ copy(frame[dataLenSize:], chunk)
+
+ // encrypt the frame
+ var sealedFrame = make([]byte, sealedFrameSize)
+ secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret)
+ // fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret)
+ incr2Nonce(sc.sendNonce)
+ // end encryption
+
+ _, err := sc.conn.Write(sealedFrame)
+ if err != nil {
+ return n, err
+ }
+ n += len(chunk)
+ }
+ return
+}
+
+// CONTRACT: data smaller than dataMaxSize is read atomically.
+func (sc *SecretConnection) Read(data []byte) (n int, err error) {
+ if 0 < len(sc.recvBuffer) {
+ n = copy(data, sc.recvBuffer)
+ sc.recvBuffer = sc.recvBuffer[n:]
+ return
+ }
+
+ sealedFrame := make([]byte, sealedFrameSize)
+ _, err = io.ReadFull(sc.conn, sealedFrame)
+ if err != nil {
+ return
+ }
+
+ // decrypt the frame
+ var frame = make([]byte, totalFrameSize)
+ // fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret)
+ _, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret)
+ if !ok {
+ return n, errors.New("Failed to decrypt SecretConnection")
+ }
+ incr2Nonce(sc.recvNonce)
+ // end decryption
+
+ var chunkLength = binary.BigEndian.Uint32(frame) // read the length from the first four bytes
+ if chunkLength > dataMaxSize {
+ return 0, errors.New("chunkLength is greater than dataMaxSize")
+ }
+ var chunk = frame[dataLenSize : dataLenSize+chunkLength]
+
+ n = copy(data, chunk)
+ sc.recvBuffer = chunk[n:]
+ return
+}
+
+// Implements net.Conn
+func (sc *SecretConnection) Close() error { return sc.conn.Close() }
+func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() }
+func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() }
+func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) }
+func (sc *SecretConnection) SetReadDeadline(t time.Time) error {
+ return sc.conn.(net.Conn).SetReadDeadline(t)
+}
+func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {
+ return sc.conn.(net.Conn).SetWriteDeadline(t)
+}
+
+func genEphKeys() (ephPub, ephPriv *[32]byte) {
+ var err error
+ ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
+ if err != nil {
+ panic("Could not generate ephemeral key pair")
+ }
+ return
+}
+
+func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
+
+ // Send our pubkey and receive theirs in tandem.
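+ // cmn.Parallel runs both closures concurrently and collects each task's value and error.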
+ var trs, _ = cmn.Parallel( + func(_ int) (val interface{}, err error, abort bool) { + var _, err1 = cdc.MarshalBinaryWriter(conn, locEphPub) + if err1 != nil { + return nil, err1, true // abort + } else { + return nil, nil, false + } + }, + func(_ int) (val interface{}, err error, abort bool) { + var _remEphPub [32]byte + var _, err2 = cdc.UnmarshalBinaryReader(conn, &_remEphPub, 1024*1024) // TODO + if err2 != nil { + return nil, err2, true // abort + } else { + return _remEphPub, nil, false + } + }, + ) + + // If error: + if trs.FirstError() != nil { + err = trs.FirstError() + return + } + + // Otherwise: + var _remEphPub = trs.FirstValue().([32]byte) + return &_remEphPub, nil +} + +func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) { + shrSecret = new([32]byte) + box.Precompute(shrSecret, remPubKey, locPrivKey) + return +} + +func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) { + if bytes.Compare(foo[:], bar[:]) < 0 { + lo = foo + hi = bar + } else { + lo = bar + hi = foo + } + return +} + +func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) { + nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...)) + nonce2 := new([24]byte) + copy(nonce2[:], nonce1[:]) + nonce2[len(nonce2)-1] ^= 0x01 + if locIsLo { + recvNonce = nonce1 + sendNonce = nonce2 + } else { + recvNonce = nonce2 + sendNonce = nonce1 + } + return +} + +func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) { + return hash32(append(loPubKey[:], hiPubKey[:]...)) +} + +func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) { + signature, err := locPrivKey.Sign(challenge[:]) + // TODO(ismail): let signChallenge return an error instead + if err != nil { + panic(err) + } + return +} + +type authSigMessage struct { + Key crypto.PubKey + Sig crypto.Signature +} + +func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg authSigMessage, err error) { + + // Send our info and receive theirs in tandem. + var trs, _ = cmn.Parallel( + func(_ int) (val interface{}, err error, abort bool) { + var _, err1 = cdc.MarshalBinaryWriter(sc, authSigMessage{pubKey, signature}) + if err1 != nil { + return nil, err1, true // abort + } else { + return nil, nil, false + } + }, + func(_ int) (val interface{}, err error, abort bool) { + var _recvMsg authSigMessage + var _, err2 = cdc.UnmarshalBinaryReader(sc, &_recvMsg, 1024*1024) // TODO + if err2 != nil { + return nil, err2, true // abort + } else { + return _recvMsg, nil, false + } + }, + ) + + // If error: + if trs.FirstError() != nil { + err = trs.FirstError() + return + } + + var _recvMsg = trs.FirstValue().(authSigMessage) + return _recvMsg, nil +} + +//-------------------------------------------------------------------------------- + +// sha256 +func hash32(input []byte) (res *[32]byte) { + hasher := sha256.New() + hasher.Write(input) // nolint: errcheck, gas + resSlice := hasher.Sum(nil) + res = new([32]byte) + copy(res[:], resSlice) + return +} + +// We only fill in the first 20 bytes with ripemd160 +func hash24(input []byte) (res *[24]byte) { + hasher := ripemd160.New() + hasher.Write(input) // nolint: errcheck, gas + resSlice := hasher.Sum(nil) + res = new([24]byte) + copy(res[:], resSlice) + return +} + +// increment nonce big-endian by 2 with wraparound. +func incr2Nonce(nonce *[24]byte) { + incrNonce(nonce) + incrNonce(nonce) +} + +// increment nonce big-endian by 1 with wraparound. 
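+// (incr2Nonce steps by 2 because genNonces fixes the low bit per side, keeping send and recv nonces disjoint.)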
+func incrNonce(nonce *[24]byte) {
+ for i := 23; 0 <= i; i-- {
+ nonce[i]++
+ if nonce[i] != 0 {
+ return
+ }
+ }
+}
diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go
new file mode 100644
index 000000000..7274dfaf7
--- /dev/null
+++ b/p2p/conn/secret_connection_test.go
@@ -0,0 +1,250 @@
+package conn
+
+import (
+ "fmt"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ crypto "github.com/tendermint/tendermint/crypto"
+ cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+type kvstoreConn struct {
+ *io.PipeReader
+ *io.PipeWriter
+}
+
+func (drw kvstoreConn) Close() (err error) {
+ err2 := drw.PipeWriter.CloseWithError(io.EOF)
+ err1 := drw.PipeReader.Close()
+ if err2 != nil {
+ return err2
+ }
+ return err1
+}
+
+// Each returned ReadWriteCloser is akin to a net.Conn
+func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) {
+ barReader, fooWriter := io.Pipe()
+ fooReader, barWriter := io.Pipe()
+ return kvstoreConn{fooReader, fooWriter}, kvstoreConn{barReader, barWriter}
+}
+
+func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) {
+
+ var fooConn, barConn = makeKVStoreConnPair()
+ var fooPrvKey = crypto.GenPrivKeyEd25519()
+ var fooPubKey = fooPrvKey.PubKey()
+ var barPrvKey = crypto.GenPrivKeyEd25519()
+ var barPubKey = barPrvKey.PubKey()
+
+ // Make connections from both sides in parallel.
+ var trs, ok = cmn.Parallel(
+ func(_ int) (val interface{}, err error, abort bool) {
+ fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
+ if err != nil {
+ tb.Errorf("Failed to establish SecretConnection for foo: %v", err)
+ return nil, err, true
+ }
+ remotePubBytes := fooSecConn.RemotePubKey()
+ if !remotePubBytes.Equals(barPubKey) {
+ err = fmt.Errorf("Unexpected fooSecConn.RemotePubKey. Expected %v, got %v",
+ barPubKey, fooSecConn.RemotePubKey())
+ tb.Error(err)
+ return nil, err, false
+ }
+ return nil, nil, false
+ },
+ func(_ int) (val interface{}, err error, abort bool) {
+ barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
+ if barSecConn == nil {
+ tb.Errorf("Failed to establish SecretConnection for bar: %v", err)
+ return nil, err, true
+ }
+ remotePubBytes := barSecConn.RemotePubKey()
+ if !remotePubBytes.Equals(fooPubKey) {
+ err = fmt.Errorf("Unexpected barSecConn.RemotePubKey. Expected %v, got %v",
+ fooPubKey, barSecConn.RemotePubKey())
+ tb.Error(err)
+ return nil, nil, false
+ }
+ return nil, nil, false
+ },
+ )
+
+ require.Nil(tb, trs.FirstError())
+ require.True(tb, ok, "Unexpected task abortion")
+
+ return
+}
+
+func TestSecretConnectionHandshake(t *testing.T) {
+ fooSecConn, barSecConn := makeSecretConnPair(t)
+ if err := fooSecConn.Close(); err != nil {
+ t.Error(err)
+ }
+ if err := barSecConn.Close(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSecretConnectionReadWrite(t *testing.T) {
+ fooConn, barConn := makeKVStoreConnPair()
+ fooWrites, barWrites := []string{}, []string{}
+ fooReads, barReads := []string{}, []string{}
+
+ // Pre-generate the things to write (for foo & bar)
+ for i := 0; i < 100; i++ {
+ fooWrites = append(fooWrites, cmn.RandStr((cmn.RandInt()%(dataMaxSize*5))+1))
+ barWrites = append(barWrites, cmn.RandStr((cmn.RandInt()%(dataMaxSize*5))+1))
+ }
+
+ // A helper that will run with (fooConn, fooWrites, fooReads) and vice versa
+ genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task {
+ return func(_ int) (interface{}, error, bool) {
+ // Create a private key and establish a secret connection through nodeConn.
+ nodePrvKey := crypto.GenPrivKeyEd25519()
+ nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
+ if err != nil {
+ t.Errorf("Failed to establish SecretConnection for node: %v", err)
+ return nil, err, true
+ }
+ // In parallel, handle some reads and writes.
+ var trs, ok = cmn.Parallel(
+ func(_ int) (interface{}, error, bool) {
+ // Node writes:
+ for _, nodeWrite := range nodeWrites {
+ n, err := nodeSecretConn.Write([]byte(nodeWrite))
+ if err != nil {
+ t.Errorf("Failed to write to nodeSecretConn: %v", err)
+ return nil, err, true
+ }
+ if n != len(nodeWrite) {
+ err = fmt.Errorf("Failed to write all bytes. Expected %v, wrote %v", len(nodeWrite), n)
+ t.Error(err)
+ return nil, err, true
+ }
+ }
+ if err := nodeConn.PipeWriter.Close(); err != nil {
+ t.Error(err)
+ return nil, err, true
+ }
+ return nil, nil, false
+ },
+ func(_ int) (interface{}, error, bool) {
+ // Node reads:
+ readBuffer := make([]byte, dataMaxSize)
+ for {
+ n, err := nodeSecretConn.Read(readBuffer)
+ if err == io.EOF {
+ if err := nodeConn.PipeReader.Close(); err != nil {
+ t.Error(err)
+ return nil, err, true
+ }
+ return nil, nil, false
+ } else if err != nil {
+ t.Errorf("Failed to read from nodeSecretConn: %v", err)
+ return nil, err, true
+ }
+ *nodeReads = append(*nodeReads, string(readBuffer[:n]))
+ }
+ },
+ )
+ assert.True(t, ok, "Unexpected task abortion")
+
+ // If error:
+ if trs.FirstError() != nil {
+ return nil, trs.FirstError(), true
+ }
+
+ // Otherwise:
+ return nil, nil, false
+ }
+ }
+
+ // Run foo & bar in parallel
+ var trs, ok = cmn.Parallel(
+ genNodeRunner("foo", fooConn, fooWrites, &fooReads),
+ genNodeRunner("bar", barConn, barWrites, &barReads),
+ )
+ require.Nil(t, trs.FirstError())
+ require.True(t, ok, "unexpected task abortion")
+
+ // A helper to ensure that the writes and reads match.
+ // Additionally, small writes (<= dataMaxSize) must be atomically read.
+	compareWritesReads := func(writes []string, reads []string) {
+		for {
+			// Pop next write & corresponding reads
+			var read, write string = "", writes[0]
+			var readCount = 0
+			for _, readChunk := range reads {
+				read += readChunk
+				readCount++
+				if len(write) <= len(read) {
+					break
+				}
+				if len(write) <= dataMaxSize {
+					break // atomicity of small writes
+				}
+			}
+			// Compare
+			if write != read {
+				t.Errorf("Expected to read %X, got %X", write, read)
+			}
+			// Iterate
+			writes = writes[1:]
+			reads = reads[readCount:]
+			if len(writes) == 0 {
+				break
+			}
+		}
+	}
+
+	compareWritesReads(fooWrites, barReads)
+	compareWritesReads(barWrites, fooReads)
+
+}
+
+func BenchmarkSecretConnection(b *testing.B) {
+	b.StopTimer()
+	fooSecConn, barSecConn := makeSecretConnPair(b)
+	fooWriteText := cmn.RandStr(dataMaxSize)
+	// Consume reads from bar's reader
+	go func() {
+		readBuffer := make([]byte, dataMaxSize)
+		for {
+			_, err := barSecConn.Read(readBuffer)
+			if err == io.EOF {
+				return
+			} else if err != nil {
+				// Fatalf must not be called from a non-test goroutine;
+				// report the error and stop reading instead.
+				b.Errorf("Failed to read from barSecConn: %v", err)
+				return
+			}
+		}
+	}()
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := fooSecConn.Write([]byte(fooWriteText))
+		if err != nil {
+			b.Fatalf("Failed to write to fooSecConn: %v", err)
+		}
+	}
+	b.StopTimer()
+
+	if err := fooSecConn.Close(); err != nil {
+		b.Error(err)
+	}
+	//barSecConn.Close() race condition
+}
+
+func fingerprint(bz []byte) []byte {
+	const fbsize = 40
+	if len(bz) < fbsize {
+		return bz
+	}
+	return bz[:fbsize]
+}
diff --git a/p2p/conn/wire.go b/p2p/conn/wire.go
new file mode 100644
index 000000000..3182fde38
--- /dev/null
+++ b/p2p/conn/wire.go
@@ -0,0 +1,13 @@
+package conn
+
+import (
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto"
+)
+
+var cdc *amino.Codec = amino.NewCodec()
+
+func init() {
+	crypto.RegisterAmino(cdc)
+	RegisterPacket(cdc)
+}
diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go
new file mode 100644
index 000000000..d18a9f99d
--- /dev/null
+++ b/p2p/dummy/peer.go
@@ -0,0 +1,80 @@
+package dummy
+
+import (
+	"net"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	p2p "github.com/tendermint/tendermint/p2p"
+	tmconn "github.com/tendermint/tendermint/p2p/conn"
+)
+
+type peer struct {
+	cmn.BaseService
+	kv map[string]interface{}
+}
+
+var _ p2p.Peer = (*peer)(nil)
+
+// NewPeer creates a new dummy peer.
+func NewPeer() *peer {
+	p := &peer{
+		kv: make(map[string]interface{}),
+	}
+	p.BaseService = *cmn.NewBaseService(nil, "peer", p)
+
+	return p
+}
+
+// ID always returns dummy.
+func (p *peer) ID() p2p.ID {
+	return p2p.ID("dummy")
+}
+
+// IsOutbound always returns false.
+func (p *peer) IsOutbound() bool {
+	return false
+}
+
+// IsPersistent always returns false.
+func (p *peer) IsPersistent() bool {
+	return false
+}
+
+// NodeInfo always returns empty node info.
+func (p *peer) NodeInfo() p2p.NodeInfo {
+	return p2p.NodeInfo{}
+}
+
+// RemoteIP always returns localhost.
+func (p *peer) RemoteIP() net.IP {
+	return net.ParseIP("127.0.0.1")
+}
+
+// Status always returns empty connection status.
+func (p *peer) Status() tmconn.ConnectionStatus {
+	return tmconn.ConnectionStatus{}
+}
+
+// Send does not do anything and just returns true.
+func (p *peer) Send(byte, []byte) bool {
+	return true
+}
+
+// TrySend does not do anything and just returns true.
+func (p *peer) TrySend(byte, []byte) bool {
+	return true
+}
+
+// Set stores the value under the given key.
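+//
+// A usage sketch (hypothetical caller code):
+//
+//	p := dummy.NewPeer()
+//	p.Set("height", int64(10))
+//	h := p.Get("height").(int64) // h == 10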
+func (p *peer) Set(key string, value interface{}) {
+	p.kv[key] = value
+}
+
+// Get returns the value associated with the key. Nil is returned if no value
+// is found.
+func (p *peer) Get(key string) interface{} {
+	if value, ok := p.kv[key]; ok {
+		return value
+	}
+	return nil
+}
diff --git a/p2p/errors.go b/p2p/errors.go
new file mode 100644
index 000000000..fc477d1c2
--- /dev/null
+++ b/p2p/errors.go
@@ -0,0 +1,76 @@
+package p2p
+
+import (
+	"fmt"
+	"net"
+)
+
+// ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known
+// ID.
+type ErrSwitchDuplicatePeerID struct {
+	ID ID
+}
+
+func (e ErrSwitchDuplicatePeerID) Error() string {
+	return fmt.Sprintf("Duplicate peer ID %v", e.ID)
+}
+
+// ErrSwitchDuplicatePeerIP to be raised when a peer is connecting with a known
+// IP.
+type ErrSwitchDuplicatePeerIP struct {
+	IP net.IP
+}
+
+func (e ErrSwitchDuplicatePeerIP) Error() string {
+	return fmt.Sprintf("Duplicate peer IP %v", e.IP.String())
+}
+
+// ErrSwitchConnectToSelf to be raised when trying to connect to itself.
+type ErrSwitchConnectToSelf struct {
+	Addr *NetAddress
+}
+
+func (e ErrSwitchConnectToSelf) Error() string {
+	return fmt.Sprintf("Connect to self: %v", e.Addr)
+}
+
+type ErrSwitchAuthenticationFailure struct {
+	Dialed *NetAddress
+	Got    ID
+}
+
+func (e ErrSwitchAuthenticationFailure) Error() string {
+	return fmt.Sprintf(
+		"Failed to authenticate peer. Dialed %v, but got peer with ID %s",
+		e.Dialed,
+		e.Got,
+	)
+}
+
+//-------------------------------------------------------------------
+
+type ErrNetAddressNoID struct {
+	Addr string
+}
+
+func (e ErrNetAddressNoID) Error() string {
+	return fmt.Sprintf("Address (%s) does not contain ID", e.Addr)
+}
+
+type ErrNetAddressInvalid struct {
+	Addr string
+	Err  error
+}
+
+func (e ErrNetAddressInvalid) Error() string {
+	return fmt.Sprintf("Invalid address (%s): %v", e.Addr, e.Err)
+}
+
+type ErrNetAddressLookup struct {
+	Addr string
+	Err  error
+}
+
+func (e ErrNetAddressLookup) Error() string {
+	return fmt.Sprintf("Error looking up host (%s): %v", e.Addr, e.Err)
+}
diff --git a/p2p/fuzz.go b/p2p/fuzz.go
new file mode 100644
index 000000000..80e4fed6a
--- /dev/null
+++ b/p2p/fuzz.go
@@ -0,0 +1,152 @@
+package p2p
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"github.com/tendermint/tendermint/config"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// FuzzedConnection wraps any net.Conn and depending on the mode either delays
+// reads/writes or randomly drops reads/writes/connections.
+type FuzzedConnection struct {
+	conn net.Conn
+
+	mtx    sync.Mutex
+	start  <-chan time.Time
+	active bool
+
+	config *config.FuzzConnConfig
+}
+
+// FuzzConn creates a new FuzzedConnection. Fuzzing starts immediately.
+func FuzzConn(conn net.Conn) net.Conn {
+	return FuzzConnFromConfig(conn, config.DefaultFuzzConnConfig())
+}
+
+// FuzzConnFromConfig creates a new FuzzedConnection from a config. Fuzzing
+// starts immediately.
+func FuzzConnFromConfig(conn net.Conn, config *config.FuzzConnConfig) net.Conn {
+	return &FuzzedConnection{
+		conn:   conn,
+		start:  make(<-chan time.Time),
+		active: true,
+		config: config,
+	}
+}
+
+// FuzzConnAfter creates a new FuzzedConnection. Fuzzing starts when the
+// duration elapses.
+func FuzzConnAfter(conn net.Conn, d time.Duration) net.Conn {
+	return FuzzConnAfterFromConfig(conn, d, config.DefaultFuzzConnConfig())
+}
+
+// FuzzConnAfterFromConfig creates a new FuzzedConnection from a config.
+// Fuzzing starts when the duration elapses.
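+//
+// A sketch of intended use, mirroring newPeerConn later in this patch
+// (the 10s delay is illustrative):
+//
+//	conn = FuzzConnAfterFromConfig(conn, 10*time.Second, config.DefaultFuzzConnConfig())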
+func FuzzConnAfterFromConfig(
+	conn net.Conn,
+	d time.Duration,
+	config *config.FuzzConnConfig,
+) net.Conn {
+	return &FuzzedConnection{
+		conn:   conn,
+		start:  time.After(d),
+		active: false,
+		config: config,
+	}
+}
+
+// Config returns the connection's config.
+func (fc *FuzzedConnection) Config() *config.FuzzConnConfig {
+	return fc.config
+}
+
+// Read implements net.Conn.
+func (fc *FuzzedConnection) Read(data []byte) (n int, err error) {
+	if fc.fuzz() {
+		return 0, nil
+	}
+	return fc.conn.Read(data)
+}
+
+// Write implements net.Conn.
+func (fc *FuzzedConnection) Write(data []byte) (n int, err error) {
+	if fc.fuzz() {
+		return 0, nil
+	}
+	return fc.conn.Write(data)
+}
+
+// Close implements net.Conn.
+func (fc *FuzzedConnection) Close() error { return fc.conn.Close() }
+
+// LocalAddr implements net.Conn.
+func (fc *FuzzedConnection) LocalAddr() net.Addr { return fc.conn.LocalAddr() }
+
+// RemoteAddr implements net.Conn.
+func (fc *FuzzedConnection) RemoteAddr() net.Addr { return fc.conn.RemoteAddr() }
+
+// SetDeadline implements net.Conn.
+func (fc *FuzzedConnection) SetDeadline(t time.Time) error { return fc.conn.SetDeadline(t) }
+
+// SetReadDeadline implements net.Conn.
+func (fc *FuzzedConnection) SetReadDeadline(t time.Time) error {
+	return fc.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline implements net.Conn.
+func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
+	return fc.conn.SetWriteDeadline(t)
+}
+
+func (fc *FuzzedConnection) randomDuration() time.Duration {
+	// Convert to milliseconds directly (Nanoseconds()/1000 yields
+	// microseconds) and guard against a zero modulus.
+	maxDelayMillis := int(fc.config.MaxDelay / time.Millisecond)
+	if maxDelayMillis == 0 {
+		return 0
+	}
+	return time.Millisecond * time.Duration(cmn.RandInt()%maxDelayMillis) // nolint: gas
+}
+
+// implements the fuzz (delay, kill conn)
+// and returns whether or not the read/write should be ignored
+func (fc *FuzzedConnection) fuzz() bool {
+	if !fc.shouldFuzz() {
+		return false
+	}
+
+	switch fc.config.Mode {
+	case config.FuzzModeDrop:
+		// randomly drop the r/w, drop the conn, or sleep
+		r := cmn.RandFloat64()
+		if r <= fc.config.ProbDropRW {
+			return true
+		} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
+			// XXX: can't this fail because machine precision?
+			// XXX: do we need an error?
+			fc.Close() // nolint: errcheck, gas
+			return true
+		} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
+			time.Sleep(fc.randomDuration())
+		}
+	case config.FuzzModeDelay:
+		// sleep a bit
+		time.Sleep(fc.randomDuration())
+	}
+	return false
+}
+
+func (fc *FuzzedConnection) shouldFuzz() bool {
+	if fc.active {
+		return true
+	}
+
+	fc.mtx.Lock()
+	defer fc.mtx.Unlock()
+
+	select {
+	case <-fc.start:
+		fc.active = true
+		return true
+	default:
+		return false
+	}
+}
diff --git a/p2p/key.go b/p2p/key.go
new file mode 100644
index 000000000..9548d34f0
--- /dev/null
+++ b/p2p/key.go
@@ -0,0 +1,111 @@
+package p2p
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io/ioutil"
+
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// ID is a hex-encoded crypto.Address
+type ID string
+
+// IDByteLength is the length of a crypto.Address. Currently only 20.
+// TODO: support other length addresses ?
+const IDByteLength = 20
+
+//------------------------------------------------------------------------------
+// Persistent peer ID
+// TODO: encrypt on disk
+
+// NodeKey is the persistent peer key.
+// It contains the node's private key for authentication.
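+//
+// A minimal usage sketch (the file path is illustrative only):
+//
+//	nodeKey, err := p2p.LoadOrGenNodeKey("config/node_key.json")
+//	if err != nil {
+//		panic(err)
+//	}
+//	id := nodeKey.ID() // hex-encoded address of the pubkey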
+type NodeKey struct { + PrivKey crypto.PrivKey `json:"priv_key"` // our priv key +} + +// ID returns the peer's canonical ID - the hash of its public key. +func (nodeKey *NodeKey) ID() ID { + return PubKeyToID(nodeKey.PubKey()) +} + +// PubKey returns the peer's PubKey +func (nodeKey *NodeKey) PubKey() crypto.PubKey { + return nodeKey.PrivKey.PubKey() +} + +// PubKeyToID returns the ID corresponding to the given PubKey. +// It's the hex-encoding of the pubKey.Address(). +func PubKeyToID(pubKey crypto.PubKey) ID { + return ID(hex.EncodeToString(pubKey.Address())) +} + +// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. +// If the file does not exist, it generates and saves a new NodeKey. +func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { + if cmn.FileExists(filePath) { + nodeKey, err := LoadNodeKey(filePath) + if err != nil { + return nil, err + } + return nodeKey, nil + } + return genNodeKey(filePath) +} + +func LoadNodeKey(filePath string) (*NodeKey, error) { + jsonBytes, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, err + } + nodeKey := new(NodeKey) + err = cdc.UnmarshalJSON(jsonBytes, nodeKey) + if err != nil { + return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err) + } + return nodeKey, nil +} + +func genNodeKey(filePath string) (*NodeKey, error) { + privKey := crypto.GenPrivKeyEd25519() + nodeKey := &NodeKey{ + PrivKey: privKey, + } + + jsonBytes, err := cdc.MarshalJSON(nodeKey) + if err != nil { + return nil, err + } + err = ioutil.WriteFile(filePath, jsonBytes, 0600) + if err != nil { + return nil, err + } + return nodeKey, nil +} + +//------------------------------------------------------------------------------ + +// MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1. +// It can be used as a Proof of Work target. +// NOTE: targetBits must be a multiple of 8 and difficulty must be less than targetBits. +func MakePoWTarget(difficulty, targetBits uint) []byte { + if targetBits%8 != 0 { + panic(fmt.Sprintf("targetBits (%d) not a multiple of 8", targetBits)) + } + if difficulty >= targetBits { + panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits)) + } + targetBytes := targetBits / 8 + zeroPrefixLen := (int(difficulty) / 8) + prefix := bytes.Repeat([]byte{0}, zeroPrefixLen) + mod := (difficulty % 8) + if mod > 0 { + nonZeroPrefix := byte(1<<(8-mod) - 1) + prefix = append(prefix, nonZeroPrefix) + } + tailLen := int(targetBytes) - len(prefix) + return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...) +} diff --git a/p2p/key_test.go b/p2p/key_test.go new file mode 100644 index 000000000..51e1c0787 --- /dev/null +++ b/p2p/key_test.go @@ -0,0 +1,50 @@ +package p2p + +import ( + "bytes" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestLoadOrGenNodeKey(t *testing.T) { + filePath := filepath.Join(os.TempDir(), cmn.RandStr(12)+"_peer_id.json") + + nodeKey, err := LoadOrGenNodeKey(filePath) + assert.Nil(t, err) + + nodeKey2, err := LoadOrGenNodeKey(filePath) + assert.Nil(t, err) + + assert.Equal(t, nodeKey, nodeKey2) +} + +//---------------------------------------------------------- + +func padBytes(bz []byte, targetBytes int) []byte { + return append(bz, bytes.Repeat([]byte{0xFF}, targetBytes-len(bz))...) 
+} + +func TestPoWTarget(t *testing.T) { + + targetBytes := 20 + cases := []struct { + difficulty uint + target []byte + }{ + {0, padBytes([]byte{}, targetBytes)}, + {1, padBytes([]byte{127}, targetBytes)}, + {8, padBytes([]byte{0}, targetBytes)}, + {9, padBytes([]byte{0, 127}, targetBytes)}, + {10, padBytes([]byte{0, 63}, targetBytes)}, + {16, padBytes([]byte{0, 0}, targetBytes)}, + {17, padBytes([]byte{0, 0, 127}, targetBytes)}, + } + + for _, c := range cases { + assert.Equal(t, MakePoWTarget(c.difficulty, 20*8), c.target) + } +} diff --git a/p2p/listener.go b/p2p/listener.go new file mode 100644 index 000000000..3509ec69c --- /dev/null +++ b/p2p/listener.go @@ -0,0 +1,284 @@ +package p2p + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p/upnp" +) + +// Listener is a network listener for stream-oriented protocols, providing +// convenient methods to get listener's internal and external addresses. +// Clients are supposed to read incoming connections from a channel, returned +// by Connections() method. +type Listener interface { + Connections() <-chan net.Conn + InternalAddress() *NetAddress + ExternalAddress() *NetAddress + ExternalAddressHost() string + String() string + Stop() error +} + +// DefaultListener is a cmn.Service, running net.Listener underneath. +// Optionally, UPnP is used upon calling NewDefaultListener to resolve external +// address. +type DefaultListener struct { + cmn.BaseService + + listener net.Listener + intAddr *NetAddress + extAddr *NetAddress + connections chan net.Conn +} + +var _ Listener = (*DefaultListener)(nil) + +const ( + numBufferedConnections = 10 + defaultExternalPort = 8770 + tryListenSeconds = 5 +) + +func splitHostPort(addr string) (host string, port int) { + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + panic(err) + } + port, err = strconv.Atoi(portStr) + if err != nil { + panic(err) + } + return host, port +} + +// NewDefaultListener creates a new DefaultListener on lAddr, optionally trying +// to determine external address using UPnP. +func NewDefaultListener( + fullListenAddrString string, + externalAddrString string, + useUPnP bool, + logger log.Logger) Listener { + + // Split protocol, address, and port. + protocol, lAddr := cmn.ProtocolAndAddress(fullListenAddrString) + lAddrIP, lAddrPort := splitHostPort(lAddr) + + // Create listener + var listener net.Listener + var err error + for i := 0; i < tryListenSeconds; i++ { + listener, err = net.Listen(protocol, lAddr) + if err == nil { + break + } else if i < tryListenSeconds-1 { + time.Sleep(time.Second * 1) + } + } + if err != nil { + panic(err) + } + // Actual listener local IP & port + listenerIP, listenerPort := splitHostPort(listener.Addr().String()) + logger.Info("Local listener", "ip", listenerIP, "port", listenerPort) + + // Determine internal address... + var intAddr *NetAddress + intAddr, err = NewNetAddressStringWithOptionalID(lAddr) + if err != nil { + panic(err) + } + + inAddrAny := lAddrIP == "" || lAddrIP == "0.0.0.0" + + // Determine external address. + var extAddr *NetAddress + + if externalAddrString != "" { + var err error + extAddr, err = NewNetAddressStringWithOptionalID(externalAddrString) + if err != nil { + panic(fmt.Sprintf("Error in ExternalAddress: %v", err)) + } + } + + // If the lAddrIP is INADDR_ANY, try UPnP. 
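+	// Resolution order for the external address: an explicit
+	// externalAddrString wins, then UPnP discovery (only when listening on
+	// all interfaces), then a scan of local interface addresses below.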
+ if extAddr == nil && useUPnP && inAddrAny { + extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) + } + + // Otherwise just use the local address. + if extAddr == nil { + defaultToIPv4 := inAddrAny + extAddr = getNaiveExternalAddress(defaultToIPv4, listenerPort, false, logger) + } + if extAddr == nil { + panic("Could not determine external address!") + } + + dl := &DefaultListener{ + listener: listener, + intAddr: intAddr, + extAddr: extAddr, + connections: make(chan net.Conn, numBufferedConnections), + } + dl.BaseService = *cmn.NewBaseService(logger, "DefaultListener", dl) + err = dl.Start() // Started upon construction + if err != nil { + logger.Error("Error starting base service", "err", err) + } + return dl +} + +// OnStart implements cmn.Service by spinning a goroutine, listening for new +// connections. +func (l *DefaultListener) OnStart() error { + if err := l.BaseService.OnStart(); err != nil { + return err + } + go l.listenRoutine() + return nil +} + +// OnStop implements cmn.Service by closing the listener. +func (l *DefaultListener) OnStop() { + l.BaseService.OnStop() + l.listener.Close() // nolint: errcheck +} + +// Accept connections and pass on the channel +func (l *DefaultListener) listenRoutine() { + for { + conn, err := l.listener.Accept() + + if !l.IsRunning() { + break // Go to cleanup + } + + // listener wasn't stopped, + // yet we encountered an error. + if err != nil { + panic(err) + } + + l.connections <- conn + } + + // Cleanup + close(l.connections) + for range l.connections { + // Drain + } +} + +// Connections returns a channel of inbound connections. +// It gets closed when the listener closes. +func (l *DefaultListener) Connections() <-chan net.Conn { + return l.connections +} + +// InternalAddress returns the internal NetAddress (address used for +// listening). +func (l *DefaultListener) InternalAddress() *NetAddress { + return l.intAddr +} + +// ExternalAddress returns the external NetAddress (publicly available, +// determined using either UPnP or local resolver). +func (l *DefaultListener) ExternalAddress() *NetAddress { + return l.extAddr +} + +// ExternalAddressHost returns the external NetAddress IP string. If an IP is +// IPv6, it's wrapped in brackets ("[2001:db8:1f70::999:de8:7648:6e8]"). +func (l *DefaultListener) ExternalAddressHost() string { + ip := l.ExternalAddress().IP + if isIpv6(ip) { + // Means it's ipv6, so format it with brackets + return "[" + ip.String() + "]" + } + return ip.String() +} + +func (l *DefaultListener) String() string { + return fmt.Sprintf("Listener(@%v)", l.extAddr) +} + +/* external address helpers */ + +// UPNP external address discovery & port mapping +func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *NetAddress { + logger.Info("Getting UPNP external address") + nat, err := upnp.Discover() + if err != nil { + logger.Info("Could not perform UPNP discover", "err", err) + return nil + } + + ext, err := nat.GetExternalAddress() + if err != nil { + logger.Info("Could not get UPNP external address", "err", err) + return nil + } + + // UPnP can't seem to get the external port, so let's just be explicit. 
+ if externalPort == 0 { + externalPort = defaultExternalPort + } + + externalPort, err = nat.AddPortMapping("tcp", externalPort, internalPort, "tendermint", 0) + if err != nil { + logger.Info("Could not add UPNP port mapping", "err", err) + return nil + } + + logger.Info("Got UPNP external address", "address", ext) + return NewNetAddressIPPort(ext, uint16(externalPort)) +} + +func isIpv6(ip net.IP) bool { + v4 := ip.To4() + if v4 != nil { + return false + } + + ipString := ip.String() + + // Extra check just to be sure it's IPv6 + return (strings.Contains(ipString, ":") && !strings.Contains(ipString, ".")) +} + +// TODO: use syscalls: see issue #712 +func getNaiveExternalAddress(defaultToIPv4 bool, port int, settleForLocal bool, logger log.Logger) *NetAddress { + addrs, err := net.InterfaceAddrs() + if err != nil { + panic(cmn.Fmt("Could not fetch interface addresses: %v", err)) + } + + for _, a := range addrs { + ipnet, ok := a.(*net.IPNet) + if !ok { + continue + } + if defaultToIPv4 || !isIpv6(ipnet.IP) { + v4 := ipnet.IP.To4() + if v4 == nil || (!settleForLocal && v4[0] == 127) { + // loopback + continue + } + } else if !settleForLocal && ipnet.IP.IsLoopback() { + // IPv6, check for loopback + continue + } + return NewNetAddressIPPort(ipnet.IP, uint16(port)) + } + + // try again, but settle for local + logger.Info("Node may not be connected to internet. Settling for local address") + return getNaiveExternalAddress(defaultToIPv4, port, true, logger) +} diff --git a/p2p/listener_test.go b/p2p/listener_test.go new file mode 100644 index 000000000..f87b5d6f5 --- /dev/null +++ b/p2p/listener_test.go @@ -0,0 +1,79 @@ +package p2p + +import ( + "bytes" + "net" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" +) + +func TestListener(t *testing.T) { + // Create a listener + l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) + + // Dial the listener + lAddr := l.ExternalAddress() + connOut, err := lAddr.Dial() + if err != nil { + t.Fatalf("Could not connect to listener address %v", lAddr) + } else { + t.Logf("Created a connection to listener address %v", lAddr) + } + connIn, ok := <-l.Connections() + if !ok { + t.Fatalf("Could not get inbound connection from listener") + } + + msg := []byte("hi!") + go func() { + _, err := connIn.Write(msg) + if err != nil { + t.Error(err) + } + }() + b := make([]byte, 32) + n, err := connOut.Read(b) + if err != nil { + t.Fatalf("Error reading off connection: %v", err) + } + + b = b[:n] + if !bytes.Equal(msg, b) { + t.Fatalf("Got %s, expected %s", b, msg) + } + + // Close the server, no longer needed. + l.Stop() +} + +func TestExternalAddress(t *testing.T) { + { + // Create a listener with no external addr. Should default + // to local ipv4. + l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) + lAddr := l.ExternalAddress().String() + _, _, err := net.SplitHostPort(lAddr) + require.Nil(t, err) + spl := strings.Split(lAddr, ".") + require.Equal(t, len(spl), 4) + l.Stop() + } + + { + // Create a listener with set external ipv4 addr. 
+		setExAddr := "8.8.8.8:8080"
+		l := NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger())
+		lAddr := l.ExternalAddress().String()
+		require.Equal(t, lAddr, setExAddr)
+		l.Stop()
+	}
+
+	{
+		// Invalid external addr causes panic
+		setExAddr := "awrlsckjnal:8080"
+		require.Panics(t, func() { NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger()) })
+	}
+}
diff --git a/p2p/metrics.go b/p2p/metrics.go
new file mode 100644
index 000000000..ab876ee7c
--- /dev/null
+++ b/p2p/metrics.go
@@ -0,0 +1,33 @@
+package p2p
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Number of peers.
+	Peers metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics built using the Prometheus client library.
+func PrometheusMetrics() *Metrics {
+	return &Metrics{
+		Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "p2p",
+			Name:      "peers",
+			Help:      "Number of peers.",
+		}, []string{}),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Peers: discard.NewGauge(),
+	}
+}
diff --git a/p2p/netaddress.go b/p2p/netaddress.go
new file mode 100644
index 000000000..ebac8cc82
--- /dev/null
+++ b/p2p/netaddress.go
@@ -0,0 +1,317 @@
+// Modified for Tendermint
+// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
+// https://github.com/conformal/btcd/blob/master/LICENSE
+
+package p2p
+
+import (
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// NetAddress defines information about a peer on the network
+// including its ID, IP address, and port.
+type NetAddress struct {
+	ID   ID     `json:"id"`
+	IP   net.IP `json:"ip"`
+	Port uint16 `json:"port"`
+
+	// TODO:
+	// Name string `json:"name"` // optional DNS name
+
+	// memoize .String()
+	str string
+}
+
+// IDAddressString returns id@hostPort.
+func IDAddressString(id ID, hostPort string) string {
+	return fmt.Sprintf("%s@%s", id, hostPort)
+}
+
+// NewNetAddress returns a new NetAddress using the provided TCP
+// address. In a normal run, any non-TCP net.Addr causes a panic;
+// in tests it falls back to 0.0.0.0:0.
+// TODO: socks proxies?
+func NewNetAddress(id ID, addr net.Addr) *NetAddress {
+	tcpAddr, ok := addr.(*net.TCPAddr)
+	if !ok {
+		if flag.Lookup("test.v") == nil { // normal run
+			cmn.PanicSanity(cmn.Fmt("Only TCPAddrs are supported. Got: %v", addr))
+		} else { // in testing
+			netAddr := NewNetAddressIPPort(net.ParseIP("0.0.0.0"), 0)
+			netAddr.ID = id
+			return netAddr
+		}
+	}
+	ip := tcpAddr.IP
+	port := uint16(tcpAddr.Port)
+	na := NewNetAddressIPPort(ip, port)
+	na.ID = id
+	return na
+}
+
+// NewNetAddressString returns a new NetAddress using the provided address in
+// the form of "ID@IP:Port".
+// Also resolves the host if host is not an IP.
+// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
+func NewNetAddressString(addr string) (*NetAddress, error) {
+	spl := strings.Split(addr, "@")
+	if len(spl) < 2 {
+		return nil, ErrNetAddressNoID{addr}
+	}
+	return NewNetAddressStringWithOptionalID(addr)
+}
+
+// NewNetAddressStringWithOptionalID returns a new NetAddress using the
+// provided address in the form of "ID@IP:Port", where the ID is optional.
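+// Accepted forms thus include, e.g.,
+// "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656",
+// "127.0.0.1:26656", and either form with a protocol prefix ("tcp://...").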
+// Also resolves the host if host is not an IP.
+func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) {
+	addrWithoutProtocol := removeProtocolIfDefined(addr)
+
+	var id ID
+	spl := strings.Split(addrWithoutProtocol, "@")
+	if len(spl) == 2 {
+		idStr := spl[0]
+		idBytes, err := hex.DecodeString(idStr)
+		if err != nil {
+			return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
+		}
+		if len(idBytes) != IDByteLength {
+			return nil, ErrNetAddressInvalid{
+				addrWithoutProtocol,
+				fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength)}
+		}
+
+		id, addrWithoutProtocol = ID(idStr), spl[1]
+	}
+
+	host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
+	if err != nil {
+		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
+	}
+
+	ip := net.ParseIP(host)
+	if ip == nil {
+		if len(host) > 0 {
+			ips, err := net.LookupIP(host)
+			if err != nil {
+				return nil, ErrNetAddressLookup{host, err}
+			}
+			ip = ips[0]
+		}
+	}
+
+	port, err := strconv.ParseUint(portStr, 10, 16)
+	if err != nil {
+		return nil, ErrNetAddressInvalid{portStr, err}
+	}
+
+	na := NewNetAddressIPPort(ip, uint16(port))
+	na.ID = id
+	return na, nil
+}
+
+// NewNetAddressStrings returns an array of NetAddresses built using
+// the provided strings.
+func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) {
+	netAddrs := make([]*NetAddress, 0)
+	errs := make([]error, 0)
+	for _, addr := range addrs {
+		netAddr, err := NewNetAddressString(addr)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			netAddrs = append(netAddrs, netAddr)
+		}
+	}
+	return netAddrs, errs
+}
+
+// NewNetAddressIPPort returns a new NetAddress using the provided IP
+// and port number.
+func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
+	return &NetAddress{
+		IP:   ip,
+		Port: port,
+	}
+}
+
+// Equals reports whether na and other are the same addresses,
+// including their ID, IP, and Port.
+func (na *NetAddress) Equals(other interface{}) bool {
+	if o, ok := other.(*NetAddress); ok {
+		return na.String() == o.String()
+	}
+	return false
+}
+
+// Same returns true if na has the same non-empty ID or DialString as other.
+func (na *NetAddress) Same(other interface{}) bool {
+	if o, ok := other.(*NetAddress); ok {
+		if na.DialString() == o.DialString() {
+			return true
+		}
+		if na.ID != "" && na.ID == o.ID {
+			return true
+		}
+	}
+	return false
+}
+
+// String returns the string representation of the form <ID>@<IP>:<Port>.
+func (na *NetAddress) String() string {
+	if na.str == "" {
+		addrStr := na.DialString()
+		if na.ID != "" {
+			addrStr = IDAddressString(na.ID, addrStr)
+		}
+		na.str = addrStr
+	}
+	return na.str
+}
+
+func (na *NetAddress) DialString() string {
+	return net.JoinHostPort(
+		na.IP.String(),
+		strconv.FormatUint(uint64(na.Port), 10),
+	)
+}
+
+// Dial calls net.Dial on the address.
+func (na *NetAddress) Dial() (net.Conn, error) {
+	conn, err := net.Dial("tcp", na.DialString())
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// DialTimeout calls net.DialTimeout on the address.
+func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
+	conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// Routable returns true if the address is routable.
+func (na *NetAddress) Routable() bool {
+	// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
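+	// A routable address is valid and lies outside the private (RFC1918),
+	// link-local (RFC3927/RFC4862), unique-local (RFC4193), ORCHID (RFC4843),
+	// and loopback/zero ranges excluded below.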
+	return na.Valid() && !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
+		na.RFC4193() || na.RFC4843() || na.Local())
+}
+
+// Valid reports whether the address is valid. Invalid addresses are, for
+// IPv4, the zero or all-bits-set (broadcast) addresses; for IPv6, the zero
+// address or one that matches the RFC3849 documentation address format.
+func (na *NetAddress) Valid() bool {
+	return na.IP != nil && !(na.IP.IsUnspecified() || na.RFC3849() ||
+		na.IP.Equal(net.IPv4bcast))
+}
+
+// Local returns true if it is a local address.
+func (na *NetAddress) Local() bool {
+	return na.IP.IsLoopback() || zero4.Contains(na.IP)
+}
+
+// ReachabilityTo checks whether o can be reached from na.
+func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
+	const (
+		Unreachable = 0
+		Default     = iota
+		Teredo
+		Ipv6_weak
+		Ipv4
+		Ipv6_strong
+	)
+	if !na.Routable() {
+		return Unreachable
+	} else if na.RFC4380() {
+		if !o.Routable() {
+			return Default
+		} else if o.RFC4380() {
+			return Teredo
+		} else if o.IP.To4() != nil {
+			return Ipv4
+		} else { // ipv6
+			return Ipv6_weak
+		}
+	} else if na.IP.To4() != nil {
+		if o.Routable() && o.IP.To4() != nil {
+			return Ipv4
+		}
+		return Default
+	} else /* ipv6 */ {
+		var tunnelled bool
+		// Is our v6 tunnelled?
+		if o.RFC3964() || o.RFC6052() || o.RFC6145() {
+			tunnelled = true
+		}
+		if !o.Routable() {
+			return Default
+		} else if o.RFC4380() {
+			return Teredo
+		} else if o.IP.To4() != nil {
+			return Ipv4
+		} else if tunnelled {
+			// only prioritise ipv6 if we aren't tunnelling it.
+			return Ipv6_weak
+		}
+		return Ipv6_strong
+	}
+}
+
+// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
+// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
+// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
+// RFC3964: IPv6 6to4 (2002::/16)
+// RFC4193: IPv6 unique local (FC00::/7)
+// RFC4380: IPv6 Teredo tunneling (2001::/32)
+// RFC4843: IPv6 ORCHID: (2001:10::/28)
+// RFC4862: IPv6 Autoconfig (FE80::/64)
+// RFC6052: IPv6 well known prefix (64:FF9B::/96)
+// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96
+var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
+var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)}
+var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)}
+var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)}
+var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}
+var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)}
+var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)}
+var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)}
+var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)}
+var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
+var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
+var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
+var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
+
+func (na *NetAddress) RFC1918() bool {
+	return rfc1918_10.Contains(na.IP) ||
+		rfc1918_192.Contains(na.IP) ||
+		rfc1918_172.Contains(na.IP)
+}
+func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) }
+func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) }
+func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) }
+func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) }
+func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) }
+func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) }
+func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) }
+func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) }
+func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
+
+func removeProtocolIfDefined(addr string) string {
+	if strings.Contains(addr, "://") {
+		return strings.Split(addr, "://")[1]
+	}
+	return addr
+}
diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go
new file mode 100644
index 000000000..653b436a6
--- /dev/null
+++ b/p2p/netaddress_test.go
@@ -0,0 +1,148 @@
+package p2p
+
+import (
+	"net"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewNetAddress(t *testing.T) {
+	tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
+	require.Nil(t, err)
+	addr := NewNetAddress("", tcpAddr)
+
+	assert.Equal(t, "127.0.0.1:8080", addr.String())
+
+	assert.NotPanics(t, func() {
+		NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000})
+	}, "Calling NewNetAddress with UDPAddr should not panic in testing")
+}
+
+func TestNewNetAddressStringWithOptionalID(t *testing.T) {
+	testCases := []struct {
+		addr     string
+		expected string
+		correct  bool
+	}{
+		{"127.0.0.1:8080", "127.0.0.1:8080", true},
+		{"tcp://127.0.0.1:8080", "127.0.0.1:8080", true},
+		{"udp://127.0.0.1:8080", "127.0.0.1:8080", true},
+		{"udp//127.0.0.1:8080", "", false},
+		// {"127.0.0:8080", false},
+		{"notahost", "", false},
+		{"127.0.0.1:notapath", "", false},
+		{"notahost:8080", "", false},
+		{"8082", "", false},
+		{"127.0.0:8080000", "", false},
+
+		{"deadbeef@127.0.0.1:8080", "", false},
+		{"this-isnot-hex@127.0.0.1:8080", "", false},
+		{"xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
+		{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
+
+		{"tcp://deadbeef@127.0.0.1:8080", "", false},
+		{"tcp://this-isnot-hex@127.0.0.1:8080", "", false},
+		{"tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
+		{"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
+
+		{"tcp://@127.0.0.1:8080", "", false},
+		{"tcp://@", "", false},
+		{"", "", false},
+		{"@", "", false},
+		{" @", "", false},
+		{" @ ", "", false},
+	}
+
+	for _, tc := range testCases {
+		addr, err := NewNetAddressStringWithOptionalID(tc.addr)
+		if tc.correct {
+			if assert.Nil(t, err, tc.addr) {
+				assert.Equal(t, tc.expected, addr.String())
+			}
+		} else {
+			assert.NotNil(t, err, tc.addr)
+		}
+	}
+}
+
+func TestNewNetAddressString(t *testing.T) {
+	testCases := []struct {
+		addr     string
+		expected string
+		correct  bool
+	}{
+		{"127.0.0.1:8080", "127.0.0.1:8080", false},
+		{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
+	}
+
+	for _, tc := range testCases {
+		addr, err := NewNetAddressString(tc.addr)
+		if tc.correct {
+			if assert.Nil(t, err, tc.addr) {
+				assert.Equal(t, tc.expected, addr.String())
+			}
+		} else {
+			assert.NotNil(t, err, tc.addr)
+		}
+	}
+}
+
+func TestNewNetAddressStrings(t *testing.T) {
+	addrs, errs := NewNetAddressStrings([]string{
+		"127.0.0.1:8080",
+		"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
+		"deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"})
+	// the ID-less first address fails NewNetAddressString; the two ID'd
+	// addresses parse, hence one error and two addresses
+	assert.Len(t, errs, 1)
assert.Equal(t, 2, len(addrs)) +} + +func TestNewNetAddressIPPort(t *testing.T) { + addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) + assert.Equal(t, "127.0.0.1:8080", addr.String()) +} + +func TestNetAddressProperties(t *testing.T) { + // TODO add more test cases + testCases := []struct { + addr string + valid bool + local bool + routable bool + }{ + {"127.0.0.1:8080", true, true, false}, + {"ya.ru:80", true, false, true}, + } + + for _, tc := range testCases { + addr, err := NewNetAddressStringWithOptionalID(tc.addr) + require.Nil(t, err) + + assert.Equal(t, tc.valid, addr.Valid()) + assert.Equal(t, tc.local, addr.Local()) + assert.Equal(t, tc.routable, addr.Routable()) + } +} + +func TestNetAddressReachabilityTo(t *testing.T) { + // TODO add more test cases + testCases := []struct { + addr string + other string + reachability int + }{ + {"127.0.0.1:8080", "127.0.0.1:8081", 0}, + {"ya.ru:80", "127.0.0.1:8080", 1}, + } + + for _, tc := range testCases { + addr, err := NewNetAddressStringWithOptionalID(tc.addr) + require.Nil(t, err) + + other, err := NewNetAddressStringWithOptionalID(tc.other) + require.Nil(t, err) + + assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) + } +} diff --git a/p2p/node_info.go b/p2p/node_info.go new file mode 100644 index 000000000..5e8160a3b --- /dev/null +++ b/p2p/node_info.go @@ -0,0 +1,164 @@ +package p2p + +import ( + "fmt" + cmn "github.com/tendermint/tendermint/libs/common" + "strings" +) + +const ( + maxNodeInfoSize = 10240 // 10Kb + maxNumChannels = 16 // plenty of room for upgrades, for now +) + +// Max size of the NodeInfo struct +func MaxNodeInfoSize() int { + return maxNodeInfoSize +} + +// NodeInfo is the basic node information exchanged +// between two peers during the Tendermint P2P handshake. +type NodeInfo struct { + // Authenticate + // TODO: replace with NetAddress + ID ID `json:"id"` // authenticated identifier + ListenAddr string `json:"listen_addr"` // accepting incoming + + // Check compatibility. + // Channels are HexBytes so easier to read as JSON + Network string `json:"network"` // network/chain ID + Version string `json:"version"` // major.minor.revision + Channels cmn.HexBytes `json:"channels"` // channels this node knows about + + // ASCIIText fields + Moniker string `json:"moniker"` // arbitrary moniker + Other []string `json:"other"` // other application specific data +} + +// Validate checks the self-reported NodeInfo is safe. +// It returns an error if there +// are too many Channels, if there are any duplicate Channels, +// if the ListenAddr is malformed, or if the ListenAddr is a host name +// that can not be resolved to some IP. +// TODO: constraints for Moniker/Other? Or is that for the UI ? +// JAE: It needs to be done on the client, but to prevent ambiguous +// unicode characters, maybe it's worth sanitizing it here. +// In the future we might want to validate these, once we have a +// name-resolution system up. +// International clients could then use punycode (or we could use +// url-encoding), and we just need to be careful with how we handle that in our +// clients. (e.g. off by default). +func (info NodeInfo) Validate() error { + if len(info.Channels) > maxNumChannels { + return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels) + } + + // Sanitize ASCII text fields. 
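+	// (IsASCIIText and ASCIITrim come from libs/common; as used here they
+	// should reject non-printable input and strip surrounding spaces.)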
+	if !cmn.IsASCIIText(info.Moniker) || cmn.ASCIITrim(info.Moniker) == "" {
+		return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v.", info.Moniker)
+	}
+	for i, s := range info.Other {
+		if !cmn.IsASCIIText(s) || cmn.ASCIITrim(s) == "" {
+			return fmt.Errorf("info.Other[%v] must be valid non-empty ASCII text without tabs, but got %v.", i, s)
+		}
+	}
+
+	channels := make(map[byte]struct{})
+	for _, ch := range info.Channels {
+		_, ok := channels[ch]
+		if ok {
+			return fmt.Errorf("info.Channels contains duplicate channel id %v", ch)
+		}
+		channels[ch] = struct{}{}
+	}
+
+	// ensure ListenAddr is good
+	_, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr))
+	return err
+}
+
+// CompatibleWith checks if two NodeInfo are compatible with each other.
+// CONTRACT: two nodes are compatible if the major version matches and network match
+// and they have at least one channel in common.
+func (info NodeInfo) CompatibleWith(other NodeInfo) error {
+	iMajor, _, _, iErr := splitVersion(info.Version)
+	oMajor, _, _, oErr := splitVersion(other.Version)
+
+	// if our own version number is not formatted right, we messed up
+	if iErr != nil {
+		return iErr
+	}
+
+	// version number must be formatted correctly ("x.x.x")
+	if oErr != nil {
+		return oErr
+	}
+
+	// major version must match; the minor version may differ
+	if iMajor != oMajor {
+		return fmt.Errorf("Peer is on a different major version. Got %v, expected %v", oMajor, iMajor)
+	}
+
+	// nodes must be on the same network
+	if info.Network != other.Network {
+		return fmt.Errorf("Peer is on a different network. Got %v, expected %v", other.Network, info.Network)
+	}
+
+	// if we have no channels, we're just testing
+	if len(info.Channels) == 0 {
+		return nil
+	}
+
+	// for each of our channels, check if they have it
+	found := false
+OUTER_LOOP:
+	for _, ch1 := range info.Channels {
+		for _, ch2 := range other.Channels {
+			if ch1 == ch2 {
+				found = true
+				break OUTER_LOOP // only need one
+			}
+		}
+	}
+	if !found {
+		return fmt.Errorf("Peer has no common channels. Our channels: %v ; Peer channels: %v", info.Channels, other.Channels)
+	}
+	return nil
+}
+
+// NetAddress returns a NetAddress derived from the NodeInfo -
+// it includes the authenticated peer ID and the self-reported
+// ListenAddr. Note that the ListenAddr is not authenticated and
+// may not match the address actually dialed if it's an outbound peer.
+func (info NodeInfo) NetAddress() *NetAddress {
+	netAddr, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr))
+	if err != nil {
+		switch err.(type) {
+		case ErrNetAddressLookup:
+			// XXX If the peer provided a host name and the lookup fails here
+			// we're out of luck.
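+			// In that case netAddr remains nil, and callers get a nil
+			// *NetAddress back.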
+ // TODO: use a NetAddress in NodeInfo + default: + panic(err) // everything should be well formed by now + } + } + return netAddr +} + +func (info NodeInfo) String() string { + return fmt.Sprintf("NodeInfo{id: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}", + info.ID, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other) +} + +func splitVersion(version string) (string, string, string, error) { + spl := strings.Split(version, ".") + if len(spl) != 3 { + return "", "", "", fmt.Errorf("Invalid version format %v", version) + } + return spl[0], spl[1], spl[2], nil +} diff --git a/p2p/peer.go b/p2p/peer.go new file mode 100644 index 000000000..5c615275b --- /dev/null +++ b/p2p/peer.go @@ -0,0 +1,431 @@ +package p2p + +import ( + "fmt" + "net" + "sync/atomic" + "time" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/config" + tmconn "github.com/tendermint/tendermint/p2p/conn" +) + +var testIPSuffix uint32 + +// Peer is an interface representing a peer connected on a reactor. +type Peer interface { + cmn.Service + + ID() ID // peer's cryptographic ID + RemoteIP() net.IP // remote IP of the connection + IsOutbound() bool // did we dial the peer + IsPersistent() bool // do we redial this peer when we disconnect + NodeInfo() NodeInfo // peer's info + Status() tmconn.ConnectionStatus + + Send(byte, []byte) bool + TrySend(byte, []byte) bool + + Set(string, interface{}) + Get(string) interface{} +} + +//---------------------------------------------------------- + +// peerConn contains the raw connection and its config. +type peerConn struct { + outbound bool + persistent bool + config *config.P2PConfig + conn net.Conn // source connection + ip net.IP +} + +// ID only exists for SecretConnection. +// NOTE: Will panic if conn is not *SecretConnection. +func (pc peerConn) ID() ID { + return PubKeyToID(pc.conn.(*tmconn.SecretConnection).RemotePubKey()) +} + +// Return the IP from the connection RemoteAddr +func (pc peerConn) RemoteIP() net.IP { + if pc.ip != nil { + return pc.ip + } + + // In test cases a conn could not be present at all or be an in-memory + // implementation where we want to return a fake ip. + if pc.conn == nil || pc.conn.RemoteAddr().String() == "pipe" { + pc.ip = net.IP{172, 16, 0, byte(atomic.AddUint32(&testIPSuffix, 1))} + + return pc.ip + } + + host, _, err := net.SplitHostPort(pc.conn.RemoteAddr().String()) + if err != nil { + panic(err) + } + + ips, err := net.LookupIP(host) + if err != nil { + panic(err) + } + + pc.ip = ips[0] + + return pc.ip +} + +// peer implements Peer. +// +// Before using a peer, you will need to perform a handshake on connection. 
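+// A rough lifecycle sketch using helpers defined in this file (error
+// handling elided, names as declared below):
+//
+//	pc, _ := newOutboundPeerConn(addr, cfg, false, nodeKey.PrivKey)
+//	info, _ := pc.HandshakeTimeout(ourNodeInfo, cfg.HandshakeTimeout)
+//	p := newPeer(pc, mConfig, info, reactorsByCh, chDescs, onPeerError)
+//	_ = p.Start()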
+type peer struct { + cmn.BaseService + + // raw peerConn and the multiplex connection + peerConn + mconn *tmconn.MConnection + + // peer's node info and the channel it knows about + // channels = nodeInfo.Channels + // cached to avoid copying nodeInfo in hasChannel + nodeInfo NodeInfo + channels []byte + + // User data + Data *cmn.CMap +} + +func newPeer( + pc peerConn, + mConfig tmconn.MConnConfig, + nodeInfo NodeInfo, + reactorsByCh map[byte]Reactor, + chDescs []*tmconn.ChannelDescriptor, + onPeerError func(Peer, interface{}), +) *peer { + p := &peer{ + peerConn: pc, + nodeInfo: nodeInfo, + channels: nodeInfo.Channels, + Data: cmn.NewCMap(), + } + + p.mconn = createMConnection( + pc.conn, + p, + reactorsByCh, + chDescs, + onPeerError, + mConfig, + ) + p.BaseService = *cmn.NewBaseService(nil, "Peer", p) + + return p +} + +func newOutboundPeerConn( + addr *NetAddress, + config *config.P2PConfig, + persistent bool, + ourNodePrivKey crypto.PrivKey, +) (peerConn, error) { + conn, err := dial(addr, config) + if err != nil { + return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") + } + + pc, err := newPeerConn(conn, config, true, persistent, ourNodePrivKey) + if err != nil { + if cerr := conn.Close(); cerr != nil { + return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + } + return peerConn{}, err + } + + // ensure dialed ID matches connection ID + if addr.ID != pc.ID() { + if cerr := conn.Close(); cerr != nil { + return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + } + return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()} + } + + return pc, nil +} + +func newInboundPeerConn( + conn net.Conn, + config *config.P2PConfig, + ourNodePrivKey crypto.PrivKey, +) (peerConn, error) { + + // TODO: issue PoW challenge + + return newPeerConn(conn, config, false, false, ourNodePrivKey) +} + +func newPeerConn( + rawConn net.Conn, + cfg *config.P2PConfig, + outbound, persistent bool, + ourNodePrivKey crypto.PrivKey, +) (pc peerConn, err error) { + conn := rawConn + + // Fuzz connection + if cfg.TestFuzz { + // so we have time to do peer handshakes and get set up + conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) + } + + // Set deadline for secret handshake + dl := time.Now().Add(cfg.HandshakeTimeout) + if err := conn.SetDeadline(dl); err != nil { + return pc, cmn.ErrorWrap( + err, + "Error setting deadline while encrypting connection", + ) + } + + // Encrypt connection + conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey) + if err != nil { + return pc, cmn.ErrorWrap(err, "Error creating peer") + } + + // Only the information we already have + return peerConn{ + config: cfg, + outbound: outbound, + persistent: persistent, + conn: conn, + }, nil +} + +//--------------------------------------------------- +// Implements cmn.Service + +// SetLogger implements BaseService. +func (p *peer) SetLogger(l log.Logger) { + p.Logger = l + p.mconn.SetLogger(l) +} + +// OnStart implements BaseService. +func (p *peer) OnStart() error { + if err := p.BaseService.OnStart(); err != nil { + return err + } + err := p.mconn.Start() + return err +} + +// OnStop implements BaseService. +func (p *peer) OnStop() { + p.BaseService.OnStop() + p.mconn.Stop() // stop everything and close the conn +} + +//--------------------------------------------------- +// Implements Peer + +// ID returns the peer's ID - the hex encoded hash of its pubkey. +func (p *peer) ID() ID { + return p.nodeInfo.ID +} + +// IsOutbound returns true if the connection is outbound, false otherwise. 
+func (p *peer) IsOutbound() bool {
+	return p.peerConn.outbound
+}
+
+// IsPersistent returns true if the peer is persistent, false otherwise.
+func (p *peer) IsPersistent() bool {
+	return p.peerConn.persistent
+}
+
+// NodeInfo returns a copy of the peer's NodeInfo.
+func (p *peer) NodeInfo() NodeInfo {
+	return p.nodeInfo
+}
+
+// Status returns the peer's ConnectionStatus.
+func (p *peer) Status() tmconn.ConnectionStatus {
+	return p.mconn.Status()
+}
+
+// Send msg bytes to the channel identified by chID byte. Returns false if the
+// send queue is full after timeout, specified by MConnection.
+func (p *peer) Send(chID byte, msgBytes []byte) bool {
+	if !p.IsRunning() {
+		// see Switch#Broadcast, where we fetch the list of peers and loop over
+		// them - while we're looping, one peer may be removed and stopped.
+		return false
+	} else if !p.hasChannel(chID) {
+		return false
+	}
+	return p.mconn.Send(chID, msgBytes)
+}
+
+// TrySend msg bytes to the channel identified by chID byte. Immediately returns
+// false if the send queue is full.
+func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
+	if !p.IsRunning() {
+		return false
+	} else if !p.hasChannel(chID) {
+		return false
+	}
+	return p.mconn.TrySend(chID, msgBytes)
+}
+
+// Get the data for a given key.
+func (p *peer) Get(key string) interface{} {
+	return p.Data.Get(key)
+}
+
+// Set sets the data for the given key.
+func (p *peer) Set(key string, data interface{}) {
+	p.Data.Set(key, data)
+}
+
+// hasChannel returns true if the peer reported
+// knowing about the given chID.
+func (p *peer) hasChannel(chID byte) bool {
+	for _, ch := range p.channels {
+		if ch == chID {
+			return true
+		}
+	}
+	// NOTE: probably will want to remove this
+	// but could be helpful while the feature is new
+	p.Logger.Debug(
+		"Unknown channel for peer",
+		"channel",
+		chID,
+		"channels",
+		p.channels,
+	)
+	return false
+}
+
+//---------------------------------------------------
+// methods used by the Switch
+
+// CloseConn should be called by the Switch if the peer was created but never
+// started.
+func (pc *peerConn) CloseConn() {
+	pc.conn.Close() // nolint: errcheck
+}
+
+// HandshakeTimeout performs the Tendermint P2P handshake between a given node
+// and the peer by exchanging their NodeInfo. It sets the received nodeInfo on
+// the peer.
+// NOTE: blocking
+func (pc *peerConn) HandshakeTimeout(
+	ourNodeInfo NodeInfo,
+	timeout time.Duration,
+) (peerNodeInfo NodeInfo, err error) {
+	// Set deadline for handshake so we don't block forever on conn.ReadFull
+	if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
+		return peerNodeInfo, cmn.ErrorWrap(err, "Error setting deadline")
+	}
+
+	var trs, _ = cmn.Parallel(
+		func(_ int) (val interface{}, err error, abort bool) {
+			_, err = cdc.MarshalBinaryWriter(pc.conn, ourNodeInfo)
+			return
+		},
+		func(_ int) (val interface{}, err error, abort bool) {
+			_, err = cdc.UnmarshalBinaryReader(
+				pc.conn,
+				&peerNodeInfo,
+				int64(MaxNodeInfoSize()),
+			)
+			return
+		},
+	)
+	if err := trs.FirstError(); err != nil {
+		return peerNodeInfo, cmn.ErrorWrap(err, "Error during handshake")
+	}
+
+	// Remove deadline
+	if err := pc.conn.SetDeadline(time.Time{}); err != nil {
+		return peerNodeInfo, cmn.ErrorWrap(err, "Error removing deadline")
+	}
+
+	return peerNodeInfo, nil
+}
+
+// Addr returns peer's remote network address.
+func (p *peer) Addr() net.Addr {
+	return p.peerConn.conn.RemoteAddr()
+}
+
+// CanSend returns true if the send queue is not full, false otherwise.
+func (p *peer) CanSend(chID byte) bool { + if !p.IsRunning() { + return false + } + return p.mconn.CanSend(chID) +} + +// String representation. +func (p *peer) String() string { + if p.outbound { + return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID()) + } + + return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) +} + +//------------------------------------------------------------------ +// helper funcs + +func dial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { + if cfg.TestDialFail { + return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") + } + + conn, err := addr.DialTimeout(cfg.DialTimeout) + if err != nil { + return nil, err + } + return conn, nil +} + +func createMConnection( + conn net.Conn, + p *peer, + reactorsByCh map[byte]Reactor, + chDescs []*tmconn.ChannelDescriptor, + onPeerError func(Peer, interface{}), + config tmconn.MConnConfig, +) *tmconn.MConnection { + + onReceive := func(chID byte, msgBytes []byte) { + reactor := reactorsByCh[chID] + if reactor == nil { + // Note that its ok to panic here as it's caught in the conn._recover, + // which does onPeerError. + panic(cmn.Fmt("Unknown channel %X", chID)) + } + reactor.Receive(chID, p, msgBytes) + } + + onError := func(r interface{}) { + onPeerError(p, r) + } + + return tmconn.NewMConnectionWithConfig( + conn, + chDescs, + onReceive, + onError, + config, + ) +} diff --git a/p2p/peer_set.go b/p2p/peer_set.go new file mode 100644 index 000000000..257856156 --- /dev/null +++ b/p2p/peer_set.go @@ -0,0 +1,144 @@ +package p2p + +import ( + "net" + "sync" +) + +// IPeerSet has a (immutable) subset of the methods of PeerSet. +type IPeerSet interface { + Has(key ID) bool + HasIP(ip net.IP) bool + Get(key ID) Peer + List() []Peer + Size() int +} + +//----------------------------------------------------------------------------- + +// PeerSet is a special structure for keeping a table of peers. +// Iteration over the peers is super fast and thread-safe. +type PeerSet struct { + mtx sync.Mutex + lookup map[ID]*peerSetItem + list []Peer +} + +type peerSetItem struct { + peer Peer + index int +} + +// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items. +func NewPeerSet() *PeerSet { + return &PeerSet{ + lookup: make(map[ID]*peerSetItem), + list: make([]Peer, 0, 256), + } +} + +// Add adds the peer to the PeerSet. +// It returns an error carrying the reason, if the peer is already present. +func (ps *PeerSet) Add(peer Peer) error { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.lookup[peer.ID()] != nil { + return ErrSwitchDuplicatePeerID{peer.ID()} + } + + index := len(ps.list) + // Appending is safe even with other goroutines + // iterating over the ps.list slice. + ps.list = append(ps.list, peer) + ps.lookup[peer.ID()] = &peerSetItem{peer, index} + return nil +} + +// Has returns true if the set contains the peer referred to by this +// peerKey, otherwise false. +func (ps *PeerSet) Has(peerKey ID) bool { + ps.mtx.Lock() + _, ok := ps.lookup[peerKey] + ps.mtx.Unlock() + return ok +} + +// HasIP returns true if the set contains the peer referred to by this IP +// address, otherwise false. +func (ps *PeerSet) HasIP(peerIP net.IP) bool { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return ps.hasIP(peerIP) +} + +// hasIP does not acquire a lock so it can be used in public methods which +// already lock. 
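+// It scans every entry, so it is O(n) in the number of peers.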
+func (ps *PeerSet) hasIP(peerIP net.IP) bool { + for _, item := range ps.lookup { + if item.peer.RemoteIP().Equal(peerIP) { + return true + } + } + + return false +} + +// Get looks up a peer by the provided peerKey. Returns nil if peer is not +// found. +func (ps *PeerSet) Get(peerKey ID) Peer { + ps.mtx.Lock() + defer ps.mtx.Unlock() + item, ok := ps.lookup[peerKey] + if ok { + return item.peer + } + return nil +} + +// Remove discards peer by its Key, if the peer was previously memoized. +func (ps *PeerSet) Remove(peer Peer) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + item := ps.lookup[peer.ID()] + if item == nil { + return + } + + index := item.index + // Create a new copy of the list but with one less item. + // (we must copy because we'll be mutating the list). + newList := make([]Peer, len(ps.list)-1) + copy(newList, ps.list) + // If it's the last peer, that's an easy special case. + if index == len(ps.list)-1 { + ps.list = newList + delete(ps.lookup, peer.ID()) + return + } + + // Replace the popped item with the last item in the old list. + lastPeer := ps.list[len(ps.list)-1] + lastPeerKey := lastPeer.ID() + lastPeerItem := ps.lookup[lastPeerKey] + newList[index] = lastPeer + lastPeerItem.index = index + ps.list = newList + delete(ps.lookup, peer.ID()) +} + +// Size returns the number of unique items in the peerSet. +func (ps *PeerSet) Size() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return len(ps.list) +} + +// List returns the threadsafe list of peers. +func (ps *PeerSet) List() []Peer { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return ps.list +} diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go new file mode 100644 index 000000000..aa63ef949 --- /dev/null +++ b/p2p/peer_set_test.go @@ -0,0 +1,172 @@ +package p2p + +import ( + "math/rand" + "net" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Returns an empty kvstore peer +func randPeer(ip net.IP) *peer { + if ip == nil { + ip = net.IP{127, 0, 0, 1} + } + + nodeKey := NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} + p := &peer{ + nodeInfo: NodeInfo{ + ID: nodeKey.ID(), + ListenAddr: cmn.Fmt("%v.%v.%v.%v:26656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256), + }, + } + + p.ip = ip + + return p +} + +func TestPeerSetAddRemoveOne(t *testing.T) { + t.Parallel() + + peerSet := NewPeerSet() + + var peerList []Peer + for i := 0; i < 5; i++ { + p := randPeer(net.IP{127, 0, 0, byte(i)}) + if err := peerSet.Add(p); err != nil { + t.Error(err) + } + peerList = append(peerList, p) + } + + n := len(peerList) + // 1. Test removing from the front + for i, peerAtFront := range peerList { + peerSet.Remove(peerAtFront) + wantSize := n - i - 1 + for j := 0; j < 2; j++ { + assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) + assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) + // Test the route of removing the now non-existent element + peerSet.Remove(peerAtFront) + } + } + + // 2. 
Next we are testing removing the peer at the end + // a) Replenish the peerSet + for _, peer := range peerList { + if err := peerSet.Add(peer); err != nil { + t.Error(err) + } + } + + // b) In reverse, remove each element + for i := n - 1; i >= 0; i-- { + peerAtEnd := peerList[i] + peerSet.Remove(peerAtEnd) + assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) + assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) + } +} + +func TestPeerSetAddRemoveMany(t *testing.T) { + t.Parallel() + peerSet := NewPeerSet() + + peers := []Peer{} + N := 100 + for i := 0; i < N; i++ { + peer := randPeer(net.IP{127, 0, 0, byte(i)}) + if err := peerSet.Add(peer); err != nil { + t.Errorf("Failed to add new peer") + } + if peerSet.Size() != i+1 { + t.Errorf("Failed to add new peer and increment size") + } + peers = append(peers, peer) + } + + for i, peer := range peers { + peerSet.Remove(peer) + if peerSet.Has(peer.ID()) { + t.Errorf("Failed to remove peer") + } + if peerSet.Size() != len(peers)-i-1 { + t.Errorf("Failed to remove peer and decrement size") + } + } +} + +func TestPeerSetAddDuplicate(t *testing.T) { + t.Parallel() + peerSet := NewPeerSet() + peer := randPeer(nil) + + n := 20 + errsChan := make(chan error) + // Add the same asynchronously to test the + // concurrent guarantees of our APIs, and + // our expectation in the end is that only + // one addition succeeded, but the rest are + // instances of ErrSwitchDuplicatePeer. + for i := 0; i < n; i++ { + go func() { + errsChan <- peerSet.Add(peer) + }() + } + + // Now collect and tally the results + errsTally := make(map[string]int) + for i := 0; i < n; i++ { + err := <-errsChan + + switch err.(type) { + case ErrSwitchDuplicatePeerID: + errsTally["duplicateID"]++ + default: + errsTally["other"]++ + } + } + + // Our next procedure is to ensure that only one addition + // succeeded and that the rest are each ErrSwitchDuplicatePeer. + wantErrCount, gotErrCount := n-1, errsTally["duplicateID"] + assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count") + + wantNilErrCount, gotNilErrCount := 1, errsTally["other"] + assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount") +} + +func TestPeerSetGet(t *testing.T) { + t.Parallel() + + var ( + peerSet = NewPeerSet() + peer = randPeer(nil) + ) + + assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") + + if err := peerSet.Add(peer); err != nil { + t.Fatalf("Failed to add new peer: %v", err) + } + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + // Add them asynchronously to test the + // concurrent guarantees of our APIs. 
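+		// (Get only reads under the set's mutex, so concurrent lookups
+		// of the same ID are safe.)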
+ wg.Add(1) + go func(i int) { + defer wg.Done() + have, want := peerSet.Get(peer.ID()), peer + assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want) + }(i) + } + wg.Wait() +} diff --git a/p2p/peer_test.go b/p2p/peer_test.go new file mode 100644 index 000000000..281b218d9 --- /dev/null +++ b/p2p/peer_test.go @@ -0,0 +1,175 @@ +package p2p + +import ( + golog "log" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/config" + tmconn "github.com/tendermint/tendermint/p2p/conn" +) + +const testCh = 0x01 + +func TestPeerBasic(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // simulate remote peer + rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg} + rp.Start() + defer rp.Stop() + + p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig()) + require.Nil(err) + + err = p.Start() + require.Nil(err) + defer p.Stop() + + assert.True(p.IsRunning()) + assert.True(p.IsOutbound()) + assert.False(p.IsPersistent()) + p.persistent = true + assert.True(p.IsPersistent()) + assert.Equal(rp.Addr().DialString(), p.Addr().String()) + assert.Equal(rp.ID(), p.ID()) +} + +func TestPeerSend(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + config := cfg + + // simulate remote peer + rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config} + rp.Start() + defer rp.Stop() + + p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig()) + require.Nil(err) + + err = p.Start() + require.Nil(err) + + defer p.Stop() + + assert.True(p.CanSend(testCh)) + assert.True(p.Send(testCh, []byte("Asylum"))) +} + +func createOutboundPeerAndPerformHandshake( + addr *NetAddress, + config *config.P2PConfig, + mConfig tmconn.MConnConfig, +) (*peer, error) { + chDescs := []*tmconn.ChannelDescriptor{ + {ID: testCh, Priority: 1}, + } + reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} + pk := crypto.GenPrivKeyEd25519() + pc, err := newOutboundPeerConn(addr, config, false, pk) + if err != nil { + return nil, err + } + nodeInfo, err := pc.HandshakeTimeout(NodeInfo{ + ID: addr.ID, + Moniker: "host_peer", + Network: "testing", + Version: "123.123.123", + Channels: []byte{testCh}, + }, 1*time.Second) + if err != nil { + return nil, err + } + + p := newPeer(pc, mConfig, nodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) + p.SetLogger(log.TestingLogger().With("peer", addr)) + return p, nil +} + +type remotePeer struct { + PrivKey crypto.PrivKey + Config *config.P2PConfig + addr *NetAddress + quit chan struct{} + channels cmn.HexBytes + listenAddr string +} + +func (rp *remotePeer) Addr() *NetAddress { + return rp.addr +} + +func (rp *remotePeer) ID() ID { + return PubKeyToID(rp.PrivKey.PubKey()) +} + +func (rp *remotePeer) Start() { + if rp.listenAddr == "" { + rp.listenAddr = "127.0.0.1:0" + } + + l, e := net.Listen("tcp", rp.listenAddr) // any available address + if e != nil { + golog.Fatalf("net.Listen tcp :0: %+v", e) + } + rp.addr = NewNetAddress(PubKeyToID(rp.PrivKey.PubKey()), l.Addr()) + rp.quit = make(chan struct{}) + if rp.channels == nil { + rp.channels = []byte{testCh} + } + go rp.accept(l) +} + +func (rp *remotePeer) Stop() { + close(rp.quit) +} + +func (rp *remotePeer) accept(l net.Listener) { + 
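+	// Accept inbound connections, handshake each one, and keep every conn
+	// open until Stop is called.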
conns := []net.Conn{} + + for { + conn, err := l.Accept() + if err != nil { + golog.Fatalf("Failed to accept conn: %+v", err) + } + + pc, err := newInboundPeerConn(conn, rp.Config, rp.PrivKey) + if err != nil { + golog.Fatalf("Failed to create a peer: %+v", err) + } + + _, err = pc.HandshakeTimeout(NodeInfo{ + ID: rp.Addr().ID, + Moniker: "remote_peer", + Network: "testing", + Version: "123.123.123", + ListenAddr: l.Addr().String(), + Channels: rp.channels, + }, 1*time.Second) + if err != nil { + golog.Fatalf("Failed to perform handshake: %+v", err) + } + + conns = append(conns, conn) + + select { + case <-rp.quit: + for _, conn := range conns { + if err := conn.Close(); err != nil { + golog.Fatal(err) + } + } + return + default: + } + } +} diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go new file mode 100644 index 000000000..421aa135a --- /dev/null +++ b/p2p/pex/addrbook.go @@ -0,0 +1,813 @@ +// Modified for Tendermint +// Originally Copyright (c) 2013-2014 Conformal Systems LLC. +// https://github.com/conformal/btcd/blob/master/LICENSE + +package pex + +import ( + "crypto/sha256" + "encoding/binary" + "math" + "net" + "sync" + "time" + + crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/p2p" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + bucketTypeNew = 0x01 + bucketTypeOld = 0x02 +) + +// AddrBook is an address book used for tracking peers +// so we can gossip about them to others and select +// peers to dial. +// TODO: break this up? +type AddrBook interface { + cmn.Service + + // Add our own addresses so we don't later add ourselves + AddOurAddress(*p2p.NetAddress) + // Check if it is our address + OurAddress(*p2p.NetAddress) bool + + // Add and remove an address + AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error + RemoveAddress(*p2p.NetAddress) + + // Check if the address is in the book + HasAddress(*p2p.NetAddress) bool + + // Do we need more peers? + NeedMoreAddrs() bool + + // Pick an address to dial + PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress + + // Mark address + MarkGood(*p2p.NetAddress) + MarkAttempt(*p2p.NetAddress) + MarkBad(*p2p.NetAddress) + + IsGood(*p2p.NetAddress) bool + + // Send a selection of addresses to peers + GetSelection() []*p2p.NetAddress + // Send a selection of addresses with bias + GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress + + // TODO: remove + ListOfKnownAddresses() []*knownAddress + + // Persist to disk + Save() +} + +var _ AddrBook = (*addrBook)(nil) + +// addrBook - concurrency safe peer address manager. +// Implements AddrBook. +type addrBook struct { + cmn.BaseService + + // immutable after creation + filePath string + routabilityStrict bool + key string // random prefix for bucket placement + + // accessed concurrently + mtx sync.Mutex + rand *cmn.Rand + ourAddrs map[string]struct{} + addrLookup map[p2p.ID]*knownAddress // new & old + bucketsOld []map[string]*knownAddress + bucketsNew []map[string]*knownAddress + nOld int + nNew int + + wg sync.WaitGroup +} + +// NewAddrBook creates a new address book. +// Use Start to begin processing asynchronous address updates. +func NewAddrBook(filePath string, routabilityStrict bool) *addrBook { + am := &addrBook{ + rand: cmn.NewRand(), + ourAddrs: make(map[string]struct{}), + addrLookup: make(map[p2p.ID]*knownAddress), + filePath: filePath, + routabilityStrict: routabilityStrict, + } + am.init() + am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am) + return am +} + +// Initialize the buckets. 
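+// The book keeps newBucketCount "new" buckets and oldBucketCount "old"
+// buckets, each a map from an address string to its *knownAddress.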
+// When modifying this, don't forget to update loadFromFile()
+func (a *addrBook) init() {
+	a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
+	// New addr buckets
+	a.bucketsNew = make([]map[string]*knownAddress, newBucketCount)
+	for i := range a.bucketsNew {
+		a.bucketsNew[i] = make(map[string]*knownAddress)
+	}
+	// Old addr buckets
+	a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount)
+	for i := range a.bucketsOld {
+		a.bucketsOld[i] = make(map[string]*knownAddress)
+	}
+}
+
+// OnStart implements Service.
+func (a *addrBook) OnStart() error {
+	if err := a.BaseService.OnStart(); err != nil {
+		return err
+	}
+	a.loadFromFile(a.filePath)
+
+	// wg.Add to ensure that any invocation of .Wait()
+	// later on will wait for saveRoutine to terminate.
+	a.wg.Add(1)
+	go a.saveRoutine()
+
+	return nil
+}
+
+// OnStop implements Service.
+func (a *addrBook) OnStop() {
+	a.BaseService.OnStop()
+}
+
+func (a *addrBook) Wait() {
+	a.wg.Wait()
+}
+
+func (a *addrBook) FilePath() string {
+	return a.filePath
+}
+
+//-------------------------------------------------------
+
+// AddOurAddress records one of our own addresses.
+func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	a.Logger.Info("Add our address to book", "addr", addr)
+	a.ourAddrs[addr.String()] = struct{}{}
+}
+
+// OurAddress returns true if it is our address.
+func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	_, ok := a.ourAddrs[addr.String()]
+	a.mtx.Unlock()
+	return ok
+}
+
+// AddAddress implements AddrBook.
+// Add address to a "new" bucket. If it's already in one, only add it probabilistically.
+// Returns an error if the addr is non-routable. Does not add self.
+// NOTE: addr must not be nil
+func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	return a.addAddress(addr, src)
+}
+
+// RemoveAddress implements AddrBook - removes the address from the book.
+func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	ka := a.addrLookup[addr.ID]
+	if ka == nil {
+		return
+	}
+	a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID())
+	a.removeFromAllBuckets(ka)
+}
+
+// IsGood returns true if the peer was ever marked as good and hasn't
+// done anything wrong since then.
+func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	return a.addrLookup[addr.ID].isOld()
+}
+
+// HasAddress returns true if the address is in the book.
+func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	ka := a.addrLookup[addr.ID]
+	return ka != nil
+}
+
+// NeedMoreAddrs implements AddrBook - returns true if there are not enough
+// addresses in the book.
+func (a *addrBook) NeedMoreAddrs() bool {
+	return a.Size() < needAddressThreshold
+}
+
+// PickAddress implements AddrBook. It picks an address to connect to.
+// The address is picked randomly from an old or new bucket according
+// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
+// and determines how biased we are to pick an address from a new bucket.
+// PickAddress returns nil if the AddrBook is empty or if we try to pick
+// from an empty bucket.
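+// For example, with nOld=100, nNew=4 and a bias of 50, the old weight is
+// sqrt(100)*50 = 500 and the new weight is sqrt(4)*50 = 100, so an old
+// bucket is chosen ~83% of the time.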
+func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { + a.mtx.Lock() + defer a.mtx.Unlock() + + bookSize := a.size() + if bookSize <= 0 { + if bookSize < 0 { + a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) + } + return nil + } + if biasTowardsNewAddrs > 100 { + biasTowardsNewAddrs = 100 + } + if biasTowardsNewAddrs < 0 { + biasTowardsNewAddrs = 0 + } + + // Bias between new and old addresses. + oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs)) + newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs) + + // pick a random peer from a random bucket + var bucket map[string]*knownAddress + pickFromOldBucket := (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation + if (pickFromOldBucket && a.nOld == 0) || + (!pickFromOldBucket && a.nNew == 0) { + return nil + } + // loop until we pick a random non-empty bucket + for len(bucket) == 0 { + if pickFromOldBucket { + bucket = a.bucketsOld[a.rand.Intn(len(a.bucketsOld))] + } else { + bucket = a.bucketsNew[a.rand.Intn(len(a.bucketsNew))] + } + } + // pick a random index and loop over the map to return that index + randIndex := a.rand.Intn(len(bucket)) + for _, ka := range bucket { + if randIndex == 0 { + return ka.Addr + } + randIndex-- + } + return nil +} + +// MarkGood implements AddrBook - it marks the peer as good and +// moves it into an "old" bucket. +func (a *addrBook) MarkGood(addr *p2p.NetAddress) { + a.mtx.Lock() + defer a.mtx.Unlock() + ka := a.addrLookup[addr.ID] + if ka == nil { + return + } + ka.markGood() + if ka.isNew() { + a.moveToOld(ka) + } +} + +// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address. +func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { + a.mtx.Lock() + defer a.mtx.Unlock() + ka := a.addrLookup[addr.ID] + if ka == nil { + return + } + ka.markAttempt() +} + +// MarkBad implements AddrBook. Currently it just ejects the address. +// TODO: black list for some amount of time +func (a *addrBook) MarkBad(addr *p2p.NetAddress) { + a.RemoveAddress(addr) +} + +// GetSelection implements AddrBook. +// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. +// Must never return a nil address. +func (a *addrBook) GetSelection() []*p2p.NetAddress { + a.mtx.Lock() + defer a.mtx.Unlock() + + bookSize := a.size() + if bookSize <= 0 { + if bookSize < 0 { + a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) + } + return nil + } + + numAddresses := cmn.MaxInt( + cmn.MinInt(minGetSelection, bookSize), + bookSize*getSelectionPercent/100) + numAddresses = cmn.MinInt(maxGetSelection, numAddresses) + + // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk, + // could we just select a random numAddresses of indexes? + allAddr := make([]*p2p.NetAddress, bookSize) + i := 0 + for _, ka := range a.addrLookup { + allAddr[i] = ka.Addr + i++ + } + + // Fisher-Yates shuffle the array. We only need to do the first + // `numAddresses' since we are throwing the rest. + for i := 0; i < numAddresses; i++ { + // pick a number between current index and the end + j := cmn.RandIntn(len(allAddr)-i) + i + allAddr[i], allAddr[j] = allAddr[j], allAddr[i] + } + + // slice off the limit we are willing to share. + return allAddr[:numAddresses] +} + +// GetSelectionWithBias implements AddrBook. +// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. 
+// Must never return a nil address. +// +// Each address is picked randomly from an old or new bucket according to the +// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to +// that range) and determines how biased we are to pick an address from a new +// bucket. +func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress { + a.mtx.Lock() + defer a.mtx.Unlock() + + bookSize := a.size() + if bookSize <= 0 { + if bookSize < 0 { + a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) + } + return nil + } + + if biasTowardsNewAddrs > 100 { + biasTowardsNewAddrs = 100 + } + if biasTowardsNewAddrs < 0 { + biasTowardsNewAddrs = 0 + } + + numAddresses := cmn.MaxInt( + cmn.MinInt(minGetSelection, bookSize), + bookSize*getSelectionPercent/100) + numAddresses = cmn.MinInt(maxGetSelection, numAddresses) + + selection := make([]*p2p.NetAddress, numAddresses) + + oldBucketToAddrsMap := make(map[int]map[string]struct{}) + var oldIndex int + newBucketToAddrsMap := make(map[int]map[string]struct{}) + var newIndex int + + selectionIndex := 0 +ADDRS_LOOP: + for selectionIndex < numAddresses { + pickFromOldBucket := int((float64(selectionIndex)/float64(numAddresses))*100) >= biasTowardsNewAddrs + pickFromOldBucket = (pickFromOldBucket && a.nOld > 0) || a.nNew == 0 + bucket := make(map[string]*knownAddress) + + // loop until we pick a random non-empty bucket + for len(bucket) == 0 { + if pickFromOldBucket { + oldIndex = a.rand.Intn(len(a.bucketsOld)) + bucket = a.bucketsOld[oldIndex] + } else { + newIndex = a.rand.Intn(len(a.bucketsNew)) + bucket = a.bucketsNew[newIndex] + } + } + + // pick a random index + randIndex := a.rand.Intn(len(bucket)) + + // loop over the map to return that index + var selectedAddr *p2p.NetAddress + for _, ka := range bucket { + if randIndex == 0 { + selectedAddr = ka.Addr + break + } + randIndex-- + } + + // if we have selected the address before, restart the loop + // otherwise, record it and continue + if pickFromOldBucket { + if addrsMap, ok := oldBucketToAddrsMap[oldIndex]; ok { + if _, ok = addrsMap[selectedAddr.String()]; ok { + continue ADDRS_LOOP + } + } else { + oldBucketToAddrsMap[oldIndex] = make(map[string]struct{}) + } + oldBucketToAddrsMap[oldIndex][selectedAddr.String()] = struct{}{} + } else { + if addrsMap, ok := newBucketToAddrsMap[newIndex]; ok { + if _, ok = addrsMap[selectedAddr.String()]; ok { + continue ADDRS_LOOP + } + } else { + newBucketToAddrsMap[newIndex] = make(map[string]struct{}) + } + newBucketToAddrsMap[newIndex][selectedAddr.String()] = struct{}{} + } + + selection[selectionIndex] = selectedAddr + selectionIndex++ + } + + return selection +} + +// ListOfKnownAddresses returns the new and old addresses. +func (a *addrBook) ListOfKnownAddresses() []*knownAddress { + a.mtx.Lock() + defer a.mtx.Unlock() + + addrs := []*knownAddress{} + for _, addr := range a.addrLookup { + addrs = append(addrs, addr.copy()) + } + return addrs +} + +//------------------------------------------------ + +// Size returns the number of addresses in the book. +func (a *addrBook) Size() int { + a.mtx.Lock() + defer a.mtx.Unlock() + return a.size() +} + +func (a *addrBook) size() int { + return a.nNew + a.nOld +} + +//---------------------------------------------------------- + +// Save persists the address book to disk. 
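+// It is safe to call concurrently with other book operations, since
+// saveToFile acquires the book's lock before snapshotting the addresses.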
+func (a *addrBook) Save() {
+	a.saveToFile(a.filePath) // thread safe
+}
+
+func (a *addrBook) saveRoutine() {
+	defer a.wg.Done()
+
+	saveFileTicker := time.NewTicker(dumpAddressInterval)
+out:
+	for {
+		select {
+		case <-saveFileTicker.C:
+			a.saveToFile(a.filePath)
+		case <-a.Quit():
+			break out
+		}
+	}
+	saveFileTicker.Stop()
+	a.saveToFile(a.filePath)
+	a.Logger.Info("Address handler done")
+}
+
+//----------------------------------------------------------
+
+func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
+	switch bucketType {
+	case bucketTypeNew:
+		return a.bucketsNew[bucketIdx]
+	case bucketTypeOld:
+		return a.bucketsOld[bucketIdx]
+	default:
+		cmn.PanicSanity("Should not happen")
+		return nil
+	}
+}
+
+// Adds ka to the new bucket. If the bucket is over capacity, an entry is
+// expired first to make room.
+func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) {
+	// Sanity check
+	if ka.isOld() {
+		a.Logger.Error("Failed Sanity Check! Can't add old address to new bucket", "ka", ka, "bucket", bucketIdx)
+		return
+	}
+
+	addrStr := ka.Addr.String()
+	bucket := a.getBucket(bucketTypeNew, bucketIdx)
+
+	// Already exists?
+	if _, ok := bucket[addrStr]; ok {
+		return
+	}
+
+	// Enforce max addresses.
+	if len(bucket) > newBucketSize {
+		a.Logger.Info("new bucket is full, expiring new")
+		a.expireNew(bucketIdx)
+	}
+
+	// Add to bucket.
+	bucket[addrStr] = ka
+	// increment nNew if the peer doesn't already exist in a bucket
+	if ka.addBucketRef(bucketIdx) == 1 {
+		a.nNew++
+	}
+
+	// Add it to addrLookup
+	a.addrLookup[ka.ID()] = ka
+}
+
+// Adds ka to the old bucket. Returns false if the bucket is full or ka fails
+// the sanity checks.
+func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
+	// Sanity check
+	if ka.isNew() {
+		a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
+		return false
+	}
+	if len(ka.Buckets) != 0 {
+		a.Logger.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
+		return false
+	}
+
+	addrStr := ka.Addr.String()
+	bucket := a.getBucket(bucketTypeOld, bucketIdx)
+
+	// Already exists?
+	if _, ok := bucket[addrStr]; ok {
+		return true
+	}
+
+	// Enforce max addresses.
+	if len(bucket) > oldBucketSize {
+		return false
+	}
+
+	// Add to bucket.
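+	// addBucketRef reports how many buckets now reference ka; a result of 1
+	// means this is its first bucket, so the old-address counter is bumped.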
+ bucket[addrStr] = ka + if ka.addBucketRef(bucketIdx) == 1 { + a.nOld++ + } + + // Ensure in addrLookup + a.addrLookup[ka.ID()] = ka + + return true +} + +func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { + if ka.BucketType != bucketType { + a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka)) + return + } + bucket := a.getBucket(bucketType, bucketIdx) + delete(bucket, ka.Addr.String()) + if ka.removeBucketRef(bucketIdx) == 0 { + if bucketType == bucketTypeNew { + a.nNew-- + } else { + a.nOld-- + } + delete(a.addrLookup, ka.ID()) + } +} + +func (a *addrBook) removeFromAllBuckets(ka *knownAddress) { + for _, bucketIdx := range ka.Buckets { + bucket := a.getBucket(ka.BucketType, bucketIdx) + delete(bucket, ka.Addr.String()) + } + ka.Buckets = nil + if ka.BucketType == bucketTypeNew { + a.nNew-- + } else { + a.nOld-- + } + delete(a.addrLookup, ka.ID()) +} + +//---------------------------------------------------------- + +func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { + bucket := a.getBucket(bucketType, bucketIdx) + var oldest *knownAddress + for _, ka := range bucket { + if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) { + oldest = ka + } + } + return oldest +} + +// adds the address to a "new" bucket. if its already in one, +// it only adds it probabilistically +func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { + if addr == nil || src == nil { + return ErrAddrBookNilAddr{addr, src} + } + + if a.routabilityStrict && !addr.Routable() { + return ErrAddrBookNonRoutable{addr} + } + // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. + if _, ok := a.ourAddrs[addr.String()]; ok { + return ErrAddrBookSelf{addr} + } + + ka := a.addrLookup[addr.ID] + if ka != nil { + // If its already old and the addr is the same, ignore it. + if ka.isOld() && ka.Addr.Equals(addr) { + return nil + } + // Already in max new buckets. + if len(ka.Buckets) == maxNewBucketsPerAddress { + return nil + } + // The more entries we have, the less likely we are to add more. + factor := int32(2 * len(ka.Buckets)) + if a.rand.Int31n(factor) != 0 { + return nil + } + } else { + ka = newKnownAddress(addr, src) + } + + bucket := a.calcNewBucket(addr, src) + a.addToNewBucket(ka, bucket) + return nil +} + +// Make space in the new buckets by expiring the really bad entries. +// If no bad entries are available we remove the oldest. +func (a *addrBook) expireNew(bucketIdx int) { + for addrStr, ka := range a.bucketsNew[bucketIdx] { + // If an entry is bad, throw it away + if ka.isBad() { + a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr)) + a.removeFromBucket(ka, bucketTypeNew, bucketIdx) + return + } + } + + // If we haven't thrown out a bad entry, throw out the oldest entry + oldest := a.pickOldest(bucketTypeNew, bucketIdx) + a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) +} + +// Promotes an address from new to old. If the destination bucket is full, +// demote the oldest one to a "new" bucket. +// TODO: Demote more probabilistically? +func (a *addrBook) moveToOld(ka *knownAddress) { + // Sanity check + if ka.isOld() { + a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka)) + return + } + if len(ka.Buckets) == 0 { + a.Logger.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka)) + return + } + + // Remove from all (new) buckets. + a.removeFromAllBuckets(ka) + // It's officially old now. 
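+	// From this point isOld() reports true, so IsGood will treat the address
+	// as vetted.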
+ ka.BucketType = bucketTypeOld + + // Try to add it to its oldBucket destination. + oldBucketIdx := a.calcOldBucket(ka.Addr) + added := a.addToOldBucket(ka, oldBucketIdx) + if !added { + // No room; move the oldest to a new bucket + oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) + a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) + newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src) + a.addToNewBucket(oldest, newBucketIdx) + + // Finally, add our ka to old bucket again. + added = a.addToOldBucket(ka, oldBucketIdx) + if !added { + a.Logger.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) + } + } +} + +//--------------------------------------------------------------------- +// calculate bucket placements + +// doublesha256( key + sourcegroup + +// int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets +func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) int { + data1 := []byte{} + data1 = append(data1, []byte(a.key)...) + data1 = append(data1, []byte(a.groupKey(addr))...) + data1 = append(data1, []byte(a.groupKey(src))...) + hash1 := doubleSha256(data1) + hash64 := binary.BigEndian.Uint64(hash1) + hash64 %= newBucketsPerGroup + var hashbuf [8]byte + binary.BigEndian.PutUint64(hashbuf[:], hash64) + data2 := []byte{} + data2 = append(data2, []byte(a.key)...) + data2 = append(data2, a.groupKey(src)...) + data2 = append(data2, hashbuf[:]...) + + hash2 := doubleSha256(data2) + return int(binary.BigEndian.Uint64(hash2) % newBucketCount) +} + +// doublesha256( key + group + +// int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets +func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) int { + data1 := []byte{} + data1 = append(data1, []byte(a.key)...) + data1 = append(data1, []byte(addr.String())...) + hash1 := doubleSha256(data1) + hash64 := binary.BigEndian.Uint64(hash1) + hash64 %= oldBucketsPerGroup + var hashbuf [8]byte + binary.BigEndian.PutUint64(hashbuf[:], hash64) + data2 := []byte{} + data2 = append(data2, []byte(a.key)...) + data2 = append(data2, a.groupKey(addr)...) + data2 = append(data2, hashbuf[:]...) + + hash2 := doubleSha256(data2) + return int(binary.BigEndian.Uint64(hash2) % oldBucketCount) +} + +// Return a string representing the network group of this address. +// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string +// "local" for a local address and the string "unroutable" for an unroutable +// address. +func (a *addrBook) groupKey(na *p2p.NetAddress) string { + if a.routabilityStrict && na.Local() { + return "local" + } + if a.routabilityStrict && !na.Routable() { + return "unroutable" + } + + if ipv4 := na.IP.To4(); ipv4 != nil { + return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String() + } + if na.RFC6145() || na.RFC6052() { + // last four bytes are the ip address + ip := net.IP(na.IP[12:16]) + return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() + } + + if na.RFC3964() { + ip := net.IP(na.IP[2:7]) + return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() + + } + if na.RFC4380() { + // teredo tunnels have the last 4 bytes as the v4 address XOR + // 0xff. + ip := net.IP(make([]byte, 4)) + for i, byte := range na.IP[12:16] { + ip[i] = byte ^ 0xff + } + return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() + } + + // OK, so now we know ourselves to be a IPv6 address. + // bitcoind uses /32 for everything, except for Hurricane Electric's + // (he.net) IP range, which it uses /36 for. 
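+	// 2001:470::/32 is Hurricane Electric's tunnel-broker range, hence the
+	// wider /36 grouping below.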
+ bits := 32 + heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), + Mask: net.CIDRMask(32, 128)} + if heNet.Contains(na.IP) { + bits = 36 + } + + return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String() +} + +// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes. +func doubleSha256(b []byte) []byte { + hasher := sha256.New() + hasher.Write(b) // nolint: errcheck, gas + sum := hasher.Sum(nil) + hasher.Reset() + hasher.Write(sum) // nolint: errcheck, gas + return hasher.Sum(nil) +} diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go new file mode 100644 index 000000000..dd983f76f --- /dev/null +++ b/p2p/pex/addrbook_test.go @@ -0,0 +1,356 @@ +package pex + +import ( + "encoding/hex" + "fmt" + "io/ioutil" + "math/rand" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/p2p" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +func createTempFileName(prefix string) string { + f, err := ioutil.TempFile("", prefix) + if err != nil { + panic(err) + } + fname := f.Name() + err = f.Close() + if err != nil { + panic(err) + } + return fname +} + +func deleteTempFile(fname string) { + err := os.Remove(fname) + if err != nil { + panic(err) + } +} + +func TestAddrBookPickAddress(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + // 0 addresses + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + assert.Zero(t, book.Size()) + + addr := book.PickAddress(50) + assert.Nil(t, addr, "expected no address") + + randAddrs := randNetAddressPairs(t, 1) + addrSrc := randAddrs[0] + book.AddAddress(addrSrc.addr, addrSrc.src) + + // pick an address when we only have new address + addr = book.PickAddress(0) + assert.NotNil(t, addr, "expected an address") + addr = book.PickAddress(50) + assert.NotNil(t, addr, "expected an address") + addr = book.PickAddress(100) + assert.NotNil(t, addr, "expected an address") + + // pick an address when we only have old address + book.MarkGood(addrSrc.addr) + addr = book.PickAddress(0) + assert.NotNil(t, addr, "expected an address") + addr = book.PickAddress(50) + assert.NotNil(t, addr, "expected an address") + + // in this case, nNew==0 but we biased 100% to new, so we return nil + addr = book.PickAddress(100) + assert.Nil(t, addr, "did not expected an address") +} + +func TestAddrBookSaveLoad(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + // 0 addresses + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + book.saveToFile(fname) + + book = NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + book.loadFromFile(fname) + + assert.Zero(t, book.Size()) + + // 100 addresses + randAddrs := randNetAddressPairs(t, 100) + + for _, addrSrc := range randAddrs { + book.AddAddress(addrSrc.addr, addrSrc.src) + } + + assert.Equal(t, 100, book.Size()) + book.saveToFile(fname) + + book = NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + book.loadFromFile(fname) + + assert.Equal(t, 100, book.Size()) +} + +func TestAddrBookLookup(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + randAddrs := randNetAddressPairs(t, 100) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + for _, addrSrc := range randAddrs { + addr := addrSrc.addr + src := addrSrc.src + book.AddAddress(addr, src) + + ka := book.addrLookup[addr.ID] + 
assert.NotNil(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) + + if !(ka.Addr.Equals(addr) && ka.Src.Equals(src)) { + t.Fatalf("KnownAddress doesn't match addr & src") + } + } +} + +func TestAddrBookPromoteToOld(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + randAddrs := randNetAddressPairs(t, 100) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + for _, addrSrc := range randAddrs { + book.AddAddress(addrSrc.addr, addrSrc.src) + } + + // Attempt all addresses. + for _, addrSrc := range randAddrs { + book.MarkAttempt(addrSrc.addr) + } + + // Promote half of them + for i, addrSrc := range randAddrs { + if i%2 == 0 { + book.MarkGood(addrSrc.addr) + } + } + + // TODO: do more testing :) + + selection := book.GetSelection() + t.Logf("selection: %v", selection) + + if len(selection) > book.Size() { + t.Errorf("selection could not be bigger than the book") + } + + selection = book.GetSelectionWithBias(30) + t.Logf("selection: %v", selection) + + if len(selection) > book.Size() { + t.Errorf("selection with bias could not be bigger than the book") + } + + assert.Equal(t, book.Size(), 100, "expecting book size to be 100") +} + +func TestAddrBookHandlesDuplicates(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + + randAddrs := randNetAddressPairs(t, 100) + + differentSrc := randIPv4Address(t) + for _, addrSrc := range randAddrs { + book.AddAddress(addrSrc.addr, addrSrc.src) + book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate + book.AddAddress(addrSrc.addr, differentSrc) // different src + } + + assert.Equal(t, 100, book.Size()) +} + +type netAddressPair struct { + addr *p2p.NetAddress + src *p2p.NetAddress +} + +func randNetAddressPairs(t *testing.T, n int) []netAddressPair { + randAddrs := make([]netAddressPair, n) + for i := 0; i < n; i++ { + randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)} + } + return randAddrs +} + +func randIPv4Address(t *testing.T) *p2p.NetAddress { + for { + ip := fmt.Sprintf("%v.%v.%v.%v", + rand.Intn(254)+1, + rand.Intn(255), + rand.Intn(255), + rand.Intn(255), + ) + port := rand.Intn(65535-1) + 1 + id := p2p.ID(hex.EncodeToString(cmn.RandBytes(p2p.IDByteLength))) + idAddr := p2p.IDAddressString(id, fmt.Sprintf("%v:%v", ip, port)) + addr, err := p2p.NewNetAddressString(idAddr) + assert.Nil(t, err, "error generating rand network address") + if addr.Routable() { + return addr + } + } +} + +func TestAddrBookRemoveAddress(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + + addr := randIPv4Address(t) + book.AddAddress(addr, addr) + assert.Equal(t, 1, book.Size()) + + book.RemoveAddress(addr) + assert.Equal(t, 0, book.Size()) + + nonExistingAddr := randIPv4Address(t) + book.RemoveAddress(nonExistingAddr) + assert.Equal(t, 0, book.Size()) +} + +func TestAddrBookGetSelection(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + + // 1) empty book + assert.Empty(t, book.GetSelection()) + + // 2) add one address + addr := randIPv4Address(t) + book.AddAddress(addr, addr) + + assert.Equal(t, 1, len(book.GetSelection())) + assert.Equal(t, addr, book.GetSelection()[0]) + + // 3) add a bunch of addresses + 
randAddrs := randNetAddressPairs(t, 100) + for _, addrSrc := range randAddrs { + book.AddAddress(addrSrc.addr, addrSrc.src) + } + + // check there is no duplicates + addrs := make(map[string]*p2p.NetAddress) + selection := book.GetSelection() + for _, addr := range selection { + if dup, ok := addrs[addr.String()]; ok { + t.Fatalf("selection %v contains duplicates %v", selection, dup) + } + addrs[addr.String()] = addr + } + + if len(selection) > book.Size() { + t.Errorf("selection %v could not be bigger than the book", selection) + } +} + +func TestAddrBookGetSelectionWithBias(t *testing.T) { + const biasTowardsNewAddrs = 30 + + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + + // 1) empty book + selection := book.GetSelectionWithBias(biasTowardsNewAddrs) + assert.Empty(t, selection) + + // 2) add one address + addr := randIPv4Address(t) + book.AddAddress(addr, addr) + + selection = book.GetSelectionWithBias(biasTowardsNewAddrs) + assert.Equal(t, 1, len(selection)) + assert.Equal(t, addr, selection[0]) + + // 3) add a bunch of addresses + randAddrs := randNetAddressPairs(t, 100) + for _, addrSrc := range randAddrs { + book.AddAddress(addrSrc.addr, addrSrc.src) + } + + // check there is no duplicates + addrs := make(map[string]*p2p.NetAddress) + selection = book.GetSelectionWithBias(biasTowardsNewAddrs) + for _, addr := range selection { + if dup, ok := addrs[addr.String()]; ok { + t.Fatalf("selection %v contains duplicates %v", selection, dup) + } + addrs[addr.String()] = addr + } + + if len(selection) > book.Size() { + t.Fatalf("selection %v could not be bigger than the book", selection) + } + + // 4) mark 80% of the addresses as good + randAddrsLen := len(randAddrs) + for i, addrSrc := range randAddrs { + if int((float64(i)/float64(randAddrsLen))*100) >= 20 { + book.MarkGood(addrSrc.addr) + } + } + + selection = book.GetSelectionWithBias(biasTowardsNewAddrs) + + // check that ~70% of addresses returned are good + good := 0 + for _, addr := range selection { + if book.IsGood(addr) { + good++ + } + } + got, expected := int((float64(good)/float64(len(selection)))*100), (100 - biasTowardsNewAddrs) + if got >= expected { + t.Fatalf("expected more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection)) + } +} + +func TestAddrBookHasAddress(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + addr := randIPv4Address(t) + book.AddAddress(addr, addr) + + assert.True(t, book.HasAddress(addr)) + + book.RemoveAddress(addr) + + assert.False(t, book.HasAddress(addr)) +} diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go new file mode 100644 index 000000000..0b8bf4715 --- /dev/null +++ b/p2p/pex/errors.go @@ -0,0 +1,32 @@ +package pex + +import ( + "fmt" + + "github.com/tendermint/tendermint/p2p" +) + +type ErrAddrBookNonRoutable struct { + Addr *p2p.NetAddress +} + +func (err ErrAddrBookNonRoutable) Error() string { + return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) +} + +type ErrAddrBookSelf struct { + Addr *p2p.NetAddress +} + +func (err ErrAddrBookSelf) Error() string { + return fmt.Sprintf("Cannot add ourselves with address %v", err.Addr) +} + +type ErrAddrBookNilAddr struct { + Addr *p2p.NetAddress + Src *p2p.NetAddress +} + +func (err ErrAddrBookNilAddr) Error() string { + return fmt.Sprintf("Cannot 
add a nil address. Got (addr, src) = (%v, %v)", err.Addr, err.Src) +} diff --git a/p2p/pex/file.go b/p2p/pex/file.go new file mode 100644 index 000000000..3237e1253 --- /dev/null +++ b/p2p/pex/file.go @@ -0,0 +1,83 @@ +package pex + +import ( + "encoding/json" + "os" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* Loading & Saving */ + +type addrBookJSON struct { + Key string `json:"key"` + Addrs []*knownAddress `json:"addrs"` +} + +func (a *addrBook) saveToFile(filePath string) { + a.Logger.Info("Saving AddrBook to file", "size", a.Size()) + + a.mtx.Lock() + defer a.mtx.Unlock() + // Compile Addrs + addrs := []*knownAddress{} + for _, ka := range a.addrLookup { + addrs = append(addrs, ka) + } + + aJSON := &addrBookJSON{ + Key: a.key, + Addrs: addrs, + } + + jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") + if err != nil { + a.Logger.Error("Failed to save AddrBook to file", "err", err) + return + } + err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644) + if err != nil { + a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) + } +} + +// Returns false if file does not exist. +// cmn.Panics if file is corrupt. +func (a *addrBook) loadFromFile(filePath string) bool { + // If doesn't exist, do nothing. + _, err := os.Stat(filePath) + if os.IsNotExist(err) { + return false + } + + // Load addrBookJSON{} + r, err := os.Open(filePath) + if err != nil { + cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err)) + } + defer r.Close() // nolint: errcheck + aJSON := &addrBookJSON{} + dec := json.NewDecoder(r) + err = dec.Decode(aJSON) + if err != nil { + cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err)) + } + + // Restore all the fields... + // Restore the key + a.key = aJSON.Key + // Restore .bucketsNew & .bucketsOld + for _, ka := range aJSON.Addrs { + for _, bucketIndex := range ka.Buckets { + bucket := a.getBucket(ka.BucketType, bucketIndex) + bucket[ka.Addr.String()] = ka + } + a.addrLookup[ka.ID()] = ka + if ka.BucketType == bucketTypeNew { + a.nNew++ + } else { + a.nOld++ + } + } + return true +} diff --git a/p2p/pex/known_address.go b/p2p/pex/known_address.go new file mode 100644 index 000000000..5673dec11 --- /dev/null +++ b/p2p/pex/known_address.go @@ -0,0 +1,141 @@ +package pex + +import ( + "time" + + "github.com/tendermint/tendermint/p2p" +) + +// knownAddress tracks information about a known network address +// that is used to determine how viable an address is. 
+type knownAddress struct { + Addr *p2p.NetAddress `json:"addr"` + Src *p2p.NetAddress `json:"src"` + Attempts int32 `json:"attempts"` + LastAttempt time.Time `json:"last_attempt"` + LastSuccess time.Time `json:"last_success"` + BucketType byte `json:"bucket_type"` + Buckets []int `json:"buckets"` +} + +func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { + return &knownAddress{ + Addr: addr, + Src: src, + Attempts: 0, + LastAttempt: time.Now(), + BucketType: bucketTypeNew, + Buckets: nil, + } +} + +func (ka *knownAddress) ID() p2p.ID { + return ka.Addr.ID +} + +func (ka *knownAddress) copy() *knownAddress { + return &knownAddress{ + Addr: ka.Addr, + Src: ka.Src, + Attempts: ka.Attempts, + LastAttempt: ka.LastAttempt, + LastSuccess: ka.LastSuccess, + BucketType: ka.BucketType, + Buckets: ka.Buckets, + } +} + +func (ka *knownAddress) isOld() bool { + return ka.BucketType == bucketTypeOld +} + +func (ka *knownAddress) isNew() bool { + return ka.BucketType == bucketTypeNew +} + +func (ka *knownAddress) markAttempt() { + now := time.Now() + ka.LastAttempt = now + ka.Attempts++ +} + +func (ka *knownAddress) markGood() { + now := time.Now() + ka.LastAttempt = now + ka.Attempts = 0 + ka.LastSuccess = now +} + +func (ka *knownAddress) addBucketRef(bucketIdx int) int { + for _, bucket := range ka.Buckets { + if bucket == bucketIdx { + // TODO refactor to return error? + // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) + return -1 + } + } + ka.Buckets = append(ka.Buckets, bucketIdx) + return len(ka.Buckets) +} + +func (ka *knownAddress) removeBucketRef(bucketIdx int) int { + buckets := []int{} + for _, bucket := range ka.Buckets { + if bucket != bucketIdx { + buckets = append(buckets, bucket) + } + } + if len(buckets) != len(ka.Buckets)-1 { + // TODO refactor to return error? + // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) + return -1 + } + ka.Buckets = buckets + return len(ka.Buckets) +} + +/* + An address is bad if the address in question is a New address, has not been tried in the last + minute, and meets one of the following criteria: + + 1) It claims to be from the future + 2) It hasn't been seen in over a week + 3) It has failed at least three times and never succeeded + 4) It has failed ten times in the last week + + All addresses that meet these criteria are assumed to be worthless and not + worth keeping hold of. + +*/ +func (ka *knownAddress) isBad() bool { + // Is Old --> good + if ka.BucketType == bucketTypeOld { + return false + } + + // Has been attempted in the last minute --> good + if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) { + return false + } + + // TODO: From the future? + + // Too old? + // TODO: should be a timestamp of last seen, not just last attempt + if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { + return true + } + + // Never succeeded? + if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { + return true + } + + // Hasn't succeeded in too long? + if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && + ka.Attempts >= maxFailures { + return true + } + + return false +} diff --git a/p2p/pex/params.go b/p2p/pex/params.go new file mode 100644 index 000000000..29b4d45ab --- /dev/null +++ b/p2p/pex/params.go @@ -0,0 +1,55 @@ +package pex + +import "time" + +const ( + // addresses under which the address manager will claim to need more addresses. + needAddressThreshold = 1000 + + // interval used to dump the address cache to disk for future use. 
+	dumpAddressInterval = time.Minute * 2
+
+	// max addresses in each old address bucket.
+	oldBucketSize = 64
+
+	// buckets we split old addresses over.
+	oldBucketCount = 64
+
+	// max addresses in each new address bucket.
+	newBucketSize = 64
+
+	// buckets that we spread new addresses over.
+	newBucketCount = 256
+
+	// old buckets over which an address group will be spread.
+	oldBucketsPerGroup = 4
+
+	// new buckets over which a source address group will be spread.
+	newBucketsPerGroup = 32
+
+	// buckets a frequently seen new address may end up in.
+	maxNewBucketsPerAddress = 4
+
+	// days before which we assume an address has vanished
+	// if we have not seen it announced in that long.
+	numMissingDays = 7
+
+	// tries without a single success before we assume an address is bad.
+	numRetries = 3
+
+	// max failures we will accept without a success before considering an address bad.
+	maxFailures = 10 // ?
+
+	// days since the last success before we will consider evicting an address.
+	minBadDays = 7
+
+	// % of total addresses known returned by GetSelection.
+	getSelectionPercent = 23
+
+	// min addresses that must be returned by GetSelection. Useful for bootstrapping.
+	minGetSelection = 32
+
+	// max addresses returned by GetSelection
+	// NOTE: this must match "maxMsgSize"
+	maxGetSelection = 250
+)
diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go
new file mode 100644
index 000000000..e90665a37
--- /dev/null
+++ b/p2p/pex/pex_reactor.go
@@ -0,0 +1,700 @@
+package pex
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"sync"
+	"time"
+
+	amino "github.com/tendermint/go-amino"
+	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/conn"
+)
+
+type Peer = p2p.Peer
+
+const (
+	// PexChannel is a channel for PEX messages
+	PexChannel = byte(0x00)
+
+	// over-estimate of max NetAddress size
+	// hexID (40) + IP (16) + Port (2) + Name (100) ...
+	// NOTE: don't use massive DNS names ..
+	maxAddressSize = 256
+
+	// NOTE: amplification factor!
+	// small request results in up to maxMsgSize response
+	maxMsgSize = maxAddressSize * maxGetSelection
+
+	// ensure we have enough peers
+	defaultEnsurePeersPeriod   = 30 * time.Second
+	defaultMinNumOutboundPeers = p2p.DefaultMinNumOutboundPeers
+
+	// Seed/Crawler constants
+
+	// We want seeds to only advertise good peers. Therefore they should wait at
+	// least as long as we expect it to take for a peer to become good before
+	// disconnecting.
+	// see consensus/reactor.go: blocksToContributeToBecomeGoodPeer
+	// 10000 blocks assuming 1s blocks ~ 2.7 hours.
+	defaultSeedDisconnectWaitPeriod = 3 * time.Hour
+
+	defaultCrawlPeerInterval = 2 * time.Minute // don't redial for this. TODO: back-off. what for?
+
+	defaultCrawlPeersPeriod = 30 * time.Second // check some peers every this
+
+	maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
+
+	// if a node connects to a seed, it does not have any trusted peers.
+	// Especially in the beginning, the node should have more trusted peers than
+	// untrusted.
+	biasToSelectNewPeers = 30 // 70 to select good peers
+)
+
+// PEXReactor handles PEX (peer exchange) and ensures that an
+// adequate number of peers are connected to the switch.
+//
+// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
+//
+// ## Preventing abuse
+//
+// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg to.
+// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod.
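+// (requestsSent below enforces the first rule; lastReceivedRequests enforces
+// the second.)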
+type PEXReactor struct { + p2p.BaseReactor + + book AddrBook + config *PEXReactorConfig + ensurePeersPeriod time.Duration // TODO: should go in the config + + // maps to prevent abuse + requestsSent *cmn.CMap // ID->struct{}: unanswered send requests + lastReceivedRequests *cmn.CMap // ID->time.Time: last time peer requested from us + + attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} +} + +func (r *PEXReactor) minReceiveRequestInterval() time.Duration { + // NOTE: must be less than ensurePeersPeriod, otherwise we'll request + // peers too quickly from others and they'll think we're bad! + return r.ensurePeersPeriod / 3 +} + +// PEXReactorConfig holds reactor specific configuration data. +type PEXReactorConfig struct { + // Seed/Crawler mode + SeedMode bool + + // Seeds is a list of addresses reactor may use + // if it can't connect to peers in the addrbook. + Seeds []string + + // PrivatePeerIDs is a list of peer IDs, which must not be gossiped to other + // peers. + PrivatePeerIDs []string +} + +type _attemptsToDial struct { + number int + lastDialed time.Time +} + +// NewPEXReactor creates new PEX reactor. +func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor { + r := &PEXReactor{ + book: b, + config: config, + ensurePeersPeriod: defaultEnsurePeersPeriod, + requestsSent: cmn.NewCMap(), + lastReceivedRequests: cmn.NewCMap(), + } + r.BaseReactor = *p2p.NewBaseReactor("PEXReactor", r) + return r +} + +// OnStart implements BaseService +func (r *PEXReactor) OnStart() error { + if err := r.BaseReactor.OnStart(); err != nil { + return err + } + err := r.book.Start() + if err != nil && err != cmn.ErrAlreadyStarted { + return err + } + + // return err if user provided a bad seed address + // or a host name that we cant resolve + if err := r.checkSeeds(); err != nil { + return err + } + + // Check if this node should run + // in seed/crawler mode + if r.config.SeedMode { + go r.crawlPeersRoutine() + } else { + go r.ensurePeersRoutine() + } + return nil +} + +// OnStop implements BaseService +func (r *PEXReactor) OnStop() { + r.BaseReactor.OnStop() + r.book.Stop() +} + +// GetChannels implements Reactor +func (r *PEXReactor) GetChannels() []*conn.ChannelDescriptor { + return []*conn.ChannelDescriptor{ + { + ID: PexChannel, + Priority: 1, + SendQueueCapacity: 10, + }, + } +} + +// AddPeer implements Reactor by adding peer to the address book (if inbound) +// or by requesting more addresses (if outbound). +func (r *PEXReactor) AddPeer(p Peer) { + if p.IsOutbound() { + // For outbound peers, the address is already in the books - + // either via DialPeersAsync or r.Receive. + // Ask it for more peers if we need. + if r.book.NeedMoreAddrs() { + r.RequestAddrs(p) + } + } else { + // inbound peer is its own source + addr := p.NodeInfo().NetAddress() + src := addr + + // ignore private addrs + if isAddrPrivate(addr, r.config.PrivatePeerIDs) { + return + } + + // add to book. dont RequestAddrs right away because + // we don't trust inbound as much - let ensurePeersRoutine handle it. + err := r.book.AddAddress(addr, src) + r.logErrAddrBook(err) + } +} + +func (r *PEXReactor) logErrAddrBook(err error) { + if err != nil { + switch err.(type) { + case ErrAddrBookNilAddr: + r.Logger.Error("Failed to add new address", "err", err) + default: + // non-routable, self, full book, etc. + r.Logger.Debug("Failed to add new address", "err", err) + } + } +} + +// RemovePeer implements Reactor. 
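+// It clears the request-tracking entries for the peer so a later reconnect
+// starts with a clean slate.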
+func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
+	id := string(p.ID())
+	r.requestsSent.Delete(id)
+	r.lastReceivedRequests.Delete(id)
+}
+
+// Receive implements Reactor by handling incoming PEX messages.
+func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
+	msg, err := decodeMsg(msgBytes)
+	if err != nil {
+		r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		r.Switch.StopPeerForError(src, err)
+		return
+	}
+	r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg)
+
+	switch msg := msg.(type) {
+	case *pexRequestMessage:
+		// Check we're not receiving requests too frequently.
+		if err := r.receiveRequest(src); err != nil {
+			r.Switch.StopPeerForError(src, err)
+			return
+		}
+
+		// Seeds disconnect after sending a batch of addrs
+		// NOTE: this is a prime candidate for amplification attacks,
+		// so it's important we
+		// 1) restrict how frequently peers can request
+		// 2) limit the output size
+		if r.config.SeedMode {
+			r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
+			r.Switch.StopPeerGracefully(src)
+		} else {
+			r.SendAddrs(src, r.book.GetSelection())
+		}
+
+	case *pexAddrsMessage:
+		// If we asked for addresses, add them to the book
+		if err := r.ReceiveAddrs(msg.Addrs, src); err != nil {
+			r.Switch.StopPeerForError(src, err)
+			return
+		}
+	default:
+		r.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
+	}
+}
+
+// enforces a minimum amount of time between requests
+func (r *PEXReactor) receiveRequest(src Peer) error {
+	id := string(src.ID())
+	v := r.lastReceivedRequests.Get(id)
+	if v == nil {
+		// initialize with empty time
+		lastReceived := time.Time{}
+		r.lastReceivedRequests.Set(id, lastReceived)
+		return nil
+	}
+
+	lastReceived := v.(time.Time)
+	if lastReceived.Equal(time.Time{}) {
+		// The first time gets a free pass; after that we start tracking the time.
+		lastReceived = time.Now()
+		r.lastReceivedRequests.Set(id, lastReceived)
+		return nil
+	}
+
+	now := time.Now()
+	minInterval := r.minReceiveRequestInterval()
+	if now.Sub(lastReceived) < minInterval {
+		return fmt.Errorf("Peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
+			src.ID(),
+			lastReceived,
+			now,
+			minInterval,
+		)
+	}
+	r.lastReceivedRequests.Set(id, now)
+	return nil
+}
+
+// RequestAddrs asks peer for more addresses if we do not already
+// have a request out for this peer.
+func (r *PEXReactor) RequestAddrs(p Peer) {
+	r.Logger.Debug("Request addrs", "from", p)
+	id := string(p.ID())
+	if r.requestsSent.Has(id) {
+		return
+	}
+	r.requestsSent.Set(id, struct{}{})
+	p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{}))
+}
+
+// ReceiveAddrs adds the given addrs to the addrbook if there's an open
+// request for this peer and deletes the open request.
+// If there's no open request for the src peer, it returns an error.
+func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
+
+	id := string(src.ID())
+	if !r.requestsSent.Has(id) {
+		return cmn.NewError("Received unsolicited pexAddrsMessage")
+	}
+	r.requestsSent.Delete(id)
+
+	srcAddr := src.NodeInfo().NetAddress()
+	for _, netAddr := range addrs {
+		// NOTE: GetSelection methods should never return nil addrs
+		if netAddr == nil {
+			return cmn.NewError("received nil addr")
+		}
+
+		// ignore private peers
+		// TODO: give private peers to AddrBook so it can enforce this on AddAddress.
+		// We'd then have to check for ErrPrivatePeer on AddAddress here, which is
+		// an error we just ignore (maybe the peer is probing us for our private peers :P)
+		if isAddrPrivate(netAddr, r.config.PrivatePeerIDs) {
+			continue
+		}
+
+		err := r.book.AddAddress(netAddr, srcAddr)
+		r.logErrAddrBook(err)
+	}
+	return nil
+}
+
+// SendAddrs sends addrs to the peer.
+func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
+	p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: netAddrs}))
+}
+
+// SetEnsurePeersPeriod sets the period used to ensure peers are connected.
+func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
+	r.ensurePeersPeriod = d
+}
+
+// Ensures that sufficient peers are connected. (continuous)
+func (r *PEXReactor) ensurePeersRoutine() {
+	var (
+		seed   = cmn.NewRand()
+		jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
+	)
+
+	// Randomize the first round of communication to avoid a thundering herd.
+	// If no potential peers are present, start connecting directly so we
+	// guarantee swift setup with the help of configured seeds.
+	if r.hasPotentialPeers() {
+		time.Sleep(time.Duration(jitter))
+	}
+
+	// fire once immediately.
+	// ensures we dial the seeds right away if the book is empty
+	r.ensurePeers()
+
+	// fire periodically
+	ticker := time.NewTicker(r.ensurePeersPeriod)
+	for {
+		select {
+		case <-ticker.C:
+			r.ensurePeers()
+		case <-r.Quit():
+			ticker.Stop()
+			return
+		}
+	}
+}
+
+// ensurePeers ensures that sufficient peers are connected. (once)
+//
+// The address book is a heuristic that we haven't perfected yet, or perhaps
+// is manually edited by the node operator. It should not be used to compute
+// what addresses are already connected or not.
+func (r *PEXReactor) ensurePeers() {
+	var (
+		out, in, dial = r.Switch.NumPeers()
+		numToDial     = defaultMinNumOutboundPeers - (out + dial)
+	)
+	r.Logger.Info(
+		"Ensure peers",
+		"numOutPeers", out,
+		"numInPeers", in,
+		"numDialing", dial,
+		"numToDial", numToDial,
+	)
+
+	if numToDial <= 0 {
+		return
+	}
+
+	// bias to prefer more vetted peers when we have fewer connections.
+	// not perfect, but somewhat ensures that we prioritize connecting to
+	// more-vetted peers. E.g. with no outbound peers the bias is 10, so
+	// PickAddress picks mostly from the vetted (old) bucket; with 8+ outbound
+	// peers it is 90.
+	// NOTE: range here is [10, 90]. Too high?
+	newBias := cmn.MinInt(out, 8)*10 + 10
+
+	toDial := make(map[p2p.ID]*p2p.NetAddress)
+	// Try maxAttempts times to pick numToDial addresses to dial
+	maxAttempts := numToDial * 3
+
+	for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ {
+		try := r.book.PickAddress(newBias)
+		if try == nil {
+			continue
+		}
+		if _, selected := toDial[try.ID]; selected {
+			continue
+		}
+		if dialing := r.Switch.IsDialing(try.ID); dialing {
+			continue
+		}
+		if connected := r.Switch.Peers().Has(try.ID); connected {
+			continue
+		}
+		// TODO: consider moving some checks from toDial into here
+		// so we don't even consider dialing peers that we want to wait
+		// before dialing again, or have dialed too many times already
+		r.Logger.Info("Will dial address", "addr", try)
+		toDial[try.ID] = try
+	}
+
+	// Dial picked addresses
+	for _, addr := range toDial {
+		go r.dialPeer(addr)
+	}
+
+	// If we need more addresses, pick a random peer and ask for more.
+	if r.book.NeedMoreAddrs() {
+		peers := r.Switch.Peers().List()
+		peersCount := len(peers)
+		if peersCount > 0 {
+			peer := peers[cmn.RandInt()%peersCount] // nolint: gas
+			r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
+			r.RequestAddrs(peer)
+		}
+	}
+
+	// If we are not connected to nor dialing anybody, fall back to dialing a seed.
+	if out+in+dial+len(toDial) == 0 {
+		r.Logger.Info("No addresses to dial nor connected peers. Falling back to seeds")
+		r.dialSeeds()
+	}
+}
+
+func (r *PEXReactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) {
+	_attempts, ok := r.attemptsToDial.Load(addr.DialString())
+	if !ok {
+		return
+	}
+	atd := _attempts.(_attemptsToDial)
+	return atd.number, atd.lastDialed
+}
+
+func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
+	attempts, lastDialed := r.dialAttemptsInfo(addr)
+
+	if attempts > maxAttemptsToDial {
+		r.Logger.Error("Reached max attempts to dial", "addr", addr, "attempts", attempts)
+		r.book.MarkBad(addr)
+		return
+	}
+
+	// exponential backoff if it's not our first attempt to dial the given address
+	if attempts > 0 {
+		jitterSeconds := time.Duration(cmn.RandFloat64() * float64(time.Second)) // 1s == (1e9 ns)
+		backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second)
+		sinceLastDialed := time.Since(lastDialed)
+		if sinceLastDialed < backoffDuration {
+			r.Logger.Debug("Too early to dial", "addr", addr, "backoff_duration", backoffDuration, "last_dialed", lastDialed, "time_since", sinceLastDialed)
+			return
+		}
+	}
+
+	err := r.Switch.DialPeerWithAddress(addr, false)
+	if err != nil {
+		r.Logger.Error("Dialing failed", "addr", addr, "err", err, "attempts", attempts)
+		// TODO: detect more "bad peer" scenarios
+		if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok {
+			r.book.MarkBad(addr)
+			r.attemptsToDial.Delete(addr.DialString())
+		} else {
+			r.book.MarkAttempt(addr)
+			// FIXME: if the addr is going to be removed from the addrbook (hard to
+			// tell at this point), we need to Delete it from attemptsToDial, not
+			// record another attempt.
+			// record attempt
+			r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
+		}
+	} else {
+		// cleanup any history
+		r.attemptsToDial.Delete(addr.DialString())
+	}
+}
+
+// checkSeeds checks that the seed addresses are well formed.
+func (r *PEXReactor) checkSeeds() error {
+	lSeeds := len(r.config.Seeds)
+	if lSeeds == 0 {
+		return nil
+	}
+	_, errs := p2p.NewNetAddressStrings(r.config.Seeds)
+	for _, err := range errs {
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// dialSeeds randomly dials seeds until we connect to one or exhaust them.
+func (r *PEXReactor) dialSeeds() {
+	lSeeds := len(r.config.Seeds)
+	if lSeeds == 0 {
+		return
+	}
+	seedAddrs, _ := p2p.NewNetAddressStrings(r.config.Seeds)
+
+	perm := cmn.RandPerm(lSeeds)
+	// perm := r.Switch.rng.Perm(lSeeds)
+	for _, i := range perm {
+		// dial a random seed
+		seedAddr := seedAddrs[i]
+		err := r.Switch.DialPeerWithAddress(seedAddr, false)
+		if err == nil {
+			return
+		}
+		r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr)
+	}
+	r.Switch.Logger.Error("Couldn't connect to any seeds")
+}
+
+// AttemptsToDial returns the number of attempts to dial a specific address. It
+// returns 0 if we never attempted or successfully connected.
+func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int {
+	lAttempts, attempted := r.attemptsToDial.Load(addr.DialString())
+	if attempted {
+		return lAttempts.(_attemptsToDial).number
+	}
+	return 0
+}
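+
+// dialBackoffExample is an illustrative sketch (not called anywhere, and not
+// part of the original patch): it prints the deterministic part of the
+// backoff schedule used by dialPeer above. A random jitter of up to 1s is
+// added on top of each duration at runtime.
+func dialBackoffExample() {
+	for attempts := 1; attempts <= maxAttemptsToDial; attempts++ {
+		backoff := (1 << uint(attempts)) * time.Second // 2s, 4s, 8s, ...
+		fmt.Printf("attempt %d: wait at least %v\n", attempts, backoff)
+	}
+}
+
+//----------------------------------------------------------
+
+// Explores the network searching for more peers. (continuous)
+// Seed/Crawler Mode causes this node to quickly disconnect
+// from peers, except other seed nodes.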
+func (r *PEXReactor) crawlPeersRoutine() {
+	// Do an initial crawl
+	r.crawlPeers()
+
+	// Fire periodically
+	ticker := time.NewTicker(defaultCrawlPeersPeriod)
+
+	for {
+		select {
+		case <-ticker.C:
+			r.attemptDisconnects()
+			r.crawlPeers()
+		case <-r.Quit():
+			ticker.Stop()
+			return
+		}
+	}
+}
+
+// hasPotentialPeers indicates if there is a potential peer to connect to, by
+// consulting the Switch as well as the AddrBook.
+func (r *PEXReactor) hasPotentialPeers() bool {
+	out, in, dial := r.Switch.NumPeers()
+
+	return out+in+dial > 0 && len(r.book.ListOfKnownAddresses()) > 0
+}
+
+// crawlPeerInfo handles temporary data needed for the
+// network crawling performed during seed/crawler mode.
+type crawlPeerInfo struct {
+	// The listening address of a potential peer we learned about
+	Addr *p2p.NetAddress
+
+	// The last time we attempted to reach this address
+	LastAttempt time.Time
+
+	// The last time we successfully reached this address
+	LastSuccess time.Time
+}
+
+// oldestFirst implements sort.Interface for []crawlPeerInfo
+// based on the LastAttempt field.
+type oldestFirst []crawlPeerInfo
+
+func (of oldestFirst) Len() int           { return len(of) }
+func (of oldestFirst) Swap(i, j int)      { of[i], of[j] = of[j], of[i] }
+func (of oldestFirst) Less(i, j int) bool { return of[i].LastAttempt.Before(of[j].LastAttempt) }
+
+// getPeersToCrawl returns addresses of potential peers that we wish to validate.
+// NOTE: The returned entries are sorted oldest-attempt-first (see oldestFirst above).
+func (r *PEXReactor) getPeersToCrawl() []crawlPeerInfo {
+	var of oldestFirst
+
+	// TODO: be more selective
+	addrs := r.book.ListOfKnownAddresses()
+	for _, addr := range addrs {
+		if len(addr.ID()) == 0 {
+			continue // don't use peers without an ID
+		}
+
+		of = append(of, crawlPeerInfo{
+			Addr:        addr.Addr,
+			LastAttempt: addr.LastAttempt,
+			LastSuccess: addr.LastSuccess,
+		})
+	}
+	sort.Sort(of)
+	return of
+}
+
+// crawlPeers will crawl the network looking for new peer addresses. (once)
+func (r *PEXReactor) crawlPeers() {
+	peerInfos := r.getPeersToCrawl()
+
+	now := time.Now()
+	// Use addresses we know of to reach additional peers
+	for _, pi := range peerInfos {
+		// Do not attempt to connect with peers we recently dialed
+		if now.Sub(pi.LastAttempt) < defaultCrawlPeerInterval {
+			continue
+		}
+		// Otherwise, attempt to connect with the known address
+		err := r.Switch.DialPeerWithAddress(pi.Addr, false)
+		if err != nil {
+			r.book.MarkAttempt(pi.Addr)
+			continue
+		}
+		// Ask for more addresses
+		peer := r.Switch.Peers().Get(pi.Addr.ID)
+		if peer != nil {
+			r.RequestAddrs(peer)
+		}
+	}
+}
+
+// attemptDisconnects checks if we've been with each peer long enough to disconnect.
+func (r *PEXReactor) attemptDisconnects() {
+	for _, peer := range r.Switch.Peers().List() {
+		if peer.Status().Duration < defaultSeedDisconnectWaitPeriod {
+			continue
+		}
+		if peer.IsPersistent() {
+			continue
+		}
+		r.Switch.StopPeerGracefully(peer)
+	}
+}
+
+// isAddrPrivate returns true if addr.ID is a private ID.
+func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool {
+	for _, id := range privatePeerIDs {
+		if string(addr.ID) == id {
+			return true
+		}
+	}
+	return false
+}
+
+//-----------------------------------------------------------------------------
+// Messages
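+
+// pexWireExample is an illustrative sketch (not called anywhere, and not part
+// of the original patch): it round-trips a pexAddrsMessage through the amino
+// codec registered in wire.go, mirroring what Receive expects on the wire.
+func pexWireExample(addrs []*p2p.NetAddress) (PexMessage, error) {
+	bz := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs})
+	return decodeMsg(bz)
+}
+
+// PexMessage is a primary type for PEX messages. Underneath, it could contain
+// either pexRequestMessage, or pexAddrsMessage messages.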
+type PexMessage interface{} + +func RegisterPexMessage(cdc *amino.Codec) { + cdc.RegisterInterface((*PexMessage)(nil), nil) + cdc.RegisterConcrete(&pexRequestMessage{}, "tendermint/p2p/PexRequestMessage", nil) + cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil) +} + +func decodeMsg(bz []byte) (msg PexMessage, err error) { + if len(bz) > maxMsgSize { + return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) + } + err = cdc.UnmarshalBinaryBare(bz, &msg) + return +} + +/* +A pexRequestMessage requests additional peer addresses. +*/ +type pexRequestMessage struct { +} + +func (m *pexRequestMessage) String() string { + return "[pexRequest]" +} + +/* +A message with announced peer addresses. +*/ +type pexAddrsMessage struct { + Addrs []*p2p.NetAddress +} + +func (m *pexAddrsMessage) String() string { + return fmt.Sprintf("[pexAddrs %v]", m.Addrs) +} diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go new file mode 100644 index 000000000..629c9397a --- /dev/null +++ b/p2p/pex/pex_reactor_test.go @@ -0,0 +1,454 @@ +package pex + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/conn" +) + +var ( + cfg *config.P2PConfig +) + +func init() { + cfg = config.DefaultP2PConfig() + cfg.PexReactor = true + cfg.AllowDuplicateIP = true +} + +func TestPEXReactorBasic(t *testing.T) { + r, book := createReactor(&PEXReactorConfig{}) + defer teardownReactor(book) + + assert.NotNil(t, r) + assert.NotEmpty(t, r.GetChannels()) +} + +func TestPEXReactorAddRemovePeer(t *testing.T) { + r, book := createReactor(&PEXReactorConfig{}) + defer teardownReactor(book) + + size := book.Size() + peer := p2p.CreateRandomPeer(false) + + r.AddPeer(peer) + assert.Equal(t, size+1, book.Size()) + + r.RemovePeer(peer, "peer not available") + + outboundPeer := p2p.CreateRandomPeer(true) + + r.AddPeer(outboundPeer) + assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book") + + r.RemovePeer(outboundPeer, "peer not available") +} + +// --- FAIL: TestPEXReactorRunning (11.10s) +// pex_reactor_test.go:411: expected all switches to be connected to at +// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => +// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) +// +// EXPLANATION: peers are getting rejected because in switch#addPeer we check +// if any peer (who we already connected to) has the same IP. Even though local +// peers have different IP addresses, they all have the same underlying remote +// IP: 127.0.0.1. 
+// +func TestPEXReactorRunning(t *testing.T) { + N := 3 + switches := make([]*p2p.Switch, N) + + // directory to store address books + dir, err := ioutil.TempDir("", "pex_reactor") + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck + + books := make([]*addrBook, N) + logger := log.TestingLogger() + + // create switches + for i := 0; i < N; i++ { + switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { + books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) + books[i].SetLogger(logger.With("pex", i)) + sw.SetAddrBook(books[i]) + + sw.SetLogger(logger.With("pex", i)) + + r := NewPEXReactor(books[i], &PEXReactorConfig{}) + r.SetLogger(logger.With("pex", i)) + r.SetEnsurePeersPeriod(250 * time.Millisecond) + sw.AddReactor("pex", r) + + return sw + }) + } + + addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { + addr := switches[otherSwitchIndex].NodeInfo().NetAddress() + books[switchIndex].AddAddress(addr, addr) + } + + addOtherNodeAddrToAddrBook(0, 1) + addOtherNodeAddrToAddrBook(1, 0) + addOtherNodeAddrToAddrBook(2, 1) + + for i, sw := range switches { + sw.AddListener(p2p.NewDefaultListener("tcp://"+sw.NodeInfo().ListenAddr, "", false, logger.With("pex", i))) + + err := sw.Start() // start switch and reactors + require.Nil(t, err) + } + + assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) + + // stop them + for _, s := range switches { + s.Stop() + } +} + +func TestPEXReactorReceive(t *testing.T) { + r, book := createReactor(&PEXReactorConfig{}) + defer teardownReactor(book) + + peer := p2p.CreateRandomPeer(false) + + // we have to send a request to receive responses + r.RequestAddrs(peer) + + size := book.Size() + addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) + r.Receive(PexChannel, peer, msg) + assert.Equal(t, size+1, book.Size()) + + msg = cdc.MustMarshalBinaryBare(&pexRequestMessage{}) + r.Receive(PexChannel, peer, msg) // should not panic. 
+} + +func TestPEXReactorRequestMessageAbuse(t *testing.T) { + r, book := createReactor(&PEXReactorConfig{}) + defer teardownReactor(book) + + sw := createSwitchAndAddReactors(r) + sw.SetAddrBook(book) + + peer := newMockPeer() + p2p.AddPeerToSwitch(sw, peer) + assert.True(t, sw.Peers().Has(peer.ID())) + + id := string(peer.ID()) + msg := cdc.MustMarshalBinaryBare(&pexRequestMessage{}) + + // first time creates the entry + r.Receive(PexChannel, peer, msg) + assert.True(t, r.lastReceivedRequests.Has(id)) + assert.True(t, sw.Peers().Has(peer.ID())) + + // next time sets the last time value + r.Receive(PexChannel, peer, msg) + assert.True(t, r.lastReceivedRequests.Has(id)) + assert.True(t, sw.Peers().Has(peer.ID())) + + // third time is too many too soon - peer is removed + r.Receive(PexChannel, peer, msg) + assert.False(t, r.lastReceivedRequests.Has(id)) + assert.False(t, sw.Peers().Has(peer.ID())) +} + +func TestPEXReactorAddrsMessageAbuse(t *testing.T) { + r, book := createReactor(&PEXReactorConfig{}) + defer teardownReactor(book) + + sw := createSwitchAndAddReactors(r) + sw.SetAddrBook(book) + + peer := newMockPeer() + p2p.AddPeerToSwitch(sw, peer) + assert.True(t, sw.Peers().Has(peer.ID())) + + id := string(peer.ID()) + + // request addrs from the peer + r.RequestAddrs(peer) + assert.True(t, r.requestsSent.Has(id)) + assert.True(t, sw.Peers().Has(peer.ID())) + + addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) + + // receive some addrs. should clear the request + r.Receive(PexChannel, peer, msg) + assert.False(t, r.requestsSent.Has(id)) + assert.True(t, sw.Peers().Has(peer.ID())) + + // receiving more addrs causes a disconnect + r.Receive(PexChannel, peer, msg) + assert.False(t, sw.Peers().Has(peer.ID())) +} + +func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { + // directory to store address books + dir, err := ioutil.TempDir("", "pex_reactor") + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck + + // 1. create seed + seed := p2p.MakeSwitch( + cfg, + 0, + "127.0.0.1", + "123.123.123", + func(i int, sw *p2p.Switch) *p2p.Switch { + book := NewAddrBook(filepath.Join(dir, "addrbook0.json"), false) + book.SetLogger(log.TestingLogger()) + sw.SetAddrBook(book) + + sw.SetLogger(log.TestingLogger()) + + r := NewPEXReactor(book, &PEXReactorConfig{}) + r.SetLogger(log.TestingLogger()) + sw.AddReactor("pex", r) + return sw + }, + ) + seed.AddListener( + p2p.NewDefaultListener("tcp://"+seed.NodeInfo().ListenAddr, "", false, log.TestingLogger()), + ) + require.Nil(t, seed.Start()) + defer seed.Stop() + + // 2. create usual peer with only seed configured. + peer := p2p.MakeSwitch( + cfg, + 1, + "127.0.0.1", + "123.123.123", + func(i int, sw *p2p.Switch) *p2p.Switch { + book := NewAddrBook(filepath.Join(dir, "addrbook1.json"), false) + book.SetLogger(log.TestingLogger()) + sw.SetAddrBook(book) + + sw.SetLogger(log.TestingLogger()) + + r := NewPEXReactor( + book, + &PEXReactorConfig{ + Seeds: []string{seed.NodeInfo().NetAddress().String()}, + }, + ) + r.SetLogger(log.TestingLogger()) + sw.AddReactor("pex", r) + return sw + }, + ) + require.Nil(t, peer.Start()) + defer peer.Stop() + + // 3. 
check that the peer connects to seed immediately + assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) +} + +func TestPEXReactorCrawlStatus(t *testing.T) { + pexR, book := createReactor(&PEXReactorConfig{SeedMode: true}) + defer teardownReactor(book) + + // Seed/Crawler mode uses data from the Switch + sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) + + // Create a peer, add it to the peer set and the addrbook. + peer := p2p.CreateRandomPeer(false) + p2p.AddPeerToSwitch(pexR.Switch, peer) + addr1 := peer.NodeInfo().NetAddress() + pexR.book.AddAddress(addr1, addr1) + + // Add a non-connected address to the book. + _, addr2 := p2p.CreateRoutableAddr() + pexR.book.AddAddress(addr2, addr1) + + // Get some peerInfos to crawl + peerInfos := pexR.getPeersToCrawl() + + // Make sure it has the proper number of elements + assert.Equal(t, 2, len(peerInfos)) + + // TODO: test +} + +func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { + peer := p2p.CreateRandomPeer(false) + + pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID)}}) + defer teardownReactor(book) + + // we have to send a request to receive responses + pexR.RequestAddrs(peer) + + size := book.Size() + addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) + pexR.Receive(PexChannel, peer, msg) + assert.Equal(t, size, book.Size()) + + pexR.AddPeer(peer) + assert.Equal(t, size, book.Size()) +} + +func TestPEXReactorDialPeer(t *testing.T) { + pexR, book := createReactor(&PEXReactorConfig{}) + defer teardownReactor(book) + + sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) + + peer := newMockPeer() + addr := peer.NodeInfo().NetAddress() + + assert.Equal(t, 0, pexR.AttemptsToDial(addr)) + + // 1st unsuccessful attempt + pexR.dialPeer(addr) + + assert.Equal(t, 1, pexR.AttemptsToDial(addr)) + + // 2nd unsuccessful attempt + pexR.dialPeer(addr) + + // must be skipped because it is too early + assert.Equal(t, 1, pexR.AttemptsToDial(addr)) + + if !testing.Short() { + time.Sleep(3 * time.Second) + + // 3rd attempt + pexR.dialPeer(addr) + + assert.Equal(t, 2, pexR.AttemptsToDial(addr)) + } +} + +type mockPeer struct { + *cmn.BaseService + pubKey crypto.PubKey + addr *p2p.NetAddress + outbound, persistent bool +} + +func newMockPeer() mockPeer { + _, netAddr := p2p.CreateRoutableAddr() + mp := mockPeer{ + addr: netAddr, + pubKey: crypto.GenPrivKeyEd25519().PubKey(), + } + mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp) + mp.Start() + return mp +} + +func (mp mockPeer) ID() p2p.ID { return mp.addr.ID } +func (mp mockPeer) IsOutbound() bool { return mp.outbound } +func (mp mockPeer) IsPersistent() bool { return mp.persistent } +func (mp mockPeer) NodeInfo() p2p.NodeInfo { + return p2p.NodeInfo{ + ID: mp.addr.ID, + ListenAddr: mp.addr.DialString(), + } +} +func (mp mockPeer) RemoteIP() net.IP { return net.ParseIP("127.0.0.1") } +func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } +func (mp mockPeer) Send(byte, []byte) bool { return false } +func (mp mockPeer) TrySend(byte, []byte) bool { return false } +func (mp mockPeer) Set(string, interface{}) {} +func (mp mockPeer) Get(string) interface{} { return nil } + +func assertPeersWithTimeout( + t *testing.T, + switches []*p2p.Switch, + checkPeriod, timeout time.Duration, + nPeers int, +) { + var ( + ticker = time.NewTicker(checkPeriod) + remaining = timeout + ) + + for { + select { 
+ case <-ticker.C: + // check peers are connected + allGood := true + for _, s := range switches { + outbound, inbound, _ := s.NumPeers() + if outbound+inbound < nPeers { + allGood = false + } + } + remaining -= checkPeriod + if remaining < 0 { + remaining = 0 + } + if allGood { + return + } + case <-time.After(remaining): + numPeersStr := "" + for i, s := range switches { + outbound, inbound, _ := s.NumPeers() + numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) + } + t.Errorf( + "expected all switches to be connected to at least one peer (switches: %s)", + numPeersStr, + ) + return + } + } +} + +func createReactor(conf *PEXReactorConfig) (r *PEXReactor, book *addrBook) { + // directory to store address book + dir, err := ioutil.TempDir("", "pex_reactor") + if err != nil { + panic(err) + } + book = NewAddrBook(filepath.Join(dir, "addrbook.json"), true) + book.SetLogger(log.TestingLogger()) + + r = NewPEXReactor(book, conf) + r.SetLogger(log.TestingLogger()) + return +} + +func teardownReactor(book *addrBook) { + err := os.RemoveAll(filepath.Dir(book.FilePath())) + if err != nil { + panic(err) + } +} + +func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { + sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) + sw.SetLogger(log.TestingLogger()) + for _, r := range reactors { + sw.AddReactor(r.String(), r) + r.SetSwitch(sw) + } + return sw +} diff --git a/p2p/pex/wire.go b/p2p/pex/wire.go new file mode 100644 index 000000000..57fc93858 --- /dev/null +++ b/p2p/pex/wire.go @@ -0,0 +1,11 @@ +package pex + +import ( + "github.com/tendermint/go-amino" +) + +var cdc *amino.Codec = amino.NewCodec() + +func init() { + RegisterPexMessage(cdc) +} diff --git a/p2p/switch.go b/p2p/switch.go new file mode 100644 index 000000000..d1e2ef23b --- /dev/null +++ b/p2p/switch.go @@ -0,0 +1,662 @@ +package p2p + +import ( + "fmt" + "math" + "net" + "sync" + "time" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p/conn" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + // wait a random amount of time from this interval + // before dialing peers or reconnecting to help prevent DoS + dialRandomizerIntervalMilliseconds = 3000 + + // repeatedly try to reconnect for a few minutes + // ie. 5 * 20 = 100s + reconnectAttempts = 20 + reconnectInterval = 5 * time.Second + + // then move into exponential backoff mode for ~1day + // ie. 3**10 = 16hrs + reconnectBackOffAttempts = 10 + reconnectBackOffBaseSeconds = 3 + + // keep at least this many outbound peers + // TODO: move to config + DefaultMinNumOutboundPeers = 10 +) + +//----------------------------------------------------------------------------- + +// An AddrBook represents an address book from the pex package, which is used +// to store peer addresses. +type AddrBook interface { + AddAddress(addr *NetAddress, src *NetAddress) error + AddOurAddress(*NetAddress) + OurAddress(*NetAddress) bool + MarkGood(*NetAddress) + RemoveAddress(*NetAddress) + HasAddress(*NetAddress) bool + Save() +} + +//----------------------------------------------------------------------------- + +// Switch handles peer connections and exposes an API to receive incoming messages +// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one +// or more `Channels`. So while sending outgoing messages is typically performed on the peer, +// incoming messages are received on the reactor. 
+type Switch struct {
+	cmn.BaseService
+
+	config       *config.P2PConfig
+	listeners    []Listener
+	reactors     map[string]Reactor
+	chDescs      []*conn.ChannelDescriptor
+	reactorsByCh map[byte]Reactor
+	peers        *PeerSet
+	dialing      *cmn.CMap
+	reconnecting *cmn.CMap
+	nodeInfo     NodeInfo // our node info
+	nodeKey      *NodeKey // our node privkey
+	addrBook     AddrBook
+
+	filterConnByAddr func(net.Addr) error
+	filterConnByID   func(ID) error
+
+	mConfig conn.MConnConfig
+
+	rng *cmn.Rand // seed for randomizing dial times and orders
+
+	metrics *Metrics
+}
+
+// SwitchOption sets an optional parameter on the Switch.
+type SwitchOption func(*Switch)
+
+// NewSwitch creates a new Switch with the given config.
+func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) *Switch {
+	sw := &Switch{
+		config:       cfg,
+		reactors:     make(map[string]Reactor),
+		chDescs:      make([]*conn.ChannelDescriptor, 0),
+		reactorsByCh: make(map[byte]Reactor),
+		peers:        NewPeerSet(),
+		dialing:      cmn.NewCMap(),
+		reconnecting: cmn.NewCMap(),
+		metrics:      NopMetrics(),
+	}
+
+	// Ensure we have a completely non-deterministic PRNG.
+	sw.rng = cmn.NewRand()
+
+	mConfig := conn.DefaultMConnConfig()
+	mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond
+	mConfig.SendRate = cfg.SendRate
+	mConfig.RecvRate = cfg.RecvRate
+	mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize
+
+	sw.mConfig = mConfig
+
+	sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
+
+	for _, option := range options {
+		option(sw)
+	}
+
+	return sw
+}
+
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) SwitchOption {
+	return func(sw *Switch) { sw.metrics = metrics }
+}
+
+//---------------------------------------------------------------------
+// Switch setup
+
+// AddReactor adds the given reactor to the switch.
+// NOTE: Not goroutine safe.
+func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
+	// Validate the reactor.
+	// No two reactors can share the same channel.
+	reactorChannels := reactor.GetChannels()
+	for _, chDesc := range reactorChannels {
+		chID := chDesc.ID
+		if sw.reactorsByCh[chID] != nil {
+			cmn.PanicSanity(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor))
+		}
+		sw.chDescs = append(sw.chDescs, chDesc)
+		sw.reactorsByCh[chID] = reactor
+	}
+	sw.reactors[name] = reactor
+	reactor.SetSwitch(sw)
+	return reactor
+}
+
+// Reactors returns a map of reactors registered on the switch.
+// NOTE: Not goroutine safe.
+func (sw *Switch) Reactors() map[string]Reactor {
+	return sw.reactors
+}
+
+// Reactor returns the reactor with the given name.
+// NOTE: Not goroutine safe.
+func (sw *Switch) Reactor(name string) Reactor {
+	return sw.reactors[name]
+}
+
+// AddListener adds the given listener to the switch for listening to incoming peer connections.
+// NOTE: Not goroutine safe.
+func (sw *Switch) AddListener(l Listener) {
+	sw.listeners = append(sw.listeners, l)
+}
+
+// Listeners returns the list of listeners the switch listens on.
+// NOTE: Not goroutine safe.
+func (sw *Switch) Listeners() []Listener {
+	return sw.listeners
+}
+
+// IsListening returns true if the switch has at least one listener.
+// NOTE: Not goroutine safe.
+func (sw *Switch) IsListening() bool {
+	return len(sw.listeners) > 0
+}
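+
+// newSwitchExample is an illustrative sketch (not called anywhere, and not
+// part of the original patch): it shows how the functional options above are
+// intended to be applied when constructing a Switch. NopMetrics stands in for
+// a real Prometheus-backed Metrics value.
+func newSwitchExample(cfg *config.P2PConfig) *Switch {
+	return NewSwitch(cfg, WithMetrics(NopMetrics()))
+}
+
+// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes.
+// NOTE: Not goroutine safe.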
+func (sw *Switch) SetNodeInfo(nodeInfo NodeInfo) {
+	sw.nodeInfo = nodeInfo
+}
+
+// NodeInfo returns the switch's NodeInfo.
+// NOTE: Not goroutine safe.
+func (sw *Switch) NodeInfo() NodeInfo {
+	return sw.nodeInfo
+}
+
+// SetNodeKey sets the switch's private key for authenticated encryption.
+// NOTE: Not goroutine safe.
+func (sw *Switch) SetNodeKey(nodeKey *NodeKey) {
+	sw.nodeKey = nodeKey
+}
+
+//---------------------------------------------------------------------
+// Service start/stop
+
+// OnStart implements BaseService. It starts all the reactors, peers, and listeners.
+func (sw *Switch) OnStart() error {
+	// Start reactors
+	for _, reactor := range sw.reactors {
+		err := reactor.Start()
+		if err != nil {
+			return cmn.ErrorWrap(err, "failed to start %v", reactor)
+		}
+	}
+	// Start listeners
+	for _, listener := range sw.listeners {
+		go sw.listenerRoutine(listener)
+	}
+	return nil
+}
+
+// OnStop implements BaseService. It stops all listeners, peers, and reactors.
+func (sw *Switch) OnStop() {
+	// Stop listeners
+	for _, listener := range sw.listeners {
+		listener.Stop()
+	}
+	sw.listeners = nil
+	// Stop peers
+	for _, peer := range sw.peers.List() {
+		peer.Stop()
+		sw.peers.Remove(peer)
+	}
+	// Stop reactors
+	sw.Logger.Debug("Switch: Stopping reactors")
+	for _, reactor := range sw.reactors {
+		reactor.Stop()
+	}
+}
+
+//---------------------------------------------------------------------
+// Peers
+
+// Broadcast runs a goroutine for each attempted send, which will block trying
+// to send for defaultSendTimeoutSeconds. Returns a channel which receives
+// success values for each attempted send (false if times out). The channel
+// will be closed once msg bytes are sent to all peers (or time out).
+//
+// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
+func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool {
+	successChan := make(chan bool, len(sw.peers.List()))
+	sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", fmt.Sprintf("%X", msgBytes))
+	var wg sync.WaitGroup
+	for _, peer := range sw.peers.List() {
+		wg.Add(1)
+		go func(peer Peer) {
+			defer wg.Done()
+			success := peer.Send(chID, msgBytes)
+			successChan <- success
+		}(peer)
+	}
+	go func() {
+		wg.Wait()
+		close(successChan)
+	}()
+	return successChan
+}
+
+// NumPeers returns the count of outbound/inbound and outbound-dialing peers.
+func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
+	peers := sw.peers.List()
+	for _, peer := range peers {
+		if peer.IsOutbound() {
+			outbound++
+		} else {
+			inbound++
+		}
+	}
+	dialing = sw.dialing.Size()
+	return
+}
+
+// Peers returns the set of peers that are connected to the switch.
+func (sw *Switch) Peers() IPeerSet {
+	return sw.peers
+}
+
+// StopPeerForError disconnects from a peer due to external error.
+// If the peer is persistent, it will attempt to reconnect.
+// TODO: make record depending on reason.
+func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) {
+	sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason)
+	sw.stopAndRemovePeer(peer, reason)
+
+	if peer.IsPersistent() {
+		// NOTE: this is the self-reported addr, not the original we dialed
+		go sw.reconnectToPeer(peer.NodeInfo().NetAddress())
+	}
+}
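+
+// broadcastExample is an illustrative sketch (not called anywhere, and not
+// part of the original patch): it drains the channel returned by Broadcast
+// and counts how many peers accepted the send, which is how callers are
+// expected to consume the result.
+func broadcastExample(sw *Switch, chID byte, msg []byte) (ok int) {
+	for success := range sw.Broadcast(chID, msg) {
+		if success {
+			ok++
+		}
+	}
+	return ok
+}
+
+// StopPeerGracefully disconnects from a peer gracefully.
+// TODO: handle graceful disconnects.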
+func (sw *Switch) StopPeerGracefully(peer Peer) {
+	sw.Logger.Info("Stopping peer gracefully")
+	sw.stopAndRemovePeer(peer, nil)
+}
+
+func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
+	sw.peers.Remove(peer)
+	sw.metrics.Peers.Add(float64(-1))
+	peer.Stop()
+	for _, reactor := range sw.reactors {
+		reactor.RemovePeer(peer, reason)
+	}
+}
+
+// reconnectToPeer tries to reconnect to the addr, first repeatedly
+// with a fixed interval, then with exponential backoff.
+// If still unsuccessful after all that, it stops trying and leaves it
+// to the PEX/AddrBook to find the peer with the addr again.
+// NOTE: this will keep trying even if the handshake or auth fails.
+// TODO: be more explicit with error types so we only retry on certain failures
+//  - ie. if we're getting ErrDuplicatePeer we can stop
+//    because the addrbook got us the peer back already
+func (sw *Switch) reconnectToPeer(addr *NetAddress) {
+	if sw.reconnecting.Has(string(addr.ID)) {
+		return
+	}
+	sw.reconnecting.Set(string(addr.ID), addr)
+	defer sw.reconnecting.Delete(string(addr.ID))
+
+	start := time.Now()
+	sw.Logger.Info("Reconnecting to peer", "addr", addr)
+	for i := 0; i < reconnectAttempts; i++ {
+		if !sw.IsRunning() {
+			return
+		}
+
+		err := sw.DialPeerWithAddress(addr, true)
+		if err == nil {
+			return // success
+		}
+
+		sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr)
+		// sleep a set amount
+		sw.randomSleep(reconnectInterval)
+		continue
+	}
+
+	sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff",
+		"addr", addr, "elapsed", time.Since(start))
+	for i := 0; i < reconnectBackOffAttempts; i++ {
+		if !sw.IsRunning() {
+			return
+		}
+
+		// sleep an exponentially increasing amount
+		sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i))
+		sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second)
+		err := sw.DialPeerWithAddress(addr, true)
+		if err == nil {
+			return // success
+		}
+		sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr)
+	}
+	sw.Logger.Error("Failed to reconnect to peer. Giving up", "addr", addr, "elapsed", time.Since(start))
+}
+
+// SetAddrBook sets the address book on the Switch.
+func (sw *Switch) SetAddrBook(addrBook AddrBook) {
+	sw.addrBook = addrBook
+}
+
+// MarkPeerAsGood marks the given peer as good when it did something useful
+// like contributed to consensus.
+func (sw *Switch) MarkPeerAsGood(peer Peer) {
+	if sw.addrBook != nil {
+		sw.addrBook.MarkGood(peer.NodeInfo().NetAddress())
+	}
+}
+
+//---------------------------------------------------------------------
+// Dialing
+
+// IsDialing returns true if the switch is currently dialing the given ID.
+func (sw *Switch) IsDialing(id ID) bool {
+	return sw.dialing.Has(string(id))
+}
+
+// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent).
+// Used to dial peers from config on startup or from unsafe-RPC (trusted sources).
+// TODO: remove addrBook arg since it's now set on the switch
+func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error {
+	netAddrs, errs := NewNetAddressStrings(peers)
+	// only log errors, dial correct addresses
+	for _, err := range errs {
+		sw.Logger.Error("Error in peer's address", "err", err)
+	}
+
+	ourAddr := sw.nodeInfo.NetAddress()
+
+	// TODO: this code feels like it's in the wrong place.
+ // The integration tests depend on the addrBook being saved + // right away but maybe we can change that. Recall that + // the addrBook is only written to disk every 2min + if addrBook != nil { + // add peers to `addrBook` + for _, netAddr := range netAddrs { + // do not add our address or ID + if !netAddr.Same(ourAddr) { + if err := addrBook.AddAddress(netAddr, ourAddr); err != nil { + sw.Logger.Error("Can't add peer's address to addrbook", "err", err) + } + } + } + // Persist some peers to disk right away. + // NOTE: integration tests depend on this + addrBook.Save() + } + + // permute the list, dial them in random order. + perm := sw.rng.Perm(len(netAddrs)) + for i := 0; i < len(perm); i++ { + go func(i int) { + j := perm[i] + + addr := netAddrs[j] + // do not dial ourselves + if addr.Same(ourAddr) { + return + } + + sw.randomSleep(0) + err := sw.DialPeerWithAddress(addr, persistent) + if err != nil { + switch err.(type) { + case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID: + sw.Logger.Debug("Error dialing peer", "err", err) + default: + sw.Logger.Error("Error dialing peer", "err", err) + } + } + }(i) + } + return nil +} + +// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects and authenticates successfully. +// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails. +func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error { + sw.dialing.Set(string(addr.ID), addr) + defer sw.dialing.Delete(string(addr.ID)) + return sw.addOutboundPeerWithConfig(addr, sw.config, persistent) +} + +// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] +func (sw *Switch) randomSleep(interval time.Duration) { + r := time.Duration(sw.rng.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond + time.Sleep(r + interval) +} + +//------------------------------------------------------------------------------------ +// Connection filtering + +// FilterConnByAddr returns an error if connecting to the given address is forbidden. +func (sw *Switch) FilterConnByAddr(addr net.Addr) error { + if sw.filterConnByAddr != nil { + return sw.filterConnByAddr(addr) + } + return nil +} + +// FilterConnByID returns an error if connecting to the given peer ID is forbidden. +func (sw *Switch) FilterConnByID(id ID) error { + if sw.filterConnByID != nil { + return sw.filterConnByID(id) + } + return nil + +} + +// SetAddrFilter sets the function for filtering connections by address. +func (sw *Switch) SetAddrFilter(f func(net.Addr) error) { + sw.filterConnByAddr = f +} + +// SetIDFilter sets the function for filtering connections by peer ID. +func (sw *Switch) SetIDFilter(f func(ID) error) { + sw.filterConnByID = f +} + +//------------------------------------------------------------------------------------ + +func (sw *Switch) listenerRoutine(l Listener) { + for { + inConn, ok := <-l.Connections() + if !ok { + break + } + + // ignore connection if we already have enough + // leave room for MinNumOutboundPeers + maxPeers := sw.config.MaxNumPeers - DefaultMinNumOutboundPeers + if maxPeers <= sw.peers.Size() { + sw.Logger.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxPeers) + continue + } + + // New inbound connection! 
+		err := sw.addInboundPeerWithConfig(inConn, sw.config)
+		if err != nil {
+			sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "err", err)
+			continue
+		}
+	}
+
+	// cleanup
+}
+
+func (sw *Switch) addInboundPeerWithConfig(
+	conn net.Conn,
+	config *config.P2PConfig,
+) error {
+	peerConn, err := newInboundPeerConn(conn, config, sw.nodeKey.PrivKey)
+	if err != nil {
+		conn.Close() // peer is nil
+		return err
+	}
+	if err = sw.addPeer(peerConn); err != nil {
+		peerConn.CloseConn()
+		return err
+	}
+
+	return nil
+}
+
+// dial the peer; make secret connection; authenticate against the dialed ID;
+// add the peer.
+// If dialing fails, start the reconnect loop. If the handshake fails, it's over.
+// If the peer is started successfully, reconnectToPeer will run when
+// StopPeerForError is called.
+func (sw *Switch) addOutboundPeerWithConfig(
+	addr *NetAddress,
+	config *config.P2PConfig,
+	persistent bool,
+) error {
+	sw.Logger.Info("Dialing peer", "address", addr)
+	peerConn, err := newOutboundPeerConn(
+		addr,
+		config,
+		persistent,
+		sw.nodeKey.PrivKey,
+	)
+	if err != nil {
+		if persistent {
+			go sw.reconnectToPeer(addr)
+		}
+		return err
+	}
+
+	if err := sw.addPeer(peerConn); err != nil {
+		peerConn.CloseConn()
+		return err
+	}
+	return nil
+}
+
+// addPeer performs the Tendermint P2P handshake with a peer
+// that already has a SecretConnection. If all goes well,
+// it starts the peer and adds it to the switch.
+// NOTE: This performs a blocking handshake before the peer is added.
+// NOTE: If error is returned, caller is responsible for calling
+// peer.CloseConn()
+func (sw *Switch) addPeer(pc peerConn) error {
+	addr := pc.conn.RemoteAddr()
+	if err := sw.FilterConnByAddr(addr); err != nil {
+		return err
+	}
+
+	// Exchange NodeInfo on the conn
+	peerNodeInfo, err := pc.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.config.HandshakeTimeout))
+	if err != nil {
+		return err
+	}
+
+	peerID := peerNodeInfo.ID
+
+	// ensure connection key matches self reported key
+	connID := pc.ID()
+
+	if peerID != connID {
+		return fmt.Errorf(
+			"nodeInfo.ID() (%v) doesn't match conn.ID() (%v)",
+			peerID,
+			connID,
+		)
+	}
+
+	// Validate the peer's NodeInfo
+	if err := peerNodeInfo.Validate(); err != nil {
+		return err
+	}
+
+	// Avoid self
+	if sw.nodeKey.ID() == peerID {
+		addr := peerNodeInfo.NetAddress()
+		// remove the given address from the address book
+		// and add to our addresses to avoid dialing again
+		sw.addrBook.RemoveAddress(addr)
+		sw.addrBook.AddOurAddress(addr)
+		return ErrSwitchConnectToSelf{addr}
+	}
+
+	// Avoid duplicate
+	if sw.peers.Has(peerID) {
+		return ErrSwitchDuplicatePeerID{peerID}
+	}
+
+	// Check for duplicate connection or peer info IP.
+	if !sw.config.AllowDuplicateIP &&
+		(sw.peers.HasIP(pc.RemoteIP()) ||
+			sw.peers.HasIP(peerNodeInfo.NetAddress().IP)) {
+		return ErrSwitchDuplicatePeerIP{pc.RemoteIP()}
+	}
+
+	// Filter peer against the ID whitelist
+	if err := sw.FilterConnByID(peerID); err != nil {
+		return err
+	}
+
+	// Check version, chain id
+	if err := sw.nodeInfo.CompatibleWith(peerNodeInfo); err != nil {
+		return err
+	}
+
+	peer := newPeer(pc, sw.mConfig, peerNodeInfo, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
+	peer.SetLogger(sw.Logger.With("peer", addr))
+
+	peer.Logger.Info("Successful handshake with peer", "peerNodeInfo", peerNodeInfo)
+
+	// All good. Start peer
+	if sw.IsRunning() {
+		if err = sw.startInitPeer(peer); err != nil {
+			return err
+		}
+	}
+
+	// Add the peer to .peers.
+ // We start it first so that a peer in the list is safe to Stop. + // It should not err since we already checked peers.Has(). + if err := sw.peers.Add(peer); err != nil { + return err + } + sw.metrics.Peers.Add(float64(1)) + + sw.Logger.Info("Added peer", "peer", peer) + return nil +} + +func (sw *Switch) startInitPeer(peer *peer) error { + err := peer.Start() // spawn send/recv routines + if err != nil { + // Should never happen + sw.Logger.Error("Error starting peer", "peer", peer, "err", err) + return err + } + + for _, reactor := range sw.reactors { + reactor.AddPeer(peer) + } + + return nil +} diff --git a/p2p/switch_test.go b/p2p/switch_test.go new file mode 100644 index 000000000..97539112e --- /dev/null +++ b/p2p/switch_test.go @@ -0,0 +1,428 @@ +package p2p + +import ( + "bytes" + "fmt" + "net" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p/conn" +) + +var ( + cfg *config.P2PConfig +) + +func init() { + cfg = config.DefaultP2PConfig() + cfg.PexReactor = true + cfg.AllowDuplicateIP = true +} + +type PeerMessage struct { + PeerID ID + Bytes []byte + Counter int +} + +type TestReactor struct { + BaseReactor + + mtx sync.Mutex + channels []*conn.ChannelDescriptor + logMessages bool + msgsCounter int + msgsReceived map[byte][]PeerMessage +} + +func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { + tr := &TestReactor{ + channels: channels, + logMessages: logMessages, + msgsReceived: make(map[byte][]PeerMessage), + } + tr.BaseReactor = *NewBaseReactor("TestReactor", tr) + tr.SetLogger(log.TestingLogger()) + return tr +} + +func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { + return tr.channels +} + +func (tr *TestReactor) AddPeer(peer Peer) {} + +func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} + +func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { + if tr.logMessages { + tr.mtx.Lock() + defer tr.mtx.Unlock() + //fmt.Printf("Received: %X, %X\n", chID, msgBytes) + tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) + tr.msgsCounter++ + } +} + +func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { + tr.mtx.Lock() + defer tr.mtx.Unlock() + return tr.msgsReceived[chID] +} + +//----------------------------------------------------------------------------- + +// convenience method for creating two switches connected to each other. +// XXX: note this uses net.Pipe and not a proper TCP conn +func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { + // Create two switches that will be interconnected. 
+	switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches)
+	return switches[0], switches[1]
+}
+
+func initSwitchFunc(i int, sw *Switch) *Switch {
+	sw.SetAddrBook(&addrBookMock{
+		addrs:    make(map[string]struct{}),
+		ourAddrs: make(map[string]struct{})})
+
+	// Make two reactors of two channels each
+	sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
+		{ID: byte(0x00), Priority: 10},
+		{ID: byte(0x01), Priority: 10},
+	}, true))
+	sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
+		{ID: byte(0x02), Priority: 10},
+		{ID: byte(0x03), Priority: 10},
+	}, true))
+
+	return sw
+}
+
+func TestSwitches(t *testing.T) {
+	s1, s2 := MakeSwitchPair(t, initSwitchFunc)
+	defer s1.Stop()
+	defer s2.Stop()
+
+	if s1.Peers().Size() != 1 {
+		t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size())
+	}
+	if s2.Peers().Size() != 1 {
+		t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size())
+	}
+
+	// Let's send some messages
+	ch0Msg := []byte("channel zero")
+	ch1Msg := []byte("channel foo")
+	ch2Msg := []byte("channel bar")
+
+	s1.Broadcast(byte(0x00), ch0Msg)
+	s1.Broadcast(byte(0x01), ch1Msg)
+	s1.Broadcast(byte(0x02), ch2Msg)
+
+	assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
+	assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
+	assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
+}
+
+func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
+	ticker := time.NewTicker(checkPeriod)
+	for {
+		select {
+		case <-ticker.C:
+			msgs := reactor.getMsgs(channel)
+			if len(msgs) > 0 {
+				if !bytes.Equal(msgs[0].Bytes, msgBytes) {
+					t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
+				}
+				return
+			}
+		case <-time.After(timeout):
+			t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
+		}
+	}
+}
+
+func TestConnAddrFilter(t *testing.T) {
+	s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	defer s1.Stop()
+	defer s2.Stop()
+
+	c1, c2 := conn.NetPipe()
+
+	s1.SetAddrFilter(func(addr net.Addr) error {
+		if addr.String() == c1.RemoteAddr().String() {
+			return fmt.Errorf("Error: pipe is blacklisted")
+		}
+		return nil
+	})
+
+	// connect to good peer
+	go func() {
+		err := s1.addPeerWithConnection(c1)
+		assert.NotNil(t, err, "expected err")
+	}()
+	go func() {
+		err := s2.addPeerWithConnection(c2)
+		assert.NotNil(t, err, "expected err")
+	}()
+
+	assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
+	assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
+}
+
+func TestSwitchFiltersOutItself(t *testing.T) {
+	s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
+	// addr := s1.NodeInfo().NetAddress()
+
+	// // add ourselves like we do in node.go#427
+	// s1.addrBook.AddOurAddress(addr)
+
+	// simulate s1 having a public IP by creating a remote peer with the same ID
+	rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
+	rp.Start()
+
+	// addr should be rejected in addPeer based on the same ID
+	err := s1.DialPeerWithAddress(rp.Addr(), false)
+	if assert.Error(t, err) {
+		assert.Equal(t, ErrSwitchConnectToSelf{rp.Addr()}.Error(), err.Error())
+	}
+
+	assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
+
+	assert.False(t, s1.addrBook.HasAddress(rp.Addr()))
+
+	rp.Stop()
+
+	assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
+}
+
+func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
+	time.Sleep(timeout)
+	if sw.Peers().Size() != 0 {
+		t.Fatalf("Expected %v to not connect to any peers, got %d", sw, sw.Peers().Size())
+	}
+}
+
+func TestConnIDFilter(t *testing.T) {
+	s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	defer s1.Stop()
+	defer s2.Stop()
+
+	c1, c2 := conn.NetPipe()
+
+	s1.SetIDFilter(func(id ID) error {
+		if id == s2.nodeInfo.ID {
+			return fmt.Errorf("Error: pipe is blacklisted")
+		}
+		return nil
+	})
+
+	s2.SetIDFilter(func(id ID) error {
+		if id == s1.nodeInfo.ID {
+			return fmt.Errorf("Error: pipe is blacklisted")
+		}
+		return nil
+	})
+
+	go func() {
+		err := s1.addPeerWithConnection(c1)
+		assert.NotNil(t, err, "expected error")
+	}()
+	go func() {
+		err := s2.addPeerWithConnection(c2)
+		assert.NotNil(t, err, "expected error")
+	}()
+
+	assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
+	assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
+}
+
+func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
+	assert, require := assert.New(t), require.New(t)
+
+	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	err := sw.Start()
+	if err != nil {
+		t.Error(err)
+	}
+	defer sw.Stop()
+
+	// simulate remote peer
+	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg}
+	rp.Start()
+	defer rp.Stop()
+
+	pc, err := newOutboundPeerConn(rp.Addr(), cfg, false, sw.nodeKey.PrivKey)
+	require.Nil(err)
+	err = sw.addPeer(pc)
+	require.Nil(err)
+
+	peer := sw.Peers().Get(rp.ID())
+	require.NotNil(peer)
+
+	// simulate failure by closing connection
+	pc.CloseConn()
+
+	assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
+	assert.False(peer.IsRunning())
+}
+
+func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
+	assert, require := assert.New(t), require.New(t)
+
+	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	err := sw.Start()
+	if err != nil {
+		t.Error(err)
+	}
+	defer sw.Stop()
+
+	// simulate remote peer
+	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg}
+	rp.Start()
+	defer rp.Stop()
+
+	pc, err := newOutboundPeerConn(rp.Addr(), cfg, true, sw.nodeKey.PrivKey)
+	// sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodeKey.PrivKey,
+	require.Nil(err)
+
+	require.Nil(sw.addPeer(pc))
+
+	peer := sw.Peers().Get(rp.ID())
+	require.NotNil(peer)
+
+	// simulate failure by closing connection
+	pc.CloseConn()
+
+	// TODO: remove sleep, detect the disconnection, wait for reconnect
+	npeers := sw.Peers().Size()
+	for i := 0; i < 20; i++ {
+		time.Sleep(250 * time.Millisecond)
+		npeers = sw.Peers().Size()
+		if npeers > 0 {
+			break
+		}
+	}
+	assert.NotZero(npeers)
+	assert.False(peer.IsRunning())
+
+	// simulate another remote peer
+	rp = &remotePeer{
+		PrivKey: crypto.GenPrivKeyEd25519(),
+		Config:  cfg,
+		// Use a different interface to prevent the duplicate-IP filter; this
+		// will break beyond two peers.
+		listenAddr: "127.0.0.1:0",
+	}
+	rp.Start()
+	defer rp.Stop()
+
+	// simulate first time dial failure
+	conf := config.DefaultP2PConfig()
+	conf.TestDialFail = true
+	err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true)
+	require.NotNil(err)
+
+	// DialPeerWithAddress - sw.peerConfig resets the dialer
+
+	// TODO: same as above
+	for i := 0; i < 20; i++ {
+		time.Sleep(250 * time.Millisecond)
+		npeers = sw.Peers().Size()
+		if npeers > 1 {
+			break
+		}
+	}
+	assert.EqualValues(2, npeers)
+}
+
+func TestSwitchFullConnectivity(t *testing.T) {
+	switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
+	defer func() {
+		for _, sw := range switches {
+			sw.Stop()
+		}
+	}()
+
+	for i, sw := range switches {
+		if sw.Peers().Size() != 2 {
+			t.Fatalf("Expected each switch to be connected to 2 others, but switch %d is only connected to %d", i, sw.Peers().Size())
+		}
+	}
+}
+
+func BenchmarkSwitchBroadcast(b *testing.B) {
+	s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
+		// Make two reactors of two channels each
+		sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
+			{ID: byte(0x00), Priority: 10},
+			{ID: byte(0x01), Priority: 10},
+		}, false))
+		sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
+			{ID: byte(0x02), Priority: 10},
+			{ID: byte(0x03), Priority: 10},
+		}, false))
+		return sw
+	})
+	defer s1.Stop()
+	defer s2.Stop()
+
+	// Allow time for goroutines to boot up
+	time.Sleep(1 * time.Second)
+
+	b.ResetTimer()
+
+	numSuccess, numFailure := 0, 0
+
+	// Send a random message from the foo channel to another
+	for i := 0; i < b.N; i++ {
+		chID := byte(i % 4)
+		successChan := s1.Broadcast(chID, []byte("test data"))
+		for s := range successChan {
+			if s {
+				numSuccess++
+			} else {
+				numFailure++
+			}
+		}
+	}
+
+	b.Logf("success: %v, failure: %v", numSuccess, numFailure)
+}
+
+type addrBookMock struct {
+	addrs    map[string]struct{}
+	ourAddrs map[string]struct{}
+}
+
+var _ AddrBook = (*addrBookMock)(nil)
+
+func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
+	book.addrs[addr.String()] = struct{}{}
+	return nil
+}
+func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} }
+func (book *addrBookMock) OurAddress(addr *NetAddress) bool {
+	_, ok := 
book.ourAddrs[addr.String()] + return ok +} +func (book *addrBookMock) MarkGood(*NetAddress) {} +func (book *addrBookMock) HasAddress(addr *NetAddress) bool { + _, ok := book.addrs[addr.String()] + return ok +} +func (book *addrBookMock) RemoveAddress(addr *NetAddress) { + delete(book.addrs, addr.String()) +} +func (book *addrBookMock) Save() {} diff --git a/p2p/test_util.go b/p2p/test_util.go new file mode 100644 index 000000000..467532f0f --- /dev/null +++ b/p2p/test_util.go @@ -0,0 +1,156 @@ +package p2p + +import ( + "fmt" + "net" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p/conn" +) + +func AddPeerToSwitch(sw *Switch, peer Peer) { + sw.peers.Add(peer) +} + +func CreateRandomPeer(outbound bool) *peer { + addr, netAddr := CreateRoutableAddr() + p := &peer{ + peerConn: peerConn{ + outbound: outbound, + }, + nodeInfo: NodeInfo{ + ID: netAddr.ID, + ListenAddr: netAddr.DialString(), + }, + mconn: &conn.MConnection{}, + } + p.SetLogger(log.TestingLogger().With("peer", addr)) + return p +} + +func CreateRoutableAddr() (addr string, netAddr *NetAddress) { + for { + var err error + addr = cmn.Fmt("%X@%v.%v.%v.%v:26656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256) + netAddr, err = NewNetAddressString(addr) + if err != nil { + panic(err) + } + if netAddr.Routable() { + break + } + } + return +} + +//------------------------------------------------------------------ +// Connects switches via arbitrary net.Conn. Used for testing. + +const TEST_HOST = "localhost" + +// MakeConnectedSwitches returns n switches, connected according to the connect func. +// If connect==Connect2Switches, the switches will be fully connected. +// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). +// NOTE: panics if any switch fails to start. +func MakeConnectedSwitches(cfg *config.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch { + switches := make([]*Switch, n) + for i := 0; i < n; i++ { + switches[i] = MakeSwitch(cfg, i, TEST_HOST, "123.123.123", initSwitch) + } + + if err := StartSwitches(switches); err != nil { + panic(err) + } + + for i := 0; i < n; i++ { + for j := i + 1; j < n; j++ { + connect(switches, i, j) + } + } + + return switches +} + +// Connect2Switches will connect switches i and j via net.Pipe(). +// Blocks until a connection is established. +// NOTE: caller ensures i and j are within bounds. +func Connect2Switches(switches []*Switch, i, j int) { + switchI := switches[i] + switchJ := switches[j] + + c1, c2 := conn.NetPipe() + + doneCh := make(chan struct{}) + go func() { + err := switchI.addPeerWithConnection(c1) + if err != nil { + panic(err) + } + doneCh <- struct{}{} + }() + go func() { + err := switchJ.addPeerWithConnection(c2) + if err != nil { + panic(err) + } + doneCh <- struct{}{} + }() + <-doneCh + <-doneCh +} + +func (sw *Switch) addPeerWithConnection(conn net.Conn) error { + pc, err := newInboundPeerConn(conn, sw.config, sw.nodeKey.PrivKey) + if err != nil { + if err := conn.Close(); err != nil { + sw.Logger.Error("Error closing connection", "err", err) + } + return err + } + if err = sw.addPeer(pc); err != nil { + pc.CloseConn() + return err + } + + return nil +} + +// StartSwitches calls sw.Start() for each given switch. 
+// It returns the first encountered error.
+func StartSwitches(switches []*Switch) error {
+	for _, s := range switches {
+		err := s.Start() // start switch and reactors
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func MakeSwitch(cfg *config.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
+	// new switch, add reactors
+	// TODO: let the config be passed in?
+	nodeKey := &NodeKey{
+		PrivKey: crypto.GenPrivKeyEd25519(),
+	}
+	sw := NewSwitch(cfg)
+	sw.SetLogger(log.TestingLogger())
+	sw = initSwitch(i, sw)
+	ni := NodeInfo{
+		ID:         nodeKey.ID(),
+		Moniker:    cmn.Fmt("switch%d", i),
+		Network:    network,
+		Version:    version,
+		ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023),
+	}
+	for ch := range sw.reactorsByCh {
+		ni.Channels = append(ni.Channels, ch)
+	}
+	sw.SetNodeInfo(ni)
+	sw.SetNodeKey(nodeKey)
+	return sw
+}
diff --git a/p2p/trust/config.go b/p2p/trust/config.go
new file mode 100644
index 000000000..b20a8b2cb
--- /dev/null
+++ b/p2p/trust/config.go
@@ -0,0 +1,55 @@
+package trust
+
+import "time"
+
+// TrustMetricConfig - Configures the weight functions and time intervals for the metric
+type TrustMetricConfig struct {
+	// Determines the percentage given to current behavior
+	ProportionalWeight float64
+
+	// Determines the percentage given to prior behavior
+	IntegralWeight float64
+
+	// The window of time that the trust metric will track events across.
+	// This can be set to cover many days without issue
+	TrackingWindow time.Duration
+
+	// Each interval should be short for adaptability.
+	// Less than 30 seconds is too sensitive,
+	// and greater than 5 minutes will make the metric numb
+	IntervalLength time.Duration
+}
+
+// DefaultConfig returns a config with values that have been tested and produce desirable results
+func DefaultConfig() TrustMetricConfig {
+	return TrustMetricConfig{
+		ProportionalWeight: 0.4,
+		IntegralWeight:     0.6,
+		TrackingWindow:     (time.Minute * 60 * 24) * 14, // 14 days.
+		IntervalLength:     1 * time.Minute,
+	}
+}
+
+// customConfig ensures that all configuration elements have valid values,
+// falling back to the defaults for any that are unset
+func customConfig(tmc TrustMetricConfig) TrustMetricConfig {
+	config := DefaultConfig()
+
+	// Check the config for set values, and setup appropriately
+	if tmc.ProportionalWeight > 0 {
+		config.ProportionalWeight = tmc.ProportionalWeight
+	}
+
+	if tmc.IntegralWeight > 0 {
+		config.IntegralWeight = tmc.IntegralWeight
+	}
+
+	if tmc.IntervalLength > time.Duration(0) {
+		config.IntervalLength = tmc.IntervalLength
+	}
+
+	if tmc.TrackingWindow > time.Duration(0) &&
+		tmc.TrackingWindow >= config.IntervalLength {
+		config.TrackingWindow = tmc.TrackingWindow
+	}
+	return config
+}
diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go
new file mode 100644
index 000000000..c0175a93f
--- /dev/null
+++ b/p2p/trust/metric.go
@@ -0,0 +1,412 @@
+// Copyright 2017 Tendermint. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+ +package trust + +import ( + "math" + "sync" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +//--------------------------------------------------------------------------------------- + +const ( + // The weight applied to the derivative when current behavior is >= previous behavior + defaultDerivativeGamma1 = 0 + + // The weight applied to the derivative when current behavior is less than previous behavior + defaultDerivativeGamma2 = 1.0 + + // The weight applied to history data values when calculating the history value + defaultHistoryDataWeight = 0.8 +) + +// MetricHistoryJSON - history data necessary to save the trust metric +type MetricHistoryJSON struct { + NumIntervals int `json:"intervals"` + History []float64 `json:"history"` +} + +// TrustMetric - keeps track of peer reliability +// See tendermint/docs/architecture/adr-006-trust-metric.md for details +type TrustMetric struct { + cmn.BaseService + + // Mutex that protects the metric from concurrent access + mtx sync.Mutex + + // Determines the percentage given to current behavior + proportionalWeight float64 + + // Determines the percentage given to prior behavior + integralWeight float64 + + // Count of how many time intervals this metric has been tracking + numIntervals int + + // Size of the time interval window for this trust metric + maxIntervals int + + // The time duration for a single time interval + intervalLen time.Duration + + // Stores the trust history data for this metric + history []float64 + + // Weights applied to the history data when calculating the history value + historyWeights []float64 + + // The sum of the history weights used when calculating the history value + historyWeightSum float64 + + // The current number of history data elements + historySize int + + // The maximum number of history data elements + historyMaxSize int + + // The calculated history value for the current time interval + historyValue float64 + + // The number of recorded good and bad events for the current time interval + bad, good float64 + + // While true, history data is not modified + paused bool + + // Used during testing in order to control the passing of time intervals + testTicker MetricTicker +} + +// NewMetric returns a trust metric with the default configuration. +// Use Start to begin tracking the quality of peer behavior over time +func NewMetric() *TrustMetric { + return NewMetricWithConfig(DefaultConfig()) +} + +// NewMetricWithConfig returns a trust metric with a custom configuration. 
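+// Zero-valued fields of tmc fall back to the DefaultConfig values via
+// customConfig, so a partial configuration is fine. A minimal sketch
+// (the weights below are purely illustrative):
+//
+//	tm := NewMetricWithConfig(TrustMetricConfig{
+//		ProportionalWeight: 0.5,
+//		IntegralWeight:     0.5,
+//	})
+//	tm.Start()
+//	tm.GoodEvents(1)
+//	score := tm.TrustScore() // 0..100
+//	tm.Stop()
+//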
+// Use Start to begin tracking the quality of peer behavior over time +func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric { + tm := new(TrustMetric) + config := customConfig(tmc) + + // Setup using the configuration values + tm.proportionalWeight = config.ProportionalWeight + tm.integralWeight = config.IntegralWeight + tm.intervalLen = config.IntervalLength + // The maximum number of time intervals is the tracking window / interval length + tm.maxIntervals = int(config.TrackingWindow / tm.intervalLen) + // The history size will be determined by the maximum number of time intervals + tm.historyMaxSize = intervalToHistoryOffset(tm.maxIntervals) + 1 + // This metric has a perfect history so far + tm.historyValue = 1.0 + + tm.BaseService = *cmn.NewBaseService(nil, "TrustMetric", tm) + return tm +} + +// OnStart implements Service +func (tm *TrustMetric) OnStart() error { + if err := tm.BaseService.OnStart(); err != nil { + return err + } + go tm.processRequests() + return nil +} + +// OnStop implements Service +// Nothing to do since the goroutine shuts down by itself via BaseService.Quit() +func (tm *TrustMetric) OnStop() {} + +// Returns a snapshot of the trust metric history data +func (tm *TrustMetric) HistoryJSON() MetricHistoryJSON { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + return MetricHistoryJSON{ + NumIntervals: tm.numIntervals, + History: tm.history, + } +} + +// Instantiates a trust metric by loading the history data for a single peer. +// This is called only once and only right after creation, which is why the +// lock is not held while accessing the trust metric struct members +func (tm *TrustMetric) Init(hist MetricHistoryJSON) { + // Restore the number of time intervals we have previously tracked + if hist.NumIntervals > tm.maxIntervals { + hist.NumIntervals = tm.maxIntervals + } + tm.numIntervals = hist.NumIntervals + // Restore the history and its current size + if len(hist.History) > tm.historyMaxSize { + // Keep the history no larger than historyMaxSize + last := len(hist.History) - tm.historyMaxSize + hist.History = hist.History[last:] + } + tm.history = hist.History + tm.historySize = len(tm.history) + // Create the history weight values and weight sum + for i := 1; i <= tm.numIntervals; i++ { + x := math.Pow(defaultHistoryDataWeight, float64(i)) // Optimistic weight + tm.historyWeights = append(tm.historyWeights, x) + } + + for _, v := range tm.historyWeights { + tm.historyWeightSum += v + } + // Calculate the history value based on the loaded history data + tm.historyValue = tm.calcHistoryValue() +} + +// Pause tells the metric to pause recording data over time intervals. 
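+// While paused, NextTimeInterval is a no-op, so the history stops advancing.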
+// All method calls that indicate events will unpause the metric +func (tm *TrustMetric) Pause() { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + // Pause the metric for now + tm.paused = true +} + +// BadEvents indicates that an undesirable event(s) took place +func (tm *TrustMetric) BadEvents(num int) { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + tm.unpause() + tm.bad += float64(num) +} + +// GoodEvents indicates that a desirable event(s) took place +func (tm *TrustMetric) GoodEvents(num int) { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + tm.unpause() + tm.good += float64(num) +} + +// TrustValue gets the dependable trust value; always between 0 and 1 +func (tm *TrustMetric) TrustValue() float64 { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + return tm.calcTrustValue() +} + +// TrustScore gets a score based on the trust value always between 0 and 100 +func (tm *TrustMetric) TrustScore() int { + score := tm.TrustValue() * 100 + + return int(math.Floor(score)) +} + +// NextTimeInterval saves current time interval data and prepares for the following interval +func (tm *TrustMetric) NextTimeInterval() { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + if tm.paused { + // Do not prepare for the next time interval while paused + return + } + + // Add the current trust value to the history data + newHist := tm.calcTrustValue() + tm.history = append(tm.history, newHist) + + // Update history and interval counters + if tm.historySize < tm.historyMaxSize { + tm.historySize++ + } else { + // Keep the history no larger than historyMaxSize + last := len(tm.history) - tm.historyMaxSize + tm.history = tm.history[last:] + } + + if tm.numIntervals < tm.maxIntervals { + tm.numIntervals++ + // Add the optimistic weight for the new time interval + wk := math.Pow(defaultHistoryDataWeight, float64(tm.numIntervals)) + tm.historyWeights = append(tm.historyWeights, wk) + tm.historyWeightSum += wk + } + + // Update the history data using Faded Memories + tm.updateFadedMemory() + // Calculate the history value for the upcoming time interval + tm.historyValue = tm.calcHistoryValue() + tm.good = 0 + tm.bad = 0 +} + +// SetTicker allows a TestTicker to be provided that will manually control +// the passing of time from the perspective of the TrustMetric. 
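+// A sketch of how the tests use it (see metric_test.go):
+//
+//	tt := NewTestTicker()
+//	tm := NewMetric()
+//	tm.SetTicker(tt)
+//	tm.Start()
+//	tt.NextTick() // manually advance one time interval
+//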
+// The ticker must be set before Start is called on the metric +func (tm *TrustMetric) SetTicker(ticker MetricTicker) { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + tm.testTicker = ticker +} + +// Copy returns a new trust metric with members containing the same values +func (tm *TrustMetric) Copy() *TrustMetric { + if tm == nil { + return nil + } + + tm.mtx.Lock() + defer tm.mtx.Unlock() + + return &TrustMetric{ + proportionalWeight: tm.proportionalWeight, + integralWeight: tm.integralWeight, + numIntervals: tm.numIntervals, + maxIntervals: tm.maxIntervals, + intervalLen: tm.intervalLen, + history: tm.history, + historyWeights: tm.historyWeights, + historyWeightSum: tm.historyWeightSum, + historySize: tm.historySize, + historyMaxSize: tm.historyMaxSize, + historyValue: tm.historyValue, + good: tm.good, + bad: tm.bad, + paused: tm.paused, + } + +} + +/* Private methods */ + +// This method is for a goroutine that handles all requests on the metric +func (tm *TrustMetric) processRequests() { + t := tm.testTicker + if t == nil { + // No test ticker was provided, so we create a normal ticker + t = NewTicker(tm.intervalLen) + } + defer t.Stop() + // Obtain the raw channel + tick := t.GetChannel() +loop: + for { + select { + case <-tick: + tm.NextTimeInterval() + case <-tm.Quit(): + // Stop all further tracking for this metric + break loop + } + } +} + +// Wakes the trust metric up if it is currently paused +// This method needs to be called with the mutex locked +func (tm *TrustMetric) unpause() { + // Check if this is the first experience with + // what we are tracking since being paused + if tm.paused { + tm.good = 0 + tm.bad = 0 + // New events cause us to unpause the metric + tm.paused = false + } +} + +// Calculates the trust value for the request processing +func (tm *TrustMetric) calcTrustValue() float64 { + weightedP := tm.proportionalWeight * tm.proportionalValue() + weightedI := tm.integralWeight * tm.historyValue + weightedD := tm.weightedDerivative() + + tv := weightedP + weightedI + weightedD + // Do not return a negative value. 
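+	// With DefaultConfig this works out to tv = 0.4*P + 0.6*H + D, where P
+	// is the good/bad ratio for the current interval, H is the weighted
+	// history value, and the derivative D = P - H is applied only when
+	// behavior is degrading. A sharp drop can make D negative enough to
+	// pull tv below zero, hence the clamp: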
+ if tv < 0 { + tv = 0 + } + return tv +} + +// Calculates the current score for good/bad experiences +func (tm *TrustMetric) proportionalValue() float64 { + value := 1.0 + + total := tm.good + tm.bad + if total > 0 { + value = tm.good / total + } + return value +} + +// Strengthens the derivative component when the change is negative +func (tm *TrustMetric) weightedDerivative() float64 { + var weight float64 = defaultDerivativeGamma1 + + d := tm.derivativeValue() + if d < 0 { + weight = defaultDerivativeGamma2 + } + return weight * d +} + +// Calculates the derivative component +func (tm *TrustMetric) derivativeValue() float64 { + return tm.proportionalValue() - tm.historyValue +} + +// Calculates the integral (history) component of the trust value +func (tm *TrustMetric) calcHistoryValue() float64 { + var hv float64 + + for i := 0; i < tm.numIntervals; i++ { + hv += tm.fadedMemoryValue(i) * tm.historyWeights[i] + } + + return hv / tm.historyWeightSum +} + +// Retrieves the actual history data value that represents the requested time interval +func (tm *TrustMetric) fadedMemoryValue(interval int) float64 { + first := tm.historySize - 1 + + if interval == 0 { + // Base case + return tm.history[first] + } + + offset := intervalToHistoryOffset(interval) + return tm.history[first-offset] +} + +// Performs the update for our Faded Memories process, which allows the +// trust metric tracking window to be large while maintaining a small +// number of history data values +func (tm *TrustMetric) updateFadedMemory() { + if tm.historySize < 2 { + return + } + + end := tm.historySize - 1 + // Keep the most recent history element + for count := 1; count < tm.historySize; count++ { + i := end - count + // The older the data is, the more we spread it out + x := math.Pow(2, float64(count)) + // Two history data values are merged into a single value + tm.history[i] = ((tm.history[i] * (x - 1)) + tm.history[i+1]) / x + } +} + +// Map the interval value down to an offset from the beginning of history +func intervalToHistoryOffset(interval int) int { + // The system maintains 2^m interval values in the form of m history + // data values. 
Therefore, we access the ith interval by obtaining + // the history data index = the floor of log2(i) + return int(math.Floor(math.Log2(float64(interval)))) +} diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go new file mode 100644 index 000000000..f690ce557 --- /dev/null +++ b/p2p/trust/metric_test.go @@ -0,0 +1,108 @@ +package trust + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestTrustMetricScores(t *testing.T) { + tm := NewMetric() + tm.Start() + + // Perfect score + tm.GoodEvents(1) + score := tm.TrustScore() + assert.Equal(t, 100, score) + + // Less than perfect score + tm.BadEvents(10) + score = tm.TrustScore() + assert.NotEqual(t, 100, score) + tm.Stop() +} + +func TestTrustMetricConfig(t *testing.T) { + // 7 days + window := time.Minute * 60 * 24 * 7 + config := TrustMetricConfig{ + TrackingWindow: window, + IntervalLength: 2 * time.Minute, + } + + tm := NewMetricWithConfig(config) + tm.Start() + + // The max time intervals should be the TrackingWindow / IntervalLen + assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals) + + dc := DefaultConfig() + // These weights should still be the default values + assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight) + assert.Equal(t, dc.IntegralWeight, tm.integralWeight) + tm.Stop() + tm.Wait() + + config.ProportionalWeight = 0.3 + config.IntegralWeight = 0.7 + tm = NewMetricWithConfig(config) + tm.Start() + + // These weights should be equal to our custom values + assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight) + assert.Equal(t, config.IntegralWeight, tm.integralWeight) + tm.Stop() + tm.Wait() +} + +func TestTrustMetricCopyNilPointer(t *testing.T) { + var tm *TrustMetric + + ctm := tm.Copy() + + assert.Nil(t, ctm) +} + +// XXX: This test fails non-deterministically +func _TestTrustMetricStopPause(t *testing.T) { + // The TestTicker will provide manual control over + // the passing of time within the metric + tt := NewTestTicker() + tm := NewMetric() + tm.SetTicker(tt) + tm.Start() + // Allow some time intervals to pass and pause + tt.NextTick() + tt.NextTick() + tm.Pause() + + // could be 1 or 2 because Pause and NextTick race + first := tm.Copy().numIntervals + + // Allow more time to pass and check the intervals are unchanged + tt.NextTick() + tt.NextTick() + assert.Equal(t, first, tm.Copy().numIntervals) + + // Get the trust metric activated again + tm.GoodEvents(5) + // Allow some time intervals to pass and stop + tt.NextTick() + tt.NextTick() + tm.Stop() + tm.Wait() + + second := tm.Copy().numIntervals + // Allow more intervals to pass while the metric is stopped + // and check that the number of intervals match + tm.NextTimeInterval() + tm.NextTimeInterval() + // XXX: fails non-deterministically: + // expected 5, got 6 + assert.Equal(t, second+2, tm.Copy().numIntervals) + + if first > second { + t.Fatalf("numIntervals should always increase or stay the same over time") + } +} diff --git a/p2p/trust/store.go b/p2p/trust/store.go new file mode 100644 index 000000000..31f659a43 --- /dev/null +++ b/p2p/trust/store.go @@ -0,0 +1,207 @@ +// Copyright 2017 Tendermint. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
+ +package trust + +import ( + "encoding/json" + "sync" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" +) + +const defaultStorePeriodicSaveInterval = 1 * time.Minute + +var trustMetricKey = []byte("trustMetricStore") + +// TrustMetricStore - Manages all trust metrics for peers +type TrustMetricStore struct { + cmn.BaseService + + // Maps a Peer.Key to that peer's TrustMetric + peerMetrics map[string]*TrustMetric + + // Mutex that protects the map and history data file + mtx sync.Mutex + + // The db where peer trust metric history data will be stored + db dbm.DB + + // This configuration will be used when creating new TrustMetrics + config TrustMetricConfig +} + +// NewTrustMetricStore returns a store that saves data to the DB +// and uses the config when creating new trust metrics. +// Use Start to to initialize the trust metric store +func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore { + tms := &TrustMetricStore{ + peerMetrics: make(map[string]*TrustMetric), + db: db, + config: tmc, + } + + tms.BaseService = *cmn.NewBaseService(nil, "TrustMetricStore", tms) + return tms +} + +// OnStart implements Service +func (tms *TrustMetricStore) OnStart() error { + if err := tms.BaseService.OnStart(); err != nil { + return err + } + + tms.mtx.Lock() + defer tms.mtx.Unlock() + + tms.loadFromDB() + go tms.saveRoutine() + return nil +} + +// OnStop implements Service +func (tms *TrustMetricStore) OnStop() { + tms.BaseService.OnStop() + + tms.mtx.Lock() + defer tms.mtx.Unlock() + + // Stop all trust metric go-routines + for _, tm := range tms.peerMetrics { + tm.Stop() + } + + // Make the final trust history data save + tms.saveToDB() +} + +// Size returns the number of entries in the trust metric store +func (tms *TrustMetricStore) Size() int { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + return tms.size() +} + +// AddPeerTrustMetric takes an existing trust metric and associates it with a peer key. +// The caller is expected to call Start on the TrustMetric being added +func (tms *TrustMetricStore) AddPeerTrustMetric(key string, tm *TrustMetric) { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + if key == "" || tm == nil { + return + } + tms.peerMetrics[key] = tm +} + +// GetPeerTrustMetric returns a trust metric by peer key +func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + tm, ok := tms.peerMetrics[key] + if !ok { + // If the metric is not available, we will create it + tm = NewMetricWithConfig(tms.config) + tm.Start() + // The metric needs to be in the map + tms.peerMetrics[key] = tm + } + return tm +} + +// PeerDisconnected pauses the trust metric associated with the peer identified by the key +func (tms *TrustMetricStore) PeerDisconnected(key string) { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + // If the Peer that disconnected has a metric, pause it + if tm, ok := tms.peerMetrics[key]; ok { + tm.Pause() + } +} + +// Saves the history data for all peers to the store DB. 
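+// It is also what the periodic save routine calls (every minute by default).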
+// This public method acquires the trust metric store lock +func (tms *TrustMetricStore) SaveToDB() { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + tms.saveToDB() +} + +/* Private methods */ + +// size returns the number of entries in the store without acquiring the mutex +func (tms *TrustMetricStore) size() int { + return len(tms.peerMetrics) +} + +/* Loading & Saving */ +/* Both loadFromDB and savetoDB assume the mutex has been acquired */ + +// Loads the history data for all peers from the store DB +// cmn.Panics if file is corrupt +func (tms *TrustMetricStore) loadFromDB() bool { + // Obtain the history data we have so far + bytes := tms.db.Get(trustMetricKey) + if bytes == nil { + return false + } + + peers := make(map[string]MetricHistoryJSON) + err := json.Unmarshal(bytes, &peers) + if err != nil { + cmn.PanicCrisis(cmn.Fmt("Could not unmarshal Trust Metric Store DB data: %v", err)) + } + + // If history data exists in the file, + // load it into trust metric + for key, p := range peers { + tm := NewMetricWithConfig(tms.config) + + tm.Start() + tm.Init(p) + // Load the peer trust metric into the store + tms.peerMetrics[key] = tm + } + return true +} + +// Saves the history data for all peers to the store DB +func (tms *TrustMetricStore) saveToDB() { + tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size()) + + peers := make(map[string]MetricHistoryJSON) + + for key, tm := range tms.peerMetrics { + // Add an entry for the peer identified by key + peers[key] = tm.HistoryJSON() + } + + // Write all the data back to the DB + bytes, err := json.Marshal(peers) + if err != nil { + tms.Logger.Error("Failed to encode the TrustHistory", "err", err) + return + } + tms.db.SetSync(trustMetricKey, bytes) +} + +// Periodically saves the trust history data to the DB +func (tms *TrustMetricStore) saveRoutine() { + t := time.NewTicker(defaultStorePeriodicSaveInterval) + defer t.Stop() +loop: + for { + select { + case <-t.C: + tms.SaveToDB() + case <-tms.Quit(): + break loop + } + } +} diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go new file mode 100644 index 000000000..e1bea8636 --- /dev/null +++ b/p2p/trust/store_test.go @@ -0,0 +1,152 @@ +// Copyright 2017 Tendermint. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
+ +package trust + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" +) + +func TestTrustMetricStoreSaveLoad(t *testing.T) { + dir, err := ioutil.TempDir("", "trust_test") + if err != nil { + panic(err) + } + defer os.Remove(dir) + + historyDB := dbm.NewDB("trusthistory", "goleveldb", dir) + + // 0 peers saved + store := NewTrustMetricStore(historyDB, DefaultConfig()) + store.SetLogger(log.TestingLogger()) + store.saveToDB() + // Load the data from the file + store = NewTrustMetricStore(historyDB, DefaultConfig()) + store.SetLogger(log.TestingLogger()) + store.Start() + // Make sure we still have 0 entries + assert.Zero(t, store.Size()) + + // 100 TestTickers + var tt []*TestTicker + for i := 0; i < 100; i++ { + // The TestTicker will provide manual control over + // the passing of time within the metric + tt = append(tt, NewTestTicker()) + } + // 100 peers + for i := 0; i < 100; i++ { + key := fmt.Sprintf("peer_%d", i) + tm := NewMetric() + + tm.SetTicker(tt[i]) + tm.Start() + store.AddPeerTrustMetric(key, tm) + + tm.BadEvents(10) + tm.GoodEvents(1) + } + // Check that we have 100 entries and save + assert.Equal(t, 100, store.Size()) + // Give the 100 metrics time to process the history data + for i := 0; i < 100; i++ { + tt[i].NextTick() + tt[i].NextTick() + } + // Stop all the trust metrics and save + store.Stop() + + // Load the data from the DB + store = NewTrustMetricStore(historyDB, DefaultConfig()) + store.SetLogger(log.TestingLogger()) + store.Start() + + // Check that we still have 100 peers with imperfect trust values + assert.Equal(t, 100, store.Size()) + for _, tm := range store.peerMetrics { + assert.NotEqual(t, 1.0, tm.TrustValue()) + } + + store.Stop() +} + +func TestTrustMetricStoreConfig(t *testing.T) { + historyDB := dbm.NewDB("", "memdb", "") + + config := TrustMetricConfig{ + ProportionalWeight: 0.5, + IntegralWeight: 0.5, + } + + // Create a store with custom config + store := NewTrustMetricStore(historyDB, config) + store.SetLogger(log.TestingLogger()) + store.Start() + + // Have the store make us a metric with the config + tm := store.GetPeerTrustMetric("TestKey") + + // Check that the options made it to the metric + assert.Equal(t, 0.5, tm.proportionalWeight) + assert.Equal(t, 0.5, tm.integralWeight) + store.Stop() +} + +func TestTrustMetricStoreLookup(t *testing.T) { + historyDB := dbm.NewDB("", "memdb", "") + + store := NewTrustMetricStore(historyDB, DefaultConfig()) + store.SetLogger(log.TestingLogger()) + store.Start() + + // Create 100 peers in the trust metric store + for i := 0; i < 100; i++ { + key := fmt.Sprintf("peer_%d", i) + store.GetPeerTrustMetric(key) + + // Check that the trust metric was successfully entered + ktm := store.peerMetrics[key] + assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key) + } + + store.Stop() +} + +func TestTrustMetricStorePeerScore(t *testing.T) { + historyDB := dbm.NewDB("", "memdb", "") + + store := NewTrustMetricStore(historyDB, DefaultConfig()) + store.SetLogger(log.TestingLogger()) + store.Start() + + key := "TestKey" + tm := store.GetPeerTrustMetric(key) + + // This peer is innocent so far + first := tm.TrustScore() + assert.Equal(t, 100, first) + + // Add some undesirable events and disconnect + tm.BadEvents(1) + first = tm.TrustScore() + assert.NotEqual(t, 100, first) + tm.BadEvents(10) + second := tm.TrustScore() + + if second > first { + t.Errorf("A 
greater number of bad events should lower the trust score") + } + store.PeerDisconnected(key) + + // We will remember our experiences with this peer + tm = store.GetPeerTrustMetric(key) + assert.NotEqual(t, 100, tm.TrustScore()) + store.Stop() +} diff --git a/p2p/trust/ticker.go b/p2p/trust/ticker.go new file mode 100644 index 000000000..3f0f30919 --- /dev/null +++ b/p2p/trust/ticker.go @@ -0,0 +1,62 @@ +// Copyright 2017 Tendermint. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. + +package trust + +import ( + "time" +) + +// MetricTicker provides a single ticker interface for the trust metric +type MetricTicker interface { + // GetChannel returns the receive only channel that fires at each time interval + GetChannel() <-chan time.Time + + // Stop will halt further activity on the ticker channel + Stop() +} + +// The ticker used during testing that provides manual control over time intervals +type TestTicker struct { + C chan time.Time + stopped bool +} + +// NewTestTicker returns our ticker used within test routines +func NewTestTicker() *TestTicker { + c := make(chan time.Time) + return &TestTicker{ + C: c, + } +} + +func (t *TestTicker) GetChannel() <-chan time.Time { + return t.C +} + +func (t *TestTicker) Stop() { + t.stopped = true +} + +// NextInterval manually sends Time on the ticker channel +func (t *TestTicker) NextTick() { + if t.stopped { + return + } + t.C <- time.Now() +} + +// Ticker is just a wrap around time.Ticker that allows it +// to meet the requirements of our interface +type Ticker struct { + *time.Ticker +} + +// NewTicker returns a normal time.Ticker wrapped to meet our interface +func NewTicker(d time.Duration) *Ticker { + return &Ticker{time.NewTicker(d)} +} + +func (t *Ticker) GetChannel() <-chan time.Time { + return t.C +} diff --git a/p2p/types.go b/p2p/types.go new file mode 100644 index 000000000..b11765bb5 --- /dev/null +++ b/p2p/types.go @@ -0,0 +1,8 @@ +package p2p + +import ( + "github.com/tendermint/tendermint/p2p/conn" +) + +type ChannelDescriptor = conn.ChannelDescriptor +type ConnectionStatus = conn.ConnectionStatus diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go new file mode 100644 index 000000000..2de5e7905 --- /dev/null +++ b/p2p/upnp/probe.go @@ -0,0 +1,112 @@ +package upnp + +import ( + "fmt" + "net" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +type UPNPCapabilities struct { + PortMapping bool + Hairpin bool +} + +func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) { + nat, err := Discover() + if err != nil { + return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %v", err) + } + logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP)) + + ext, err := nat.GetExternalAddress() + if err != nil { + return nat, nil, nil, fmt.Errorf("External address error: %v", err) + } + logger.Info(cmn.Fmt("External address: %v", ext)) + + port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0) + if err != nil { + return nat, nil, ext, fmt.Errorf("Port mapping error: %v", err) + } + logger.Info(cmn.Fmt("Port mapping mapped: %v", port)) + + // also run the listener, open for all remote addresses. 
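+	// (a ":port" address with no host binds every local interface)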
+ listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) + if err != nil { + return nat, nil, ext, fmt.Errorf("Error establishing listener: %v", err) + } + return nat, listener, ext, nil +} + +func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supportsHairpin bool) { + // Listener + go func() { + inConn, err := listener.Accept() + if err != nil { + logger.Info(cmn.Fmt("Listener.Accept() error: %v", err)) + return + } + logger.Info(cmn.Fmt("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) + buf := make([]byte, 1024) + n, err := inConn.Read(buf) + if err != nil { + logger.Info(cmn.Fmt("Incoming connection read error: %v", err)) + return + } + logger.Info(cmn.Fmt("Incoming connection read %v bytes: %X", n, buf)) + if string(buf) == "test data" { + supportsHairpin = true + return + } + }() + + // Establish outgoing + outConn, err := net.Dial("tcp", extAddr) + if err != nil { + logger.Info(cmn.Fmt("Outgoing connection dial error: %v", err)) + return + } + + n, err := outConn.Write([]byte("test data")) + if err != nil { + logger.Info(cmn.Fmt("Outgoing connection write error: %v", err)) + return + } + logger.Info(cmn.Fmt("Outgoing connection wrote %v bytes", n)) + + // Wait for data receipt + time.Sleep(1 * time.Second) + return +} + +func Probe(logger log.Logger) (caps UPNPCapabilities, err error) { + logger.Info("Probing for UPnP!") + + intPort, extPort := 8001, 8001 + + nat, listener, ext, err := makeUPNPListener(intPort, extPort, logger) + if err != nil { + return + } + caps.PortMapping = true + + // Deferred cleanup + defer func() { + if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil { + logger.Error(cmn.Fmt("Port mapping delete error: %v", err)) + } + if err := listener.Close(); err != nil { + logger.Error(cmn.Fmt("Listener closing error: %v", err)) + } + }() + + supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger) + if supportsHairpin { + caps.Hairpin = true + } + + return +} diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go new file mode 100644 index 000000000..d53974fc4 --- /dev/null +++ b/p2p/upnp/upnp.go @@ -0,0 +1,393 @@ +// Taken from taipei-torrent. 
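+// (https://github.com/jackpal/Taipei-Torrent)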
+// Just enough UPnP to be able to forward ports +// For more information, see: http://www.upnp-hacks.org/upnp.html +package upnp + +// TODO: use syscalls to get actual ourIP, see issue #712 + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "strconv" + "strings" + "time" +) + +type upnpNAT struct { + serviceURL string + ourIP string + urnDomain string +} + +// protocol is either "udp" or "tcp" +type NAT interface { + GetExternalAddress() (addr net.IP, err error) + AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) + DeletePortMapping(protocol string, externalPort, internalPort int) (err error) +} + +func Discover() (nat NAT, err error) { + ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") + if err != nil { + return + } + conn, err := net.ListenPacket("udp4", ":0") + if err != nil { + return + } + socket := conn.(*net.UDPConn) + defer socket.Close() // nolint: errcheck + + if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { + return nil, err + } + + st := "InternetGatewayDevice:1" + + buf := bytes.NewBufferString( + "M-SEARCH * HTTP/1.1\r\n" + + "HOST: 239.255.255.250:1900\r\n" + + "ST: ssdp:all\r\n" + + "MAN: \"ssdp:discover\"\r\n" + + "MX: 2\r\n\r\n") + message := buf.Bytes() + answerBytes := make([]byte, 1024) + for i := 0; i < 3; i++ { + _, err = socket.WriteToUDP(message, ssdp) + if err != nil { + return + } + var n int + _, _, err = socket.ReadFromUDP(answerBytes) + if err != nil { + return + } + for { + n, _, err = socket.ReadFromUDP(answerBytes) + if err != nil { + break + } + answer := string(answerBytes[0:n]) + if !strings.Contains(answer, st) { + continue + } + // HTTP header field names are case-insensitive. 
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 + locString := "\r\nlocation:" + answer = strings.ToLower(answer) + locIndex := strings.Index(answer, locString) + if locIndex < 0 { + continue + } + loc := answer[locIndex+len(locString):] + endIndex := strings.Index(loc, "\r\n") + if endIndex < 0 { + continue + } + locURL := strings.TrimSpace(loc[0:endIndex]) + var serviceURL, urnDomain string + serviceURL, urnDomain, err = getServiceURL(locURL) + if err != nil { + return + } + var ourIP net.IP + ourIP, err = localIPv4() + if err != nil { + return + } + nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain} + return + } + } + err = errors.New("UPnP port discovery failed") + return +} + +type Envelope struct { + XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` + Soap *SoapBody +} +type SoapBody struct { + XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"` + ExternalIP *ExternalIPAddressResponse +} + +type ExternalIPAddressResponse struct { + XMLName xml.Name `xml:"GetExternalIPAddressResponse"` + IPAddress string `xml:"NewExternalIPAddress"` +} + +type ExternalIPAddress struct { + XMLName xml.Name `xml:"NewExternalIPAddress"` + IP string +} + +type UPNPService struct { + ServiceType string `xml:"serviceType"` + ControlURL string `xml:"controlURL"` +} + +type DeviceList struct { + Device []Device `xml:"device"` +} + +type ServiceList struct { + Service []UPNPService `xml:"service"` +} + +type Device struct { + XMLName xml.Name `xml:"device"` + DeviceType string `xml:"deviceType"` + DeviceList DeviceList `xml:"deviceList"` + ServiceList ServiceList `xml:"serviceList"` +} + +type Root struct { + Device Device +} + +func getChildDevice(d *Device, deviceType string) *Device { + dl := d.DeviceList.Device + for i := 0; i < len(dl); i++ { + if strings.Contains(dl[i].DeviceType, deviceType) { + return &dl[i] + } + } + return nil +} + +func getChildService(d *Device, serviceType string) *UPNPService { + sl := d.ServiceList.Service + for i := 0; i < len(sl); i++ { + if strings.Contains(sl[i].ServiceType, serviceType) { + return &sl[i] + } + } + return nil +} + +func localIPv4() (net.IP, error) { + tt, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, t := range tt { + aa, err := t.Addrs() + if err != nil { + return nil, err + } + for _, a := range aa { + ipnet, ok := a.(*net.IPNet) + if !ok { + continue + } + v4 := ipnet.IP.To4() + if v4 == nil || v4[0] == 127 { // loopback address + continue + } + return v4, nil + } + } + return nil, errors.New("cannot find local IP address") +} + +func getServiceURL(rootURL string) (url, urnDomain string, err error) { + r, err := http.Get(rootURL) + if err != nil { + return + } + defer r.Body.Close() // nolint: errcheck + + if r.StatusCode >= 400 { + err = errors.New(string(r.StatusCode)) + return + } + var root Root + err = xml.NewDecoder(r.Body).Decode(&root) + if err != nil { + return + } + a := &root.Device + if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") { + err = errors.New("No InternetGatewayDevice") + return + } + b := getChildDevice(a, "WANDevice:1") + if b == nil { + err = errors.New("No WANDevice") + return + } + c := getChildDevice(b, "WANConnectionDevice:1") + if c == nil { + err = errors.New("No WANConnectionDevice") + return + } + d := getChildService(c, "WANIPConnection:1") + if d == nil { + // Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice, + // instead of under WanConnectionDevice + d 
= getChildService(b, "WANIPConnection:1")
+
+		if d == nil {
+			err = errors.New("No WANIPConnection")
+			return
+		}
+	}
+	// Extract the domain name, which isn't always 'schemas-upnp-org'
+	urnDomain = strings.Split(d.ServiceType, ":")[1]
+	url = combineURL(rootURL, d.ControlURL)
+	return
+}
+
+func combineURL(rootURL, subURL string) string {
+	protocolEnd := "://"
+	protoEndIndex := strings.Index(rootURL, protocolEnd)
+	a := rootURL[protoEndIndex+len(protocolEnd):]
+	rootIndex := strings.Index(a, "/")
+	return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL
+}
+
+func soapRequest(url, function, message, domain string) (r *http.Response, err error) {
+	fullMessage := "<?xml version=\"1.0\" ?>" +
+		"<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" +
+		"<s:Body>" + message + "</s:Body></s:Envelope>"
+
+	req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"")
+	req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3")
+	//req.Header.Set("Transfer-Encoding", "chunked")
+	req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"")
+	req.Header.Set("Connection", "Close")
+	req.Header.Set("Cache-Control", "no-cache")
+	req.Header.Set("Pragma", "no-cache")
+
+	// log.Stderr("soapRequest ", req)
+
+	r, err = http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	/*if r.Body != nil {
+		defer r.Body.Close()
+	}*/
+
+	if r.StatusCode >= 400 {
+		// log.Stderr(function, r.StatusCode)
+		err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function)
+		r = nil
+		return
+	}
+	return
+}
+
+type statusInfo struct {
+	externalIpAddress string
+}
+
+func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
+
+	message := "<u:GetExternalIPAddress xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
+		"</u:GetExternalIPAddress>"
+
+	var response *http.Response
+	response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
+	if response != nil {
+		defer response.Body.Close() // nolint: errcheck
+	}
+	if err != nil {
+		return
+	}
+	var envelope Envelope
+	data, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return
+	}
+	reader := bytes.NewReader(data)
+	err = xml.NewDecoder(reader).Decode(&envelope)
+	if err != nil {
+		return
+	}
+
+	info = statusInfo{envelope.Soap.ExternalIP.IPAddress}
+	return
+}
+
+// GetExternalAddress returns an external IP. If GetExternalIPAddress action
+// fails or IP returned is invalid, GetExternalAddress returns an error.
+func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
+	info, err := n.getExternalIPAddress()
+	if err != nil {
+		return
+	}
+	addr = net.ParseIP(info.externalIpAddress)
+	if addr == nil {
+		err = fmt.Errorf("Failed to parse IP: %v", info.externalIpAddress)
+	}
+	return
+}
+
+func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) {
+	// A single concatenation would break ARM compilation.
+	message := "<u:AddPortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
+		"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort)
+	message += "</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>"
+	message += "<NewInternalPort>" + strconv.Itoa(internalPort) + "</NewInternalPort>" +
+		"<NewInternalClient>" + n.ourIP + "</NewInternalClient>" +
+		"<NewEnabled>1</NewEnabled><NewPortMappingDescription>"
+	message += description +
+		"</NewPortMappingDescription><NewLeaseDuration>" + strconv.Itoa(timeout) + "</NewLeaseDuration></u:AddPortMapping>"
+
+	var response *http.Response
+	response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain)
+	if response != nil {
+		defer response.Body.Close() // nolint: errcheck
+	}
+	if err != nil {
+		return
+	}
+
+	// TODO: check response to see if the port was forwarded
+	// log.Println(message, response)
+	// JAE:
+	// body, err := ioutil.ReadAll(response.Body)
+	// fmt.Println(string(body), err)
+	mappedExternalPort = externalPort
+	_ = response
+	return
+}
+
+func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
+
+	message := "<u:DeletePortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
+		"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) +
+		"</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" +
+		"</u:DeletePortMapping>"
+
+	var response *http.Response
+	response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain)
+	if response != nil {
+		defer response.Body.Close() // nolint: errcheck
+	}
+	if err != nil {
+		return
+	}
+
+	// TODO: check response to see if the port was deleted
+	// log.Println(message, response)
+	_ = response
+	return
+}
diff --git a/p2p/version.go b/p2p/version.go
new file mode 100644
index 000000000..9a4c7bbaf
--- /dev/null
+++ b/p2p/version.go
@@ -0,0 +1,3 @@
+package p2p
+
+const Version = "0.5.0"
diff --git a/p2p/wire.go b/p2p/wire.go
new file mode 100644
index 000000000..b7ae41253
--- /dev/null
+++ b/p2p/wire.go
@@ -0,0 +1,12 @@
+package p2p
+
+import (
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	crypto.RegisterAmino(cdc)
+}
diff --git a/privval/priv_validator.go b/privval/priv_validator.go
new file mode 100644
index 000000000..1e85bf7b3
--- /dev/null
+++ b/privval/priv_validator.go
@@ -0,0 +1,359 @@
+package privval
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/types"
+)
+
+// TODO: type ?
+const (
+	stepNone      int8 = 0 // Used to distinguish the initial state
+	stepPropose   int8 = 1
+	stepPrevote   int8 = 2
+	stepPrecommit int8 = 3
+)
+
+func voteToStep(vote *types.Vote) int8 {
+	switch vote.Type {
+	case types.VoteTypePrevote:
+		return stepPrevote
+	case types.VoteTypePrecommit:
+		return stepPrecommit
+	default:
+		cmn.PanicSanity("Unknown vote type")
+		return 0
+	}
+}
+
+// FilePV implements PrivValidator using data persisted to disk
+// to prevent double signing.
+// NOTE: the directory containing the pv.filePath must already exist.
+type FilePV struct {
+	Address       types.Address    `json:"address"`
+	PubKey        crypto.PubKey    `json:"pub_key"`
+	LastHeight    int64            `json:"last_height"`
+	LastRound     int              `json:"last_round"`
+	LastStep      int8             `json:"last_step"`
+	LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we dont lose signatures XXX Why would we lose signatures?
+	LastSignBytes cmn.HexBytes     `json:"last_signbytes,omitempty"` // so we dont lose signatures XXX Why would we lose signatures?
+	PrivKey       crypto.PrivKey   `json:"priv_key"`
+
+	// For persistence.
+	// Overloaded for testing.
+	filePath string
+	mtx      sync.Mutex
+}
+
+// GetAddress returns the address of the validator.
+// Implements PrivValidator.
+func (pv *FilePV) GetAddress() types.Address { + return pv.Address +} + +// GetPubKey returns the public key of the validator. +// Implements PrivValidator. +func (pv *FilePV) GetPubKey() crypto.PubKey { + return pv.PubKey +} + +// GenFilePV generates a new validator with randomly generated private key +// and sets the filePath, but does not call Save(). +func GenFilePV(filePath string) *FilePV { + privKey := crypto.GenPrivKeyEd25519() + return &FilePV{ + Address: privKey.PubKey().Address(), + PubKey: privKey.PubKey(), + PrivKey: privKey, + LastStep: stepNone, + filePath: filePath, + } +} + +// LoadFilePV loads a FilePV from the filePath. The FilePV handles double +// signing prevention by persisting data to the filePath. If the filePath does +// not exist, the FilePV must be created manually and saved. +func LoadFilePV(filePath string) *FilePV { + pvJSONBytes, err := ioutil.ReadFile(filePath) + if err != nil { + cmn.Exit(err.Error()) + } + pv := &FilePV{} + err = cdc.UnmarshalJSON(pvJSONBytes, &pv) + if err != nil { + cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err)) + } + + // overwrite pubkey and address for convenience + pv.PubKey = pv.PrivKey.PubKey() + pv.Address = pv.PubKey.Address() + + pv.filePath = filePath + return pv +} + +// LoadOrGenFilePV loads a FilePV from the given filePath +// or else generates a new one and saves it to the filePath. +func LoadOrGenFilePV(filePath string) *FilePV { + var pv *FilePV + if cmn.FileExists(filePath) { + pv = LoadFilePV(filePath) + } else { + pv = GenFilePV(filePath) + pv.Save() + } + return pv +} + +// Save persists the FilePV to disk. +func (pv *FilePV) Save() { + pv.mtx.Lock() + defer pv.mtx.Unlock() + pv.save() +} + +func (pv *FilePV) save() { + outFile := pv.filePath + if outFile == "" { + panic("Cannot save PrivValidator: filePath not set") + } + jsonBytes, err := cdc.MarshalJSONIndent(pv, "", " ") + if err != nil { + panic(err) + } + err = cmn.WriteFileAtomic(outFile, jsonBytes, 0600) + if err != nil { + panic(err) + } +} + +// Reset resets all fields in the FilePV. +// NOTE: Unsafe! +func (pv *FilePV) Reset() { + var sig crypto.Signature + pv.LastHeight = 0 + pv.LastRound = 0 + pv.LastStep = 0 + pv.LastSignature = sig + pv.LastSignBytes = nil + pv.Save() +} + +// SignVote signs a canonical representation of the vote, along with the +// chainID. Implements PrivValidator. +func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error { + pv.mtx.Lock() + defer pv.mtx.Unlock() + if err := pv.signVote(chainID, vote); err != nil { + return errors.New(cmn.Fmt("Error signing vote: %v", err)) + } + return nil +} + +// SignProposal signs a canonical representation of the proposal, along with +// the chainID. Implements PrivValidator. +func (pv *FilePV) SignProposal(chainID string, proposal *types.Proposal) error { + pv.mtx.Lock() + defer pv.mtx.Unlock() + if err := pv.signProposal(chainID, proposal); err != nil { + return fmt.Errorf("Error signing proposal: %v", err) + } + return nil +} + +// returns error if HRS regression or no LastSignBytes. 
returns true if HRS is unchanged +func (pv *FilePV) checkHRS(height int64, round int, step int8) (bool, error) { + if pv.LastHeight > height { + return false, errors.New("Height regression") + } + + if pv.LastHeight == height { + if pv.LastRound > round { + return false, errors.New("Round regression") + } + + if pv.LastRound == round { + if pv.LastStep > step { + return false, errors.New("Step regression") + } else if pv.LastStep == step { + if pv.LastSignBytes != nil { + if pv.LastSignature == nil { + panic("pv: LastSignature is nil but LastSignBytes is not!") + } + return true, nil + } + return false, errors.New("No LastSignature found") + } + } + } + return false, nil +} + +// signVote checks if the vote is good to sign and sets the vote signature. +// It may need to set the timestamp as well if the vote is otherwise the same as +// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL). +func (pv *FilePV) signVote(chainID string, vote *types.Vote) error { + height, round, step := vote.Height, vote.Round, voteToStep(vote) + signBytes := vote.SignBytes(chainID) + + sameHRS, err := pv.checkHRS(height, round, step) + if err != nil { + return err + } + + // We might crash before writing to the wal, + // causing us to try to re-sign for the same HRS. + // If signbytes are the same, use the last signature. + // If they only differ by timestamp, use last timestamp and signature + // Otherwise, return error + if sameHRS { + if bytes.Equal(signBytes, pv.LastSignBytes) { + vote.Signature = pv.LastSignature + } else if timestamp, ok := checkVotesOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok { + vote.Timestamp = timestamp + vote.Signature = pv.LastSignature + } else { + err = fmt.Errorf("Conflicting data") + } + return err + } + + // It passed the checks. Sign the vote + sig, err := pv.PrivKey.Sign(signBytes) + if err != nil { + return err + } + pv.saveSigned(height, round, step, signBytes, sig) + vote.Signature = sig + return nil +} + +// signProposal checks if the proposal is good to sign and sets the proposal signature. +// It may need to set the timestamp as well if the proposal is otherwise the same as +// a previously signed proposal ie. we crashed after signing but before the proposal hit the WAL). +func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error { + height, round, step := proposal.Height, proposal.Round, stepPropose + signBytes := proposal.SignBytes(chainID) + + sameHRS, err := pv.checkHRS(height, round, step) + if err != nil { + return err + } + + // We might crash before writing to the wal, + // causing us to try to re-sign for the same HRS. + // If signbytes are the same, use the last signature. + // If they only differ by timestamp, use last timestamp and signature + // Otherwise, return error + if sameHRS { + if bytes.Equal(signBytes, pv.LastSignBytes) { + proposal.Signature = pv.LastSignature + } else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok { + proposal.Timestamp = timestamp + proposal.Signature = pv.LastSignature + } else { + err = fmt.Errorf("Conflicting data") + } + return err + } + + // It passed the checks. 
Sign the proposal + sig, err := pv.PrivKey.Sign(signBytes) + if err != nil { + return err + } + pv.saveSigned(height, round, step, signBytes, sig) + proposal.Signature = sig + return nil +} + +// Persist height/round/step and signature +func (pv *FilePV) saveSigned(height int64, round int, step int8, + signBytes []byte, sig crypto.Signature) { + + pv.LastHeight = height + pv.LastRound = round + pv.LastStep = step + pv.LastSignature = sig + pv.LastSignBytes = signBytes + pv.save() +} + +// SignHeartbeat signs a canonical representation of the heartbeat, along with the chainID. +// Implements PrivValidator. +func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error { + pv.mtx.Lock() + defer pv.mtx.Unlock() + sig, err := pv.PrivKey.Sign(heartbeat.SignBytes(chainID)) + if err != nil { + return err + } + heartbeat.Signature = sig + return nil +} + +// String returns a string representation of the FilePV. +func (pv *FilePV) String() string { + return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", pv.GetAddress(), pv.LastHeight, pv.LastRound, pv.LastStep) +} + +//------------------------------------- + +// returns the timestamp from the lastSignBytes. +// returns true if the only difference in the votes is their timestamp. +func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { + var lastVote, newVote types.CanonicalJSONVote + if err := cdc.UnmarshalJSON(lastSignBytes, &lastVote); err != nil { + panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) + } + if err := cdc.UnmarshalJSON(newSignBytes, &newVote); err != nil { + panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) + } + + lastTime, err := time.Parse(types.TimeFormat, lastVote.Timestamp) + if err != nil { + panic(err) + } + + // set the times to the same value and check equality + now := types.CanonicalTime(time.Now()) + lastVote.Timestamp = now + newVote.Timestamp = now + lastVoteBytes, _ := cdc.MarshalJSON(lastVote) + newVoteBytes, _ := cdc.MarshalJSON(newVote) + + return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes) +} + +// returns the timestamp from the lastSignBytes. 
+// returns true if the only difference in the proposals is their timestamp +func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { + var lastProposal, newProposal types.CanonicalJSONProposal + if err := cdc.UnmarshalJSON(lastSignBytes, &lastProposal); err != nil { + panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) + } + if err := cdc.UnmarshalJSON(newSignBytes, &newProposal); err != nil { + panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) + } + + lastTime, err := time.Parse(types.TimeFormat, lastProposal.Timestamp) + if err != nil { + panic(err) + } + + // set the times to the same value and check equality + now := types.CanonicalTime(time.Now()) + lastProposal.Timestamp = now + newProposal.Timestamp = now + lastProposalBytes, _ := cdc.MarshalJSON(lastProposal) + newProposalBytes, _ := cdc.MarshalJSON(newProposal) + + return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) +} diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go new file mode 100644 index 000000000..5889c0d68 --- /dev/null +++ b/privval/priv_validator_test.go @@ -0,0 +1,251 @@ +package privval + +import ( + "encoding/base64" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestGenLoadValidator(t *testing.T) { + assert := assert.New(t) + + _, tempFilePath := cmn.Tempfile("priv_validator_") + privVal := GenFilePV(tempFilePath) + + height := int64(100) + privVal.LastHeight = height + privVal.Save() + addr := privVal.GetAddress() + + privVal = LoadFilePV(tempFilePath) + assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") + assert.Equal(height, privVal.LastHeight, "expected privval.LastHeight to have been saved") +} + +func TestLoadOrGenValidator(t *testing.T) { + assert := assert.New(t) + + _, tempFilePath := cmn.Tempfile("priv_validator_") + if err := os.Remove(tempFilePath); err != nil { + t.Error(err) + } + privVal := LoadOrGenFilePV(tempFilePath) + addr := privVal.GetAddress() + privVal = LoadOrGenFilePV(tempFilePath) + assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") +} + +func TestUnmarshalValidator(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // create some fixed values + privKey := crypto.GenPrivKeyEd25519() + pubKey := privKey.PubKey() + addr := pubKey.Address() + pubArray := [32]byte(pubKey.(crypto.PubKeyEd25519)) + pubBytes := pubArray[:] + privArray := [64]byte(privKey) + privBytes := privArray[:] + pubB64 := base64.StdEncoding.EncodeToString(pubBytes) + privB64 := base64.StdEncoding.EncodeToString(privBytes) + + serialized := fmt.Sprintf(`{ + "address": "%s", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "%s" + }, + "last_height": "0", + "last_round": "0", + "last_step": 0, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "%s" + } +}`, addr, pubB64, privB64) + + val := FilePV{} + err := cdc.UnmarshalJSON([]byte(serialized), &val) + require.Nil(err, "%+v", err) + + // make sure the values match + assert.EqualValues(addr, val.GetAddress()) + assert.EqualValues(pubKey, val.GetPubKey()) + assert.EqualValues(privKey, val.PrivKey) + + // export it and make sure it is the same + out, err := cdc.MarshalJSON(val) + require.Nil(err, "%+v", err) + 
assert.JSONEq(serialized, string(out)) +} + +func TestSignVote(t *testing.T) { + assert := assert.New(t) + + _, tempFilePath := cmn.Tempfile("priv_validator_") + privVal := GenFilePV(tempFilePath) + + block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} + block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{}} + height, round := int64(10), 1 + voteType := types.VoteTypePrevote + + // sign a vote for first time + vote := newVote(privVal.Address, 0, height, round, voteType, block1) + err := privVal.SignVote("mychainid", vote) + assert.NoError(err, "expected no error signing vote") + + // try to sign the same vote again; should be fine + err = privVal.SignVote("mychainid", vote) + assert.NoError(err, "expected no error on signing same vote") + + // now try some bad votes + cases := []*types.Vote{ + newVote(privVal.Address, 0, height, round-1, voteType, block1), // round regression + newVote(privVal.Address, 0, height-1, round, voteType, block1), // height regression + newVote(privVal.Address, 0, height-2, round+4, voteType, block1), // height regression and different round + newVote(privVal.Address, 0, height, round, voteType, block2), // different block + } + + for _, c := range cases { + err = privVal.SignVote("mychainid", c) + assert.Error(err, "expected error on signing conflicting vote") + } + + // try signing a vote with a different time stamp + sig := vote.Signature + vote.Timestamp = vote.Timestamp.Add(time.Duration(1000)) + err = privVal.SignVote("mychainid", vote) + assert.NoError(err) + assert.Equal(sig, vote.Signature) +} + +func TestSignProposal(t *testing.T) { + assert := assert.New(t) + + _, tempFilePath := cmn.Tempfile("priv_validator_") + privVal := GenFilePV(tempFilePath) + + block1 := types.PartSetHeader{5, []byte{1, 2, 3}} + block2 := types.PartSetHeader{10, []byte{3, 2, 1}} + height, round := int64(10), 1 + + // sign a proposal for first time + proposal := newProposal(height, round, block1) + err := privVal.SignProposal("mychainid", proposal) + assert.NoError(err, "expected no error signing proposal") + + // try to sign the same proposal again; should be fine + err = privVal.SignProposal("mychainid", proposal) + assert.NoError(err, "expected no error on signing same proposal") + + // now try some bad Proposals + cases := []*types.Proposal{ + newProposal(height, round-1, block1), // round regression + newProposal(height-1, round, block1), // height regression + newProposal(height-2, round+4, block1), // height regression and different round + newProposal(height, round, block2), // different block + } + + for _, c := range cases { + err = privVal.SignProposal("mychainid", c) + assert.Error(err, "expected error on signing conflicting proposal") + } + + // try signing a proposal with a different time stamp + sig := proposal.Signature + proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) + err = privVal.SignProposal("mychainid", proposal) + assert.NoError(err) + assert.Equal(sig, proposal.Signature) +} + +func TestDifferByTimestamp(t *testing.T) { + _, tempFilePath := cmn.Tempfile("priv_validator_") + privVal := GenFilePV(tempFilePath) + + block1 := types.PartSetHeader{5, []byte{1, 2, 3}} + height, round := int64(10), 1 + chainID := "mychainid" + + // test proposal + { + proposal := newProposal(height, round, block1) + err := privVal.SignProposal(chainID, proposal) + assert.NoError(t, err, "expected no error signing proposal") + signBytes := proposal.SignBytes(chainID) + sig := proposal.Signature + timeStamp := clipToMS(proposal.Timestamp) + + 
// manipulate the timestamp. should get changed back + proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond) + var emptySig crypto.Signature + proposal.Signature = emptySig + err = privVal.SignProposal("mychainid", proposal) + assert.NoError(t, err, "expected no error on signing same proposal") + + assert.Equal(t, timeStamp, proposal.Timestamp) + assert.Equal(t, signBytes, proposal.SignBytes(chainID)) + assert.Equal(t, sig, proposal.Signature) + } + + // test vote + { + voteType := types.VoteTypePrevote + blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} + vote := newVote(privVal.Address, 0, height, round, voteType, blockID) + err := privVal.SignVote("mychainid", vote) + assert.NoError(t, err, "expected no error signing vote") + + signBytes := vote.SignBytes(chainID) + sig := vote.Signature + timeStamp := clipToMS(vote.Timestamp) + + // manipulate the timestamp. should get changed back + vote.Timestamp = vote.Timestamp.Add(time.Millisecond) + var emptySig crypto.Signature + vote.Signature = emptySig + err = privVal.SignVote("mychainid", vote) + assert.NoError(t, err, "expected no error on signing same vote") + + assert.Equal(t, timeStamp, vote.Timestamp) + assert.Equal(t, signBytes, vote.SignBytes(chainID)) + assert.Equal(t, sig, vote.Signature) + } +} + +func newVote(addr types.Address, idx int, height int64, round int, typ byte, blockID types.BlockID) *types.Vote { + return &types.Vote{ + ValidatorAddress: addr, + ValidatorIndex: idx, + Height: height, + Round: round, + Type: typ, + Timestamp: time.Now().UTC(), + BlockID: blockID, + } +} + +func newProposal(height int64, round int, partsHeader types.PartSetHeader) *types.Proposal { + return &types.Proposal{ + Height: height, + Round: round, + BlockPartsHeader: partsHeader, + Timestamp: time.Now().UTC(), + } +} + +func clipToMS(t time.Time) time.Time { + nano := t.UnixNano() + million := int64(1000000) + nano = (nano / million) * million + return time.Unix(0, nano).UTC() +} diff --git a/privval/socket.go b/privval/socket.go new file mode 100644 index 000000000..1e8a3807b --- /dev/null +++ b/privval/socket.go @@ -0,0 +1,538 @@ +package privval + +import ( + "errors" + "fmt" + "io" + "net" + "time" + + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + p2pconn "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/types" +) + +const ( + defaultAcceptDeadlineSeconds = 3 + defaultConnDeadlineSeconds = 3 + defaultConnHeartBeatSeconds = 30 + defaultConnWaitSeconds = 60 + defaultDialRetries = 10 +) + +// Socket errors. +var ( + ErrDialRetryMax = errors.New("dialed maximum retries") + ErrConnWaitTimeout = errors.New("waited for remote signer for too long") + ErrConnTimeout = errors.New("remote signer timed out") +) + +var ( + acceptDeadline = time.Second + defaultAcceptDeadlineSeconds + connDeadline = time.Second * defaultConnDeadlineSeconds + connHeartbeat = time.Second * defaultConnHeartBeatSeconds +) + +// SocketPVOption sets an optional parameter on the SocketPV. +type SocketPVOption func(*SocketPV) + +// SocketPVAcceptDeadline sets the deadline for the SocketPV listener. +// A zero time value disables the deadline. +func SocketPVAcceptDeadline(deadline time.Duration) SocketPVOption { + return func(sc *SocketPV) { sc.acceptDeadline = deadline } +} + +// SocketPVConnDeadline sets the read and write deadline for connections +// from external signing processes. 
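+// The deadline is applied to each accepted connection by tcpTimeoutListener.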
+func SocketPVConnDeadline(deadline time.Duration) SocketPVOption { + return func(sc *SocketPV) { sc.connDeadline = deadline } +} + +// SocketPVHeartbeat sets the period on which to check the liveness of the +// connected Signer connections. +func SocketPVHeartbeat(period time.Duration) SocketPVOption { + return func(sc *SocketPV) { sc.connHeartbeat = period } +} + +// SocketPVConnWait sets the timeout duration before connection of external +// signing processes are considered to be unsuccessful. +func SocketPVConnWait(timeout time.Duration) SocketPVOption { + return func(sc *SocketPV) { sc.connWaitTimeout = timeout } +} + +// SocketPV implements PrivValidator, it uses a socket to request signatures +// from an external process. +type SocketPV struct { + cmn.BaseService + + addr string + acceptDeadline time.Duration + connDeadline time.Duration + connHeartbeat time.Duration + connWaitTimeout time.Duration + privKey crypto.PrivKeyEd25519 + + conn net.Conn + listener net.Listener +} + +// Check that SocketPV implements PrivValidator. +var _ types.PrivValidator = (*SocketPV)(nil) + +// NewSocketPV returns an instance of SocketPV. +func NewSocketPV( + logger log.Logger, + socketAddr string, + privKey crypto.PrivKeyEd25519, +) *SocketPV { + sc := &SocketPV{ + addr: socketAddr, + acceptDeadline: acceptDeadline, + connDeadline: connDeadline, + connHeartbeat: connHeartbeat, + connWaitTimeout: time.Second * defaultConnWaitSeconds, + privKey: privKey, + } + + sc.BaseService = *cmn.NewBaseService(logger, "SocketPV", sc) + + return sc +} + +// GetAddress implements PrivValidator. +func (sc *SocketPV) GetAddress() types.Address { + addr, err := sc.getAddress() + if err != nil { + panic(err) + } + + return addr +} + +// Address is an alias for PubKey().Address(). +func (sc *SocketPV) getAddress() (cmn.HexBytes, error) { + p, err := sc.getPubKey() + if err != nil { + return nil, err + } + + return p.Address(), nil +} + +// GetPubKey implements PrivValidator. +func (sc *SocketPV) GetPubKey() crypto.PubKey { + pubKey, err := sc.getPubKey() + if err != nil { + panic(err) + } + + return pubKey +} + +func (sc *SocketPV) getPubKey() (crypto.PubKey, error) { + err := writeMsg(sc.conn, &PubKeyMsg{}) + if err != nil { + return nil, err + } + + res, err := readMsg(sc.conn) + if err != nil { + return nil, err + } + + return res.(*PubKeyMsg).PubKey, nil +} + +// SignVote implements PrivValidator. +func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { + err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote}) + if err != nil { + return err + } + + res, err := readMsg(sc.conn) + if err != nil { + return err + } + + *vote = *res.(*SignVoteMsg).Vote + + return nil +} + +// SignProposal implements PrivValidator. +func (sc *SocketPV) SignProposal( + chainID string, + proposal *types.Proposal, +) error { + err := writeMsg(sc.conn, &SignProposalMsg{Proposal: proposal}) + if err != nil { + return err + } + + res, err := readMsg(sc.conn) + if err != nil { + return err + } + + *proposal = *res.(*SignProposalMsg).Proposal + + return nil +} + +// SignHeartbeat implements PrivValidator. +func (sc *SocketPV) SignHeartbeat( + chainID string, + heartbeat *types.Heartbeat, +) error { + err := writeMsg(sc.conn, &SignHeartbeatMsg{Heartbeat: heartbeat}) + if err != nil { + return err + } + + res, err := readMsg(sc.conn) + if err != nil { + return err + } + + *heartbeat = *res.(*SignHeartbeatMsg).Heartbeat + + return nil +} + +// OnStart implements cmn.Service. 
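+// It starts listening on the configured address and blocks until an external
+// signer connects or the connection wait timeout expires.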
+func (sc *SocketPV) OnStart() error { + if err := sc.listen(); err != nil { + err = cmn.ErrorWrap(err, "failed to listen") + sc.Logger.Error( + "OnStart", + "err", err, + ) + return err + } + + conn, err := sc.waitConnection() + if err != nil { + err = cmn.ErrorWrap(err, "failed to accept connection") + sc.Logger.Error( + "OnStart", + "err", err, + ) + + return err + } + + sc.conn = conn + + return nil +} + +// OnStop implements cmn.Service. +func (sc *SocketPV) OnStop() { + if sc.conn != nil { + if err := sc.conn.Close(); err != nil { + err = cmn.ErrorWrap(err, "failed to close connection") + sc.Logger.Error( + "OnStop", + "err", err, + ) + } + } + + if sc.listener != nil { + if err := sc.listener.Close(); err != nil { + err = cmn.ErrorWrap(err, "failed to close listener") + sc.Logger.Error( + "OnStop", + "err", err, + ) + } + } +} + +func (sc *SocketPV) acceptConnection() (net.Conn, error) { + conn, err := sc.listener.Accept() + if err != nil { + if !sc.IsRunning() { + return nil, nil // Ignore error from listener closing. + } + return nil, err + + } + + conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey) + if err != nil { + return nil, err + } + + return conn, nil +} + +func (sc *SocketPV) listen() error { + ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr)) + if err != nil { + return err + } + + sc.listener = newTCPTimeoutListener( + ln, + sc.acceptDeadline, + sc.connDeadline, + sc.connHeartbeat, + ) + + return nil +} + +// waitConnection uses the configured wait timeout to error if no external +// process connects in the time period. +func (sc *SocketPV) waitConnection() (net.Conn, error) { + var ( + connc = make(chan net.Conn, 1) + errc = make(chan error, 1) + ) + + go func(connc chan<- net.Conn, errc chan<- error) { + conn, err := sc.acceptConnection() + if err != nil { + errc <- err + return + } + + connc <- conn + }(connc, errc) + + select { + case conn := <-connc: + return conn, nil + case err := <-errc: + if _, ok := err.(timeoutError); ok { + return nil, cmn.ErrorWrap(ErrConnWaitTimeout, err.Error()) + } + return nil, err + case <-time.After(sc.connWaitTimeout): + return nil, ErrConnWaitTimeout + } +} + +//--------------------------------------------------------- + +// RemoteSignerOption sets an optional parameter on the RemoteSigner. +type RemoteSignerOption func(*RemoteSigner) + +// RemoteSignerConnDeadline sets the read and write deadline for connections +// from external signing processes. +func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption { + return func(ss *RemoteSigner) { ss.connDeadline = deadline } +} + +// RemoteSignerConnRetries sets the amount of attempted retries to connect. +func RemoteSignerConnRetries(retries int) RemoteSignerOption { + return func(ss *RemoteSigner) { ss.connRetries = retries } +} + +// RemoteSigner implements PrivValidator by dialing to a socket. +type RemoteSigner struct { + cmn.BaseService + + addr string + chainID string + connDeadline time.Duration + connRetries int + privKey crypto.PrivKeyEd25519 + privVal types.PrivValidator + + conn net.Conn +} + +// NewRemoteSigner returns an instance of RemoteSigner. 
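+// The service dials socketAddr on Start and then answers signing requests
+// using the wrapped privVal. A minimal wiring sketch (the address is
+// illustrative):
+//
+//	rs := NewRemoteSigner(logger, chainID, "tcp://127.0.0.1:26659", privVal, privKey)
+//	if err := rs.Start(); err != nil {
+//		// handle error
+//	}
+//	defer rs.Stop()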
+func NewRemoteSigner( + logger log.Logger, + chainID, socketAddr string, + privVal types.PrivValidator, + privKey crypto.PrivKeyEd25519, +) *RemoteSigner { + rs := &RemoteSigner{ + addr: socketAddr, + chainID: chainID, + connDeadline: time.Second * defaultConnDeadlineSeconds, + connRetries: defaultDialRetries, + privKey: privKey, + privVal: privVal, + } + + rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs) + + return rs +} + +// OnStart implements cmn.Service. +func (rs *RemoteSigner) OnStart() error { + conn, err := rs.connect() + if err != nil { + err = cmn.ErrorWrap(err, "connect") + rs.Logger.Error("OnStart", "err", err) + return err + } + + go rs.handleConnection(conn) + + return nil +} + +// OnStop implements cmn.Service. +func (rs *RemoteSigner) OnStop() { + if rs.conn == nil { + return + } + + if err := rs.conn.Close(); err != nil { + rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) + } +} + +func (rs *RemoteSigner) connect() (net.Conn, error) { + for retries := rs.connRetries; retries > 0; retries-- { + // Don't sleep if it is the first retry. + if retries != rs.connRetries { + time.Sleep(rs.connDeadline) + } + + conn, err := cmn.Connect(rs.addr) + if err != nil { + err = cmn.ErrorWrap(err, "connection failed") + rs.Logger.Error( + "connect", + "addr", rs.addr, + "err", err, + ) + + continue + } + + if err := conn.SetDeadline(time.Now().Add(connDeadline)); err != nil { + err = cmn.ErrorWrap(err, "setting connection timeout failed") + rs.Logger.Error( + "connect", + "err", err, + ) + continue + } + + conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey) + if err != nil { + err = cmn.ErrorWrap(err, "encrypting connection failed") + rs.Logger.Error( + "connect", + "err", err, + ) + + continue + } + + return conn, nil + } + + return nil, ErrDialRetryMax +} + +func (rs *RemoteSigner) handleConnection(conn net.Conn) { + for { + if !rs.IsRunning() { + return // Ignore error from listener closing. + } + + req, err := readMsg(conn) + if err != nil { + if err != io.EOF { + rs.Logger.Error("handleConnection", "err", err) + } + return + } + + var res SocketPVMsg + + switch r := req.(type) { + case *PubKeyMsg: + var p crypto.PubKey + p = rs.privVal.GetPubKey() + res = &PubKeyMsg{p} + case *SignVoteMsg: + err = rs.privVal.SignVote(rs.chainID, r.Vote) + res = &SignVoteMsg{r.Vote} + case *SignProposalMsg: + err = rs.privVal.SignProposal(rs.chainID, r.Proposal) + res = &SignProposalMsg{r.Proposal} + case *SignHeartbeatMsg: + err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat) + res = &SignHeartbeatMsg{r.Heartbeat} + default: + err = fmt.Errorf("unknown msg: %v", r) + } + + if err != nil { + rs.Logger.Error("handleConnection", "err", err) + return + } + + err = writeMsg(conn, res) + if err != nil { + rs.Logger.Error("handleConnection", "err", err) + return + } + } +} + +//--------------------------------------------------------- + +// SocketPVMsg is sent between RemoteSigner and SocketPV. +type SocketPVMsg interface{} + +func RegisterSocketPVMsg(cdc *amino.Codec) { + cdc.RegisterInterface((*SocketPVMsg)(nil), nil) + cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil) + cdc.RegisterConcrete(&SignVoteMsg{}, "tendermint/socketpv/SignVoteMsg", nil) + cdc.RegisterConcrete(&SignProposalMsg{}, "tendermint/socketpv/SignProposalMsg", nil) + cdc.RegisterConcrete(&SignHeartbeatMsg{}, "tendermint/socketpv/SignHeartbeatMsg", nil) +} + +// PubKeyMsg is a PrivValidatorSocket message containing the public key. 
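+// An empty PubKeyMsg serves as the request; the response carries the key
+// (see getPubKey and handleConnection above).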
+type PubKeyMsg struct { + PubKey crypto.PubKey +} + +// SignVoteMsg is a PrivValidatorSocket message containing a vote. +type SignVoteMsg struct { + Vote *types.Vote +} + +// SignProposalMsg is a PrivValidatorSocket message containing a Proposal. +type SignProposalMsg struct { + Proposal *types.Proposal +} + +// SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat. +type SignHeartbeatMsg struct { + Heartbeat *types.Heartbeat +} + +func readMsg(r io.Reader) (msg SocketPVMsg, err error) { + const maxSocketPVMsgSize = 1024 * 10 + _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize) + if _, ok := err.(timeoutError); ok { + err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + } + return +} + +func writeMsg(w io.Writer, msg interface{}) (err error) { + _, err = cdc.MarshalBinaryWriter(w, msg) + if _, ok := err.(timeoutError); ok { + err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + } + return +} diff --git a/privval/socket_tcp.go b/privval/socket_tcp.go new file mode 100644 index 000000000..b26db00c2 --- /dev/null +++ b/privval/socket_tcp.go @@ -0,0 +1,66 @@ +package privval + +import ( + "net" + "time" +) + +// timeoutError can be used to check if an error returned from the netp package +// was due to a timeout. +type timeoutError interface { + Timeout() bool +} + +// tcpTimeoutListener implements net.Listener. +var _ net.Listener = (*tcpTimeoutListener)(nil) + +// tcpTimeoutListener wraps a *net.TCPListener to standardise protocol timeouts +// and potentially other tuning parameters. +type tcpTimeoutListener struct { + *net.TCPListener + + acceptDeadline time.Duration + connDeadline time.Duration + period time.Duration +} + +// newTCPTimeoutListener returns an instance of tcpTimeoutListener. +func newTCPTimeoutListener( + ln net.Listener, + acceptDeadline, connDeadline time.Duration, + period time.Duration, +) tcpTimeoutListener { + return tcpTimeoutListener{ + TCPListener: ln.(*net.TCPListener), + acceptDeadline: acceptDeadline, + connDeadline: connDeadline, + period: period, + } +} + +// Accept implements net.Listener. 
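+// It arms the accept deadline before accepting, then applies the connection
+// deadline and TCP keep-alive settings to the accepted connection.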
+func (ln tcpTimeoutListener) Accept() (net.Conn, error) { + err := ln.SetDeadline(time.Now().Add(ln.acceptDeadline)) + if err != nil { + return nil, err + } + + tc, err := ln.AcceptTCP() + if err != nil { + return nil, err + } + + if err := tc.SetDeadline(time.Now().Add(ln.connDeadline)); err != nil { + return nil, err + } + + if err := tc.SetKeepAlive(true); err != nil { + return nil, err + } + + if err := tc.SetKeepAlivePeriod(ln.period); err != nil { + return nil, err + } + + return tc, nil +} diff --git a/privval/socket_tcp_test.go b/privval/socket_tcp_test.go new file mode 100644 index 000000000..44a673c0c --- /dev/null +++ b/privval/socket_tcp_test.go @@ -0,0 +1,64 @@ +package privval + +import ( + "net" + "testing" + "time" +) + +func TestTCPTimeoutListenerAcceptDeadline(t *testing.T) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + ln = newTCPTimeoutListener(ln, time.Millisecond, time.Second, time.Second) + + _, err = ln.Accept() + opErr, ok := err.(*net.OpError) + if !ok { + t.Fatalf("have %v, want *net.OpError", err) + } + + if have, want := opErr.Op, "accept"; have != want { + t.Errorf("have %v, want %v", have, want) + } +} + +func TestTCPTimeoutListenerConnDeadline(t *testing.T) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + ln = newTCPTimeoutListener(ln, time.Second, time.Millisecond, time.Second) + + donec := make(chan struct{}) + go func(ln net.Listener) { + defer close(donec) + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + + time.Sleep(2 * time.Millisecond) + + _, err = c.Write([]byte("foo")) + opErr, ok := err.(*net.OpError) + if !ok { + t.Fatalf("have %v, want *net.OpError", err) + } + + if have, want := opErr.Op, "write"; have != want { + t.Errorf("have %v, want %v", have, want) + } + }(ln) + + _, err = net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + + <-donec +} diff --git a/privval/socket_test.go b/privval/socket_test.go new file mode 100644 index 000000000..7bcacd6e1 --- /dev/null +++ b/privval/socket_test.go @@ -0,0 +1,282 @@ +package privval + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + p2pconn "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/types" +) + +func TestSocketPVAddress(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID) + ) + defer sc.Stop() + defer rs.Stop() + + serverAddr := rs.privVal.GetAddress() + + clientAddr, err := sc.getAddress() + require.NoError(t, err) + + assert.Equal(t, serverAddr, clientAddr) + + // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. + assert.Equal(t, serverAddr, sc.GetAddress()) + +} + +func TestSocketPVPubKey(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID) + ) + defer sc.Stop() + defer rs.Stop() + + clientKey, err := sc.getPubKey() + require.NoError(t, err) + + privKey := rs.privVal.GetPubKey() + + assert.Equal(t, privKey, clientKey) + + // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. 
+ assert.Equal(t, privKey, sc.GetPubKey()) +} + +func TestSocketPVProposal(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID) + + ts = time.Now() + privProposal = &types.Proposal{Timestamp: ts} + clientProposal = &types.Proposal{Timestamp: ts} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignProposal(chainID, privProposal)) + require.NoError(t, sc.SignProposal(chainID, clientProposal)) + assert.Equal(t, privProposal.Signature, clientProposal.Signature) +} + +func TestSocketPVVote(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID) + + ts = time.Now() + vType = types.VoteTypePrecommit + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestSocketPVHeartbeat(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID) + + want = &types.Heartbeat{} + have = &types.Heartbeat{} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignHeartbeat(chainID, want)) + require.NoError(t, sc.SignHeartbeat(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestSocketPVAcceptDeadline(t *testing.T) { + var ( + sc = NewSocketPV( + log.TestingLogger(), + "127.0.0.1:0", + crypto.GenPrivKeyEd25519(), + ) + ) + defer sc.Stop() + + SocketPVAcceptDeadline(time.Millisecond)(sc) + + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) +} + +func TestSocketPVDeadline(t *testing.T) { + var ( + addr = testFreeAddr(t) + listenc = make(chan struct{}) + sc = NewSocketPV( + log.TestingLogger(), + addr, + crypto.GenPrivKeyEd25519(), + ) + ) + + SocketPVConnDeadline(100 * time.Millisecond)(sc) + SocketPVConnWait(500 * time.Millisecond)(sc) + + go func(sc *SocketPV) { + defer close(listenc) + + require.NoError(t, sc.Start()) + + assert.True(t, sc.IsRunning()) + }(sc) + + for { + conn, err := cmn.Connect(addr) + if err != nil { + continue + } + + _, err = p2pconn.MakeSecretConnection( + conn, + crypto.GenPrivKeyEd25519(), + ) + if err == nil { + break + } + } + + <-listenc + + // Sleep to guarantee deadline has been hit. 
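+	// (SocketPVConnDeadline was set to 100ms above, so the read in getPubKey
+	// below is expected to fail with ErrConnTimeout)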
+ time.Sleep(20 * time.Microsecond) + + _, err := sc.getPubKey() + assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) +} + +func TestSocketPVWait(t *testing.T) { + sc := NewSocketPV( + log.TestingLogger(), + "127.0.0.1:0", + crypto.GenPrivKeyEd25519(), + ) + defer sc.Stop() + + SocketPVConnWait(time.Millisecond)(sc) + + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) +} + +func TestRemoteSignerRetry(t *testing.T) { + var ( + attemptc = make(chan int) + retries = 2 + ) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + go func(ln net.Listener, attemptc chan<- int) { + attempts := 0 + + for { + conn, err := ln.Accept() + require.NoError(t, err) + + err = conn.Close() + require.NoError(t, err) + + attempts++ + + if attempts == retries { + attemptc <- attempts + break + } + } + }(ln, attemptc) + + rs := NewRemoteSigner( + log.TestingLogger(), + cmn.RandStr(12), + ln.Addr().String(), + types.NewMockPV(), + crypto.GenPrivKeyEd25519(), + ) + defer rs.Stop() + + RemoteSignerConnDeadline(time.Millisecond)(rs) + RemoteSignerConnRetries(retries)(rs) + + assert.Equal(t, rs.Start().(cmn.Error).Data(), ErrDialRetryMax) + + select { + case attempts := <-attemptc: + assert.Equal(t, retries, attempts) + case <-time.After(100 * time.Millisecond): + t.Error("expected remote to observe connection attempts") + } +} + +func testSetupSocketPair( + t *testing.T, + chainID string, +) (*SocketPV, *RemoteSigner) { + var ( + addr = testFreeAddr(t) + logger = log.TestingLogger() + privVal = types.NewMockPV() + readyc = make(chan struct{}) + rs = NewRemoteSigner( + logger, + chainID, + addr, + privVal, + crypto.GenPrivKeyEd25519(), + ) + sc = NewSocketPV( + logger, + addr, + crypto.GenPrivKeyEd25519(), + ) + ) + + go func(sc *SocketPV) { + require.NoError(t, sc.Start()) + assert.True(t, sc.IsRunning()) + + readyc <- struct{}{} + }(sc) + + RemoteSignerConnDeadline(time.Millisecond)(rs) + RemoteSignerConnRetries(1e6)(rs) + + require.NoError(t, rs.Start()) + assert.True(t, rs.IsRunning()) + + <-readyc + + return sc, rs +} + +// testFreeAddr claims a free port so we don't block on listener being ready. 
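+// Note the port is only probably free: another process could claim it between
+// the Close below and the caller's subsequent Listen.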
+func testFreeAddr(t *testing.T) string { + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) +} diff --git a/privval/wire.go b/privval/wire.go new file mode 100644 index 000000000..c42ba40d6 --- /dev/null +++ b/privval/wire.go @@ -0,0 +1,13 @@ +package privval + +import ( + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" +) + +var cdc = amino.NewCodec() + +func init() { + crypto.RegisterAmino(cdc) + RegisterSocketPVMsg(cdc) +} diff --git a/proxy/app_conn.go b/proxy/app_conn.go new file mode 100644 index 000000000..2f792671e --- /dev/null +++ b/proxy/app_conn.go @@ -0,0 +1,144 @@ +package proxy + +import ( + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/types" +) + +//---------------------------------------------------------------------------------------- +// Enforce which abci msgs can be sent on a connection at the type level + +type AppConnConsensus interface { + SetResponseCallback(abcicli.Callback) + Error() error + + InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) + + BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) + DeliverTxAsync(tx []byte) *abcicli.ReqRes + EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) + CommitSync() (*types.ResponseCommit, error) +} + +type AppConnMempool interface { + SetResponseCallback(abcicli.Callback) + Error() error + + CheckTxAsync(tx []byte) *abcicli.ReqRes + + FlushAsync() *abcicli.ReqRes + FlushSync() error +} + +type AppConnQuery interface { + Error() error + + EchoSync(string) (*types.ResponseEcho, error) + InfoSync(types.RequestInfo) (*types.ResponseInfo, error) + QuerySync(types.RequestQuery) (*types.ResponseQuery, error) + + // SetOptionSync(key string, value string) (res types.Result) +} + +//----------------------------------------------------------------------------------------- +// Implements AppConnConsensus (subset of abcicli.Client) + +type appConnConsensus struct { + appConn abcicli.Client +} + +func NewAppConnConsensus(appConn abcicli.Client) *appConnConsensus { + return &appConnConsensus{ + appConn: appConn, + } +} + +func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) { + app.appConn.SetResponseCallback(cb) +} + +func (app *appConnConsensus) Error() error { + return app.appConn.Error() +} + +func (app *appConnConsensus) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { + return app.appConn.InitChainSync(req) +} + +func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + return app.appConn.BeginBlockSync(req) +} + +func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes { + return app.appConn.DeliverTxAsync(tx) +} + +func (app *appConnConsensus) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { + return app.appConn.EndBlockSync(req) +} + +func (app *appConnConsensus) CommitSync() (*types.ResponseCommit, error) { + return app.appConn.CommitSync() +} + +//------------------------------------------------ +// Implements AppConnMempool (subset of abcicli.Client) + +type appConnMempool struct { + appConn abcicli.Client +} + +func NewAppConnMempool(appConn abcicli.Client) *appConnMempool { + return &appConnMempool{ + appConn: appConn, + } +} + +func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) { + app.appConn.SetResponseCallback(cb) 
+} + +func (app *appConnMempool) Error() error { + return app.appConn.Error() +} + +func (app *appConnMempool) FlushAsync() *abcicli.ReqRes { + return app.appConn.FlushAsync() +} + +func (app *appConnMempool) FlushSync() error { + return app.appConn.FlushSync() +} + +func (app *appConnMempool) CheckTxAsync(tx []byte) *abcicli.ReqRes { + return app.appConn.CheckTxAsync(tx) +} + +//------------------------------------------------ +// Implements AppConnQuery (subset of abcicli.Client) + +type appConnQuery struct { + appConn abcicli.Client +} + +func NewAppConnQuery(appConn abcicli.Client) *appConnQuery { + return &appConnQuery{ + appConn: appConn, + } +} + +func (app *appConnQuery) Error() error { + return app.appConn.Error() +} + +func (app *appConnQuery) EchoSync(msg string) (*types.ResponseEcho, error) { + return app.appConn.EchoSync(msg) +} + +func (app *appConnQuery) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { + return app.appConn.InfoSync(req) +} + +func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (*types.ResponseQuery, error) { + return app.appConn.QuerySync(reqQuery) +} diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go new file mode 100644 index 000000000..3c556d4f0 --- /dev/null +++ b/proxy/app_conn_test.go @@ -0,0 +1,152 @@ +package proxy + +import ( + "strings" + "testing" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +//---------------------------------------- + +type AppConnTest interface { + EchoAsync(string) *abcicli.ReqRes + FlushSync() error + InfoSync(types.RequestInfo) (*types.ResponseInfo, error) +} + +type appConnTest struct { + appConn abcicli.Client +} + +func NewAppConnTest(appConn abcicli.Client) AppConnTest { + return &appConnTest{appConn} +} + +func (app *appConnTest) EchoAsync(msg string) *abcicli.ReqRes { + return app.appConn.EchoAsync(msg) +} + +func (app *appConnTest) FlushSync() error { + return app.appConn.FlushSync() +} + +func (app *appConnTest) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { + return app.appConn.InfoSync(req) +} + +//---------------------------------------- + +var SOCKET = "socket" + +func TestEcho(t *testing.T) { + sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + + // Start server + s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication()) + s.SetLogger(log.TestingLogger().With("module", "abci-server")) + if err := s.Start(); err != nil { + t.Fatalf("Error starting socket server: %v", err.Error()) + } + defer s.Stop() + + // Start client + cli, err := clientCreator.NewABCIClient() + if err != nil { + t.Fatalf("Error creating ABCI client: %v", err.Error()) + } + cli.SetLogger(log.TestingLogger().With("module", "abci-client")) + if err := cli.Start(); err != nil { + t.Fatalf("Error starting ABCI client: %v", err.Error()) + } + + proxy := NewAppConnTest(cli) + t.Log("Connected") + + for i := 0; i < 1000; i++ { + proxy.EchoAsync(cmn.Fmt("echo-%v", i)) + } + if err := proxy.FlushSync(); err != nil { + t.Error(err) + } +} + +func BenchmarkEcho(b *testing.B) { + b.StopTimer() // Initialize + sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + + // 
Start server + s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication()) + s.SetLogger(log.TestingLogger().With("module", "abci-server")) + if err := s.Start(); err != nil { + b.Fatalf("Error starting socket server: %v", err.Error()) + } + defer s.Stop() + + // Start client + cli, err := clientCreator.NewABCIClient() + if err != nil { + b.Fatalf("Error creating ABCI client: %v", err.Error()) + } + cli.SetLogger(log.TestingLogger().With("module", "abci-client")) + if err := cli.Start(); err != nil { + b.Fatalf("Error starting ABCI client: %v", err.Error()) + } + + proxy := NewAppConnTest(cli) + b.Log("Connected") + echoString := strings.Repeat(" ", 200) + b.StartTimer() // Start benchmarking tests + + for i := 0; i < b.N; i++ { + proxy.EchoAsync(echoString) + } + if err := proxy.FlushSync(); err != nil { + b.Error(err) + } + + b.StopTimer() + // info := proxy.InfoSync(types.RequestInfo{""}) + //b.Log("N: ", b.N, info) +} + +func TestInfo(t *testing.T) { + sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + + // Start server + s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication()) + s.SetLogger(log.TestingLogger().With("module", "abci-server")) + if err := s.Start(); err != nil { + t.Fatalf("Error starting socket server: %v", err.Error()) + } + defer s.Stop() + + // Start client + cli, err := clientCreator.NewABCIClient() + if err != nil { + t.Fatalf("Error creating ABCI client: %v", err.Error()) + } + cli.SetLogger(log.TestingLogger().With("module", "abci-client")) + if err := cli.Start(); err != nil { + t.Fatalf("Error starting ABCI client: %v", err.Error()) + } + + proxy := NewAppConnTest(cli) + t.Log("Connected") + + resInfo, err := proxy.InfoSync(types.RequestInfo{""}) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if string(resInfo.Data) != "{\"size\":0}" { + t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else") + } +} diff --git a/proxy/client.go b/proxy/client.go new file mode 100644 index 000000000..87f4e716d --- /dev/null +++ b/proxy/client.go @@ -0,0 +1,81 @@ +package proxy + +import ( + "sync" + + "github.com/pkg/errors" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/abci/types" +) + +// NewABCIClient returns newly connected client +type ClientCreator interface { + NewABCIClient() (abcicli.Client, error) +} + +//---------------------------------------------------- +// local proxy uses a mutex on an in-proc app + +type localClientCreator struct { + mtx *sync.Mutex + app types.Application +} + +func NewLocalClientCreator(app types.Application) ClientCreator { + return &localClientCreator{ + mtx: new(sync.Mutex), + app: app, + } +} + +func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { + return abcicli.NewLocalClient(l.mtx, l.app), nil +} + +//--------------------------------------------------------------- +// remote proxy opens new connections to an external app process + +type remoteClientCreator struct { + addr string + transport string + mustConnect bool +} + +func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCreator { + return &remoteClientCreator{ + addr: addr, + transport: transport, + mustConnect: mustConnect, + } +} + +func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { + remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect) + if 
err != nil { + return nil, errors.Wrap(err, "Failed to connect to proxy") + } + return remoteApp, nil +} + +//----------------------------------------------------------------- +// default + +func DefaultClientCreator(addr, transport, dbDir string) ClientCreator { + switch addr { + case "kvstore": + fallthrough + case "dummy": + return NewLocalClientCreator(kvstore.NewKVStoreApplication()) + case "persistent_kvstore": + fallthrough + case "persistent_dummy": + return NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir)) + case "nilapp": + return NewLocalClientCreator(types.NewBaseApplication()) + default: + mustConnect := false // loop retrying + return NewRemoteClientCreator(addr, transport, mustConnect) + } +} diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go new file mode 100644 index 000000000..279fa42ee --- /dev/null +++ b/proxy/multi_app_conn.go @@ -0,0 +1,112 @@ +package proxy + +import ( + "github.com/pkg/errors" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +//----------------------------- + +// Tendermint's interface to the application consists of multiple connections +type AppConns interface { + cmn.Service + + Mempool() AppConnMempool + Consensus() AppConnConsensus + Query() AppConnQuery +} + +func NewAppConns(clientCreator ClientCreator, handshaker Handshaker) AppConns { + return NewMultiAppConn(clientCreator, handshaker) +} + +//----------------------------- +// multiAppConn implements AppConns + +type Handshaker interface { + Handshake(AppConns) error +} + +// a multiAppConn is made of a few appConns (mempool, consensus, query) +// and manages their underlying abci clients, including the handshake +// which ensures the app and tendermint are synced. +// TODO: on app restart, clients must reboot together +type multiAppConn struct { + cmn.BaseService + + handshaker Handshaker + + mempoolConn *appConnMempool + consensusConn *appConnConsensus + queryConn *appConnQuery + + clientCreator ClientCreator +} + +// Make all necessary abci connections to the application +func NewMultiAppConn(clientCreator ClientCreator, handshaker Handshaker) *multiAppConn { + multiAppConn := &multiAppConn{ + handshaker: handshaker, + clientCreator: clientCreator, + } + multiAppConn.BaseService = *cmn.NewBaseService(nil, "multiAppConn", multiAppConn) + return multiAppConn +} + +// Returns the mempool connection +func (app *multiAppConn) Mempool() AppConnMempool { + return app.mempoolConn +} + +// Returns the consensus Connection +func (app *multiAppConn) Consensus() AppConnConsensus { + return app.consensusConn +} + +// Returns the query Connection +func (app *multiAppConn) Query() AppConnQuery { + return app.queryConn +} + +func (app *multiAppConn) OnStart() error { + // query connection + querycli, err := app.clientCreator.NewABCIClient() + if err != nil { + return errors.Wrap(err, "Error creating ABCI client (query connection)") + } + querycli.SetLogger(app.Logger.With("module", "abci-client", "connection", "query")) + if err := querycli.Start(); err != nil { + return errors.Wrap(err, "Error starting ABCI client (query connection)") + } + app.queryConn = NewAppConnQuery(querycli) + + // mempool connection + memcli, err := app.clientCreator.NewABCIClient() + if err != nil { + return errors.Wrap(err, "Error creating ABCI client (mempool connection)") + } + memcli.SetLogger(app.Logger.With("module", "abci-client", "connection", "mempool")) + if err := memcli.Start(); err != nil { + return errors.Wrap(err, "Error starting ABCI client (mempool connection)") + } + 
+	app.mempoolConn = NewAppConnMempool(memcli)
+
+	// consensus connection
+	concli, err := app.clientCreator.NewABCIClient()
+	if err != nil {
+		return errors.Wrap(err, "Error creating ABCI client (consensus connection)")
+	}
+	concli.SetLogger(app.Logger.With("module", "abci-client", "connection", "consensus"))
+	if err := concli.Start(); err != nil {
+		return errors.Wrap(err, "Error starting ABCI client (consensus connection)")
+	}
+	app.consensusConn = NewAppConnConsensus(concli)
+
+	// ensure app is synced to the latest state
+	if app.handshaker != nil {
+		return app.handshaker.Handshake(app)
+	}
+
+	return nil
+}
diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go
new file mode 100644
index 000000000..79c452fc9
--- /dev/null
+++ b/rpc/client/event_test.go
@@ -0,0 +1,149 @@
+package client_test
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/rpc/client"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+var waitForEventTimeout = 5 * time.Second
+
+// MakeTxKV returns a random key and value, along with a test transaction of
+// the form key=value.
+func MakeTxKV() ([]byte, []byte, []byte) {
+	k := []byte(cmn.RandStr(8))
+	v := []byte(cmn.RandStr(8))
+	return k, v, append(k, append([]byte("="), v...)...)
+}
+
+func TestHeaderEvents(t *testing.T) {
+	for i, c := range GetClients() {
+		i, c := i, c // capture params
+		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
+			// start the client for this test if it wasn't already running
+			if !c.IsRunning() {
+				// if not, we start it, listen, and stop it.
+				err := c.Start()
+				require.Nil(t, err, "%d: %+v", i, err)
+				defer c.Stop()
+			}
+
+			evtTyp := types.EventNewBlockHeader
+			evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+			require.Nil(t, err, "%d: %+v", i, err)
+			_, ok := evt.(types.EventDataNewBlockHeader)
+			require.True(t, ok, "%d: %#v", i, evt)
+			// TODO: more checks...
+		})
+	}
+}
+
+func TestBlockEvents(t *testing.T) {
+	for i, c := range GetClients() {
+		i, c := i, c // capture params
+		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
+
+			// start the client for this test if it wasn't already running
+			if !c.IsRunning() {
+				// if not, we start it, listen, and stop it.
+				err := c.Start()
+				require.Nil(t, err, "%d: %+v", i, err)
+				defer c.Stop()
+			}
+
+			// listen for a new block; ensure height increases by 1
+			var firstBlockHeight int64
+			for j := 0; j < 3; j++ {
+				evtTyp := types.EventNewBlock
+				evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+				require.Nil(t, err, "%d: %+v", j, err)
+				blockEvent, ok := evt.(types.EventDataNewBlock)
+				require.True(t, ok, "%d: %#v", j, evt)
+
+				block := blockEvent.Block
+				if j == 0 {
+					firstBlockHeight = block.Header.Height
+					continue
+				}
+
+				require.Equal(t, block.Header.Height, firstBlockHeight+int64(j))
+			}
+		})
+	}
+}
+
+func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) {
+	for i, c := range GetClients() {
+		i, c := i, c // capture params
+		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
+
+			// start the client for this test if it wasn't already running
+			if !c.IsRunning() {
+				// if not, we start it, listen, and stop it.
+				err := c.Start()
+				require.Nil(t, err, "%d: %+v", i, err)
+				defer c.Stop()
+			}
+
+			// make the tx
+			_, _, tx := MakeTxKV()
+			evtTyp := types.EventTx
+
+			// send async
+			txres, err := c.BroadcastTxAsync(tx)
+			require.Nil(t, err, "%+v", err)
+			require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
+
+			// and wait for confirmation
+			evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+			require.Nil(t, err, "%d: %+v", i, err)
+			// and make sure it has the proper info
+			txe, ok := evt.(types.EventDataTx)
+			require.True(t, ok, "%d: %#v", i, evt)
+			// make sure this is the proper tx
+			require.EqualValues(t, tx, txe.Tx)
+			require.True(t, txe.Result.IsOK())
+		})
+	}
+}
+
+func TestTxEventsSentWithBroadcastTxSync(t *testing.T) {
+	for i, c := range GetClients() {
+		i, c := i, c // capture params
+		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
+
+			// start the client for this test if it wasn't already running
+			if !c.IsRunning() {
+				// if not, we start it, listen, and stop it.
+				err := c.Start()
+				require.Nil(t, err, "%d: %+v", i, err)
+				defer c.Stop()
+			}
+
+			// make the tx
+			_, _, tx := MakeTxKV()
+			evtTyp := types.EventTx
+
+			// send sync
+			txres, err := c.BroadcastTxSync(tx)
+			require.Nil(t, err, "%+v", err)
+			require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
+
+			// and wait for confirmation
+			evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
+			require.Nil(t, err, "%d: %+v", i, err)
+			// and make sure it has the proper info
+			txe, ok := evt.(types.EventDataTx)
+			require.True(t, ok, "%d: %#v", i, evt)
+			// make sure this is the proper tx
+			require.EqualValues(t, tx, txe.Tx)
+			require.True(t, txe.Result.IsOK())
+		})
+	}
+}
diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go
new file mode 100644
index 000000000..7e64d1164
--- /dev/null
+++ b/rpc/client/helpers.go
@@ -0,0 +1,80 @@
+package client
+
+import (
+	"context"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/tendermint/tendermint/types"
+)
+
+// Waiter is informed of the remaining block delta and decides whether to
+// quit early.
+type Waiter func(delta int64) (abort error)
+
+// DefaultWaitStrategy is the standard backoff algorithm,
+// but you can plug in another one.
+func DefaultWaitStrategy(delta int64) (abort error) {
+	if delta > 10 {
+		return errors.Errorf("Waiting for %d blocks... aborting", delta)
+	} else if delta > 0 {
+		// estimate of wait time....
+		// wait half a second for the next block (in progress)
+		// plus one second for every full block
+		delay := time.Duration(delta-1)*time.Second + 500*time.Millisecond
+		time.Sleep(delay)
+	}
+	return nil
+}
+
+// WaitForHeight polls status at reasonable intervals until
+// the block at the given height is available.
+//
+// If waiter is nil, we use DefaultWaitStrategy, but you can also
+// provide your own implementation.
+func WaitForHeight(c StatusClient, h int64, waiter Waiter) error {
+	if waiter == nil {
+		waiter = DefaultWaitStrategy
+	}
+	delta := int64(1)
+	for delta > 0 {
+		s, err := c.Status()
+		if err != nil {
+			return err
+		}
+		delta = h - s.SyncInfo.LatestBlockHeight
+		// wait for the time, or abort early
+		if err := waiter(delta); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WaitForOneEvent subscribes to a websocket event for the given
+// event type and returns upon receiving it one time, or
+// when the timeout duration has expired.
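+//
+// A typical call (mirroring the event tests above; c is any EventsClient):
+//
+//	evt, err := client.WaitForOneEvent(c, types.EventNewBlock, 5*time.Second)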
+// +// This handles subscribing and unsubscribing under the hood +func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (types.TMEventData, error) { + const subscriber = "helpers" + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + evts := make(chan interface{}, 1) + + // register for the next event of this type + query := types.QueryForEvent(evtTyp) + err := c.Subscribe(ctx, subscriber, query, evts) + if err != nil { + return nil, errors.Wrap(err, "failed to subscribe") + } + + // make sure to unregister after the test is over + defer c.UnsubscribeAll(ctx, subscriber) + + select { + case evt := <-evts: + return evt.(types.TMEventData), nil + case <-ctx.Done(): + return nil, errors.New("timed out waiting for event") + } +} diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go new file mode 100644 index 000000000..8b843fcdb --- /dev/null +++ b/rpc/client/helpers_test.go @@ -0,0 +1,76 @@ +package client_test + +import ( + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/rpc/client/mock" + ctypes "github.com/tendermint/tendermint/rpc/core/types" +) + +func TestWaitForHeight(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // test with error result - immediate failure + m := &mock.StatusMock{ + Call: mock.Call{ + Error: errors.New("bye"), + }, + } + r := mock.NewStatusRecorder(m) + + // connection failure always leads to error + err := client.WaitForHeight(r, 8, nil) + require.NotNil(err) + require.Equal("bye", err.Error()) + // we called status once to check + require.Equal(1, len(r.Calls)) + + // now set current block height to 10 + m.Call = mock.Call{ + Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10}}, + } + + // we will not wait for more than 10 blocks + err = client.WaitForHeight(r, 40, nil) + require.NotNil(err) + require.True(strings.Contains(err.Error(), "aborting")) + // we called status once more to check + require.Equal(2, len(r.Calls)) + + // waiting for the past returns immediately + err = client.WaitForHeight(r, 5, nil) + require.Nil(err) + // we called status once more to check + require.Equal(3, len(r.Calls)) + + // since we can't update in a background goroutine (test --race) + // we use the callback to update the status height + myWaiter := func(delta int64) error { + // update the height for the next call + m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}} + return client.DefaultWaitStrategy(delta) + } + + // we wait for a few blocks + err = client.WaitForHeight(r, 12, myWaiter) + require.Nil(err) + // we called status once to check + require.Equal(5, len(r.Calls)) + + pre := r.Calls[3] + require.Nil(pre.Error) + prer, ok := pre.Response.(*ctypes.ResultStatus) + require.True(ok) + assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) + + post := r.Calls[4] + require.Nil(post.Error) + postr, ok := post.Response.(*ctypes.ResultStatus) + require.True(ok) + assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) +} diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go new file mode 100644 index 000000000..4b85bf01d --- /dev/null +++ b/rpc/client/httpclient.go @@ -0,0 +1,374 @@ +package client + +import ( + "context" + "sync" + + "github.com/pkg/errors" + + amino "github.com/tendermint/go-amino" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + 
ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpcclient "github.com/tendermint/tendermint/rpc/lib/client" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* +HTTP is a Client implementation that communicates +with a tendermint node over json rpc and websockets. + +This is the main implementation you probably want to use in +production code. There are other implementations when calling +the tendermint node in-process (local), or when you want to mock +out the server for test code (mock). +*/ +type HTTP struct { + remote string + rpc *rpcclient.JSONRPCClient + *WSEvents +} + +// NewHTTP takes a remote endpoint in the form tcp://: +// and the websocket path (which always seems to be "/websocket") +func NewHTTP(remote, wsEndpoint string) *HTTP { + rc := rpcclient.NewJSONRPCClient(remote) + cdc := rc.Codec() + ctypes.RegisterAmino(cdc) + rc.SetCodec(cdc) + + return &HTTP{ + rpc: rc, + remote: remote, + WSEvents: newWSEvents(cdc, remote, wsEndpoint), + } +} + +var ( + _ Client = (*HTTP)(nil) + _ NetworkClient = (*HTTP)(nil) + _ EventsClient = (*HTTP)(nil) +) + +func (c *HTTP) Status() (*ctypes.ResultStatus, error) { + result := new(ctypes.ResultStatus) + _, err := c.rpc.Call("status", map[string]interface{}{}, result) + if err != nil { + return nil, errors.Wrap(err, "Status") + } + return result, nil +} + +func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) { + result := new(ctypes.ResultABCIInfo) + _, err := c.rpc.Call("abci_info", map[string]interface{}{}, result) + if err != nil { + return nil, errors.Wrap(err, "ABCIInfo") + } + return result, nil +} + +func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) +} + +func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + result := new(ctypes.ResultABCIQuery) + _, err := c.rpc.Call("abci_query", + map[string]interface{}{"path": path, "data": data, "height": opts.Height, "trusted": opts.Trusted}, + result) + if err != nil { + return nil, errors.Wrap(err, "ABCIQuery") + } + return result, nil +} + +func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + result := new(ctypes.ResultBroadcastTxCommit) + _, err := c.rpc.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) + if err != nil { + return nil, errors.Wrap(err, "broadcast_tx_commit") + } + return result, nil +} + +func (c *HTTP) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX("broadcast_tx_async", tx) +} + +func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX("broadcast_tx_sync", tx) +} + +func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + result := new(ctypes.ResultBroadcastTx) + _, err := c.rpc.Call(route, map[string]interface{}{"tx": tx}, result) + if err != nil { + return nil, errors.Wrap(err, route) + } + return result, nil +} + +func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) { + result := new(ctypes.ResultNetInfo) + _, err := c.rpc.Call("net_info", map[string]interface{}{}, result) + if err != nil { + return nil, errors.Wrap(err, "NetInfo") + } + return result, nil +} + +func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { + result := new(ctypes.ResultDumpConsensusState) + _, err := c.rpc.Call("dump_consensus_state", 
map[string]interface{}{}, result) + if err != nil { + return nil, errors.Wrap(err, "DumpConsensusState") + } + return result, nil +} + +func (c *HTTP) ConsensusState() (*ctypes.ResultConsensusState, error) { + result := new(ctypes.ResultConsensusState) + _, err := c.rpc.Call("consensus_state", map[string]interface{}{}, result) + if err != nil { + return nil, errors.Wrap(err, "ConsensusState") + } + return result, nil +} + +func (c *HTTP) Health() (*ctypes.ResultHealth, error) { + result := new(ctypes.ResultHealth) + _, err := c.rpc.Call("health", map[string]interface{}{}, result) + if err != nil { + return nil, errors.Wrap(err, "Health") + } + return result, nil +} + +func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + result := new(ctypes.ResultBlockchainInfo) + _, err := c.rpc.Call("blockchain", + map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, + result) + if err != nil { + return nil, errors.Wrap(err, "BlockchainInfo") + } + return result, nil +} + +func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) { + result := new(ctypes.ResultGenesis) + _, err := c.rpc.Call("genesis", map[string]interface{}{}, result) + if err != nil { + return nil, errors.Wrap(err, "Genesis") + } + return result, nil +} + +func (c *HTTP) Block(height *int64) (*ctypes.ResultBlock, error) { + result := new(ctypes.ResultBlock) + _, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result) + if err != nil { + return nil, errors.Wrap(err, "Block") + } + return result, nil +} + +func (c *HTTP) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { + result := new(ctypes.ResultBlockResults) + _, err := c.rpc.Call("block_results", map[string]interface{}{"height": height}, result) + if err != nil { + return nil, errors.Wrap(err, "Block Result") + } + return result, nil +} + +func (c *HTTP) Commit(height *int64) (*ctypes.ResultCommit, error) { + result := new(ctypes.ResultCommit) + _, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result) + if err != nil { + return nil, errors.Wrap(err, "Commit") + } + return result, nil +} + +func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { + result := new(ctypes.ResultTx) + params := map[string]interface{}{ + "hash": hash, + "prove": prove, + } + _, err := c.rpc.Call("tx", params, result) + if err != nil { + return nil, errors.Wrap(err, "Tx") + } + return result, nil +} + +func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { + result := new(ctypes.ResultTxSearch) + params := map[string]interface{}{ + "query": query, + "prove": prove, + "page": page, + "per_page": perPage, + } + _, err := c.rpc.Call("tx_search", params, result) + if err != nil { + return nil, errors.Wrap(err, "TxSearch") + } + return result, nil +} + +func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) { + result := new(ctypes.ResultValidators) + _, err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result) + if err != nil { + return nil, errors.Wrap(err, "Validators") + } + return result, nil +} + +/** websocket event stuff here... 
**/ + +type WSEvents struct { + cmn.BaseService + cdc *amino.Codec + remote string + endpoint string + ws *rpcclient.WSClient + + mtx sync.RWMutex + subscriptions map[string]chan<- interface{} +} + +func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents { + wsEvents := &WSEvents{ + cdc: cdc, + endpoint: endpoint, + remote: remote, + subscriptions: make(map[string]chan<- interface{}), + } + + wsEvents.BaseService = *cmn.NewBaseService(nil, "WSEvents", wsEvents) + return wsEvents +} + +func (w *WSEvents) OnStart() error { + w.ws = rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() { + w.redoSubscriptions() + })) + w.ws.SetCodec(w.cdc) + + err := w.ws.Start() + if err != nil { + return err + } + + go w.eventListener() + return nil +} + +// Stop wraps the BaseService/eventSwitch actions as Start does +func (w *WSEvents) OnStop() { + err := w.ws.Stop() + if err != nil { + w.Logger.Error("failed to stop WSClient", "err", err) + } +} + +func (w *WSEvents) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { + q := query.String() + + err := w.ws.Subscribe(ctx, q) + if err != nil { + return err + } + + w.mtx.Lock() + // subscriber param is ignored because Tendermint will override it with + // remote IP anyway. + w.subscriptions[q] = out + w.mtx.Unlock() + + return nil +} + +func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { + q := query.String() + + err := w.ws.Unsubscribe(ctx, q) + if err != nil { + return err + } + + w.mtx.Lock() + ch, ok := w.subscriptions[q] + if ok { + close(ch) + delete(w.subscriptions, q) + } + w.mtx.Unlock() + + return nil +} + +func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { + err := w.ws.UnsubscribeAll(ctx) + if err != nil { + return err + } + + w.mtx.Lock() + for _, ch := range w.subscriptions { + close(ch) + } + w.subscriptions = make(map[string]chan<- interface{}) + w.mtx.Unlock() + + return nil +} + +// After being reconnected, it is necessary to redo subscription to server +// otherwise no data will be automatically received. +func (w *WSEvents) redoSubscriptions() { + for q := range w.subscriptions { + // NOTE: no timeout for resubscribing + // FIXME: better logging/handling of errors?? + w.ws.Subscribe(context.Background(), q) + } +} + +// eventListener is an infinite loop pulling all websocket events +// and pushing them to the EventSwitch. +// +// the goroutine only stops by closing quit +func (w *WSEvents) eventListener() { + for { + select { + case resp, ok := <-w.ws.ResponsesCh: + if !ok { + return + } + if resp.Error != nil { + w.Logger.Error("WS error", "err", resp.Error.Error()) + continue + } + result := new(ctypes.ResultEvent) + err := w.cdc.UnmarshalJSON(resp.Result, result) + if err != nil { + w.Logger.Error("failed to unmarshal response", "err", err) + continue + } + // NOTE: writing also happens inside mutex so we can't close a channel in + // Unsubscribe/UnsubscribeAll. + w.mtx.RLock() + if ch, ok := w.subscriptions[result.Query]; ok { + ch <- result.Data + } + w.mtx.RUnlock() + case <-w.Quit(): + return + } + } +} diff --git a/rpc/client/interface.go b/rpc/client/interface.go new file mode 100644 index 000000000..f939c855b --- /dev/null +++ b/rpc/client/interface.go @@ -0,0 +1,95 @@ +package client + +/* +The client package provides a general purpose interface (Client) for connecting +to a tendermint node, as well as higher-level functionality. 
+
+The main implementation for production code is client.HTTP, which
+connects via HTTP to the JSONRPC interface of the tendermint node.
+
+For connecting to a node running in the same process (eg. when
+compiling the abci app in the same process), you can use the client.Local
+implementation.
+
+For mocking out server responses during testing to see behavior for
+arbitrary return values, use the mock package.
+
+In addition to the Client interface, which should be used externally
+for maximum flexibility and testability, this package provides the two
+implementations above as well as helper functions that work on any
+Client implementation.
+*/
+
+import (
+ ctypes "github.com/tendermint/tendermint/rpc/core/types"
+ "github.com/tendermint/tendermint/types"
+ cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// ABCIClient groups together the functionality that principally
+// affects the ABCI app. In many cases this is all we want,
+// so we can accept an interface, which is easier to mock.
+type ABCIClient interface {
+ // Reading from abci app
+ ABCIInfo() (*ctypes.ResultABCIInfo, error)
+ ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error)
+ ABCIQueryWithOptions(path string, data cmn.HexBytes,
+  opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error)
+
+ // Writing to abci app
+ BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
+ BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
+ BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
+}
+
+// SignClient groups together the interfaces needed to get valid
+// signatures and prove anything about the chain.
+type SignClient interface {
+ Block(height *int64) (*ctypes.ResultBlock, error)
+ BlockResults(height *int64) (*ctypes.ResultBlockResults, error)
+ Commit(height *int64) (*ctypes.ResultCommit, error)
+ Validators(height *int64) (*ctypes.ResultValidators, error)
+ Tx(hash []byte, prove bool) (*ctypes.ResultTx, error)
+ TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error)
+}
+
+// HistoryClient shows us data from genesis to now in large chunks.
+type HistoryClient interface {
+ Genesis() (*ctypes.ResultGenesis, error)
+ BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
+}
+
+// StatusClient provides general chain info.
+type StatusClient interface {
+ // General chain info
+ Status() (*ctypes.ResultStatus, error)
+}
+
+// Client wraps the most important rpc calls a client would make.
+// If you want to listen for events, test whether the concrete type
+// also implements events.EventSwitch.
+type Client interface {
+ cmn.Service
+ ABCIClient
+ SignClient
+ HistoryClient
+ StatusClient
+ EventsClient
+}
+
+// NetworkClient provides general info about the network state. It is
+// usually not needed.
+//
+// Not included in the Client interface, but generally implemented
+// by concrete implementations.
+type NetworkClient interface {
+ NetInfo() (*ctypes.ResultNetInfo, error)
+ DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
+ ConsensusState() (*ctypes.ResultConsensusState, error)
+ Health() (*ctypes.ResultHealth, error)
+}
+
+// EventsClient is reactive; you can subscribe to any message, given the proper
+// string.
see tendermint/types/events.go +type EventsClient interface { + types.EventBusSubscriber +} diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go new file mode 100644 index 000000000..df3daf907 --- /dev/null +++ b/rpc/client/localclient.go @@ -0,0 +1,145 @@ +package client + +import ( + "context" + + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/rpc/core" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* +Local is a Client implementation that directly executes the rpc +functions on a given node, without going through HTTP or GRPC. + +This implementation is useful for: + +* Running tests against a node in-process without the overhead +of going through an http server +* Communication between an ABCI app and Tendermint core when they +are compiled in process. + +For real clients, you probably want to use client.HTTP. For more +powerful control during testing, you probably want the "client/mock" package. +*/ +type Local struct { + *types.EventBus +} + +// NewLocal configures a client that calls the Node directly. +// +// Note that given how rpc/core works with package singletons, that +// you can only have one node per process. So make sure test cases +// don't run in parallel, or try to simulate an entire network in +// one process... +func NewLocal(node *nm.Node) *Local { + node.ConfigureRPC() + return &Local{ + EventBus: node.EventBus(), + } +} + +var ( + _ Client = (*Local)(nil) + _ NetworkClient = Local{} + _ EventsClient = (*Local)(nil) +) + +func (Local) Status() (*ctypes.ResultStatus, error) { + return core.Status() +} + +func (Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { + return core.ABCIInfo() +} + +func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) +} + +func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + return core.ABCIQuery(path, data, opts.Height, opts.Trusted) +} + +func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return core.BroadcastTxCommit(tx) +} + +func (Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return core.BroadcastTxAsync(tx) +} + +func (Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return core.BroadcastTxSync(tx) +} + +func (Local) NetInfo() (*ctypes.ResultNetInfo, error) { + return core.NetInfo() +} + +func (Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { + return core.DumpConsensusState() +} + +func (Local) ConsensusState() (*ctypes.ResultConsensusState, error) { + return core.ConsensusState() +} + +func (Local) Health() (*ctypes.ResultHealth, error) { + return core.Health() +} + +func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { + return core.UnsafeDialSeeds(seeds) +} + +func (Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { + return core.UnsafeDialPeers(peers, persistent) +} + +func (Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return core.BlockchainInfo(minHeight, maxHeight) +} + +func (Local) Genesis() (*ctypes.ResultGenesis, error) { + return core.Genesis() +} + +func (Local) Block(height *int64) (*ctypes.ResultBlock, error) { + 
return core.Block(height) +} + +func (Local) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { + return core.BlockResults(height) +} + +func (Local) Commit(height *int64) (*ctypes.ResultCommit, error) { + return core.Commit(height) +} + +func (Local) Validators(height *int64) (*ctypes.ResultValidators, error) { + return core.Validators(height) +} + +func (Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { + return core.Tx(hash, prove) +} + +func (Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { + return core.TxSearch(query, prove, page, perPage) +} + +func (c *Local) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { + return c.EventBus.Subscribe(ctx, subscriber, query, out) +} + +func (c *Local) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { + return c.EventBus.Unsubscribe(ctx, subscriber, query) +} + +func (c *Local) UnsubscribeAll(ctx context.Context, subscriber string) error { + return c.EventBus.UnsubscribeAll(ctx, subscriber) +} diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go new file mode 100644 index 000000000..1e911bbe6 --- /dev/null +++ b/rpc/client/main_test.go @@ -0,0 +1,24 @@ +package client_test + +import ( + "os" + "testing" + + "github.com/tendermint/tendermint/abci/example/kvstore" + nm "github.com/tendermint/tendermint/node" + rpctest "github.com/tendermint/tendermint/rpc/test" +) + +var node *nm.Node + +func TestMain(m *testing.M) { + // start a tendermint node (and kvstore) in the background to test against + app := kvstore.NewKVStoreApplication() + node = rpctest.StartTendermint(app) + code := m.Run() + + // and shut down proper at the end + node.Stop() + node.Wait() + os.Exit(code) +} diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go new file mode 100644 index 000000000..c8ca060c6 --- /dev/null +++ b/rpc/client/mock/abci.go @@ -0,0 +1,202 @@ +package mock + +import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// ABCIApp will send all abci related request to the named app, +// so you can test app behavior from a client without needing +// an entire tendermint node +type ABCIApp struct { + App abci.Application +} + +var ( + _ client.ABCIClient = ABCIApp{} + _ client.ABCIClient = ABCIMock{} + _ client.ABCIClient = (*ABCIRecorder)(nil) +) + +func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { + return &ctypes.ResultABCIInfo{a.App.Info(abci.RequestInfo{version.Version})}, nil +} + +func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { + return a.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +} + +func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + q := a.App.Query(abci.RequestQuery{data, path, opts.Height, opts.Trusted}) + return &ctypes.ResultABCIQuery{q}, nil +} + +func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + res := ctypes.ResultBroadcastTxCommit{} + res.CheckTx = a.App.CheckTx(tx) + if res.CheckTx.IsErr() { + return &res, nil + } + res.DeliverTx = a.App.DeliverTx(tx) + return &res, nil +} + +func (a ABCIApp) 
BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+ c := a.App.CheckTx(tx)
+ // if CheckTx passes, DeliverTx is run in a background goroutine
+ if !c.IsErr() {
+  go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
+ }
+ return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
+}
+
+func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+ c := a.App.CheckTx(tx)
+ // if CheckTx passes, DeliverTx is run in a background goroutine
+ if !c.IsErr() {
+  go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
+ }
+ return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
+}
+
+// ABCIMock returns the canned responses configured in its Call fields
+// for each ABCI related request, so you can test client behavior
+// without an app or an entire tendermint node.
+type ABCIMock struct {
+ Info            Call
+ Query           Call
+ BroadcastCommit Call
+ Broadcast       Call
+}
+
+func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
+ res, err := m.Info.GetResponse(nil)
+ if err != nil {
+  return nil, err
+ }
+ return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil
+}
+
+func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
+ return m.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions)
+}
+
+func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+ res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Trusted})
+ if err != nil {
+  return nil, err
+ }
+ resQuery := res.(abci.ResponseQuery)
+ return &ctypes.ResultABCIQuery{resQuery}, nil
+}
+
+func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+ res, err := m.BroadcastCommit.GetResponse(tx)
+ if err != nil {
+  return nil, err
+ }
+ return res.(*ctypes.ResultBroadcastTxCommit), nil
+}
+
+func (m ABCIMock) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+ res, err := m.Broadcast.GetResponse(tx)
+ if err != nil {
+  return nil, err
+ }
+ return res.(*ctypes.ResultBroadcastTx), nil
+}
+
+func (m ABCIMock) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+ res, err := m.Broadcast.GetResponse(tx)
+ if err != nil {
+  return nil, err
+ }
+ return res.(*ctypes.ResultBroadcastTx), nil
+}
+
+// ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client)
+// and record all ABCI related calls.
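+//
+// A sketch of the pattern (the mock responses here are arbitrary):
+//
+// ```go
+// m := ABCIMock{
+//  Info:  Call{Response: abci.ResponseInfo{Data: "demo"}},
+//  Query: Call{Error: errors.New("query not configured")},
+// }
+// r := NewABCIRecorder(m)
+// r.ABCIInfo() // recorded as r.Calls[0] with Name "abci_info"
+// ```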
+type ABCIRecorder struct { + Client client.ABCIClient + Calls []Call +} + +func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { + return &ABCIRecorder{ + Client: client, + Calls: []Call{}, + } +} + +type QueryArgs struct { + Path string + Data cmn.HexBytes + Height int64 + Trusted bool +} + +func (r *ABCIRecorder) addCall(call Call) { + r.Calls = append(r.Calls, call) +} + +func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) { + res, err := r.Client.ABCIInfo() + r.addCall(Call{ + Name: "abci_info", + Response: res, + Error: err, + }) + return res, err +} + +func (r *ABCIRecorder) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { + return r.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +} + +func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + res, err := r.Client.ABCIQueryWithOptions(path, data, opts) + r.addCall(Call{ + Name: "abci_query", + Args: QueryArgs{path, data, opts.Height, opts.Trusted}, + Response: res, + Error: err, + }) + return res, err +} + +func (r *ABCIRecorder) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + res, err := r.Client.BroadcastTxCommit(tx) + r.addCall(Call{ + Name: "broadcast_tx_commit", + Args: tx, + Response: res, + Error: err, + }) + return res, err +} + +func (r *ABCIRecorder) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + res, err := r.Client.BroadcastTxAsync(tx) + r.addCall(Call{ + Name: "broadcast_tx_async", + Args: tx, + Response: res, + Error: err, + }) + return res, err +} + +func (r *ABCIRecorder) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + res, err := r.Client.BroadcastTxSync(tx) + r.addCall(Call{ + Name: "broadcast_tx_sync", + Args: tx, + Response: res, + Error: err, + }) + return res, err +} diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go new file mode 100644 index 000000000..bcf443cf0 --- /dev/null +++ b/rpc/client/mock/abci_test.go @@ -0,0 +1,181 @@ +package mock_test + +import ( + "fmt" + "testing" + + "github.com/pkg/errors" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/rpc/client/mock" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestABCIMock(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + key, value := []byte("foo"), []byte("bar") + height := int64(10) + goodTx := types.Tx{0x01, 0xff} + badTx := types.Tx{0x12, 0x21} + + m := mock.ABCIMock{ + Info: mock.Call{Error: errors.New("foobar")}, + Query: mock.Call{Response: abci.ResponseQuery{ + Key: key, + Value: value, + Height: height, + }}, + // Broadcast commit depends on call + BroadcastCommit: mock.Call{ + Args: goodTx, + Response: &ctypes.ResultBroadcastTxCommit{ + CheckTx: abci.ResponseCheckTx{Data: cmn.HexBytes("stand")}, + DeliverTx: abci.ResponseDeliverTx{Data: cmn.HexBytes("deliver")}, + }, + Error: errors.New("bad tx"), + }, + Broadcast: mock.Call{Error: errors.New("must commit")}, + } + + // now, let's try to make some calls + _, err := m.ABCIInfo() + require.NotNil(err) + assert.Equal("foobar", err.Error()) + + // query always returns the response + _query, err 
:= m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) + query := _query.Response + require.Nil(err) + require.NotNil(query) + assert.EqualValues(key, query.Key) + assert.EqualValues(value, query.Value) + assert.Equal(height, query.Height) + + // non-commit calls always return errors + _, err = m.BroadcastTxSync(goodTx) + require.NotNil(err) + assert.Equal("must commit", err.Error()) + _, err = m.BroadcastTxAsync(goodTx) + require.NotNil(err) + assert.Equal("must commit", err.Error()) + + // commit depends on the input + _, err = m.BroadcastTxCommit(badTx) + require.NotNil(err) + assert.Equal("bad tx", err.Error()) + bres, err := m.BroadcastTxCommit(goodTx) + require.Nil(err, "%+v", err) + assert.EqualValues(0, bres.CheckTx.Code) + assert.EqualValues("stand", bres.CheckTx.Data) + assert.EqualValues("deliver", bres.DeliverTx.Data) +} + +func TestABCIRecorder(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // This mock returns errors on everything but Query + m := mock.ABCIMock{ + Info: mock.Call{Response: abci.ResponseInfo{ + Data: "data", + Version: "v0.9.9", + }}, + Query: mock.Call{Error: errors.New("query")}, + Broadcast: mock.Call{Error: errors.New("broadcast")}, + BroadcastCommit: mock.Call{Error: errors.New("broadcast_commit")}, + } + r := mock.NewABCIRecorder(m) + + require.Equal(0, len(r.Calls)) + + _, err := r.ABCIInfo() + assert.Nil(err, "expected no err on info") + + _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Trusted: false}) + assert.NotNil(err, "expected error on query") + require.Equal(2, len(r.Calls)) + + info := r.Calls[0] + assert.Equal("abci_info", info.Name) + assert.Nil(info.Error) + assert.Nil(info.Args) + require.NotNil(info.Response) + ir, ok := info.Response.(*ctypes.ResultABCIInfo) + require.True(ok) + assert.Equal("data", ir.Response.Data) + assert.Equal("v0.9.9", ir.Response.Version) + + query := r.Calls[1] + assert.Equal("abci_query", query.Name) + assert.Nil(query.Response) + require.NotNil(query.Error) + assert.Equal("query", query.Error.Error()) + require.NotNil(query.Args) + qa, ok := query.Args.(mock.QueryArgs) + require.True(ok) + assert.Equal("path", qa.Path) + assert.EqualValues("data", qa.Data) + assert.False(qa.Trusted) + + // now add some broadcasts (should all err) + txs := []types.Tx{{1}, {2}, {3}} + _, err = r.BroadcastTxCommit(txs[0]) + assert.NotNil(err, "expected err on broadcast") + _, err = r.BroadcastTxSync(txs[1]) + assert.NotNil(err, "expected err on broadcast") + _, err = r.BroadcastTxAsync(txs[2]) + assert.NotNil(err, "expected err on broadcast") + + require.Equal(5, len(r.Calls)) + + bc := r.Calls[2] + assert.Equal("broadcast_tx_commit", bc.Name) + assert.Nil(bc.Response) + require.NotNil(bc.Error) + assert.EqualValues(bc.Args, txs[0]) + + bs := r.Calls[3] + assert.Equal("broadcast_tx_sync", bs.Name) + assert.Nil(bs.Response) + require.NotNil(bs.Error) + assert.EqualValues(bs.Args, txs[1]) + + ba := r.Calls[4] + assert.Equal("broadcast_tx_async", ba.Name) + assert.Nil(ba.Response) + require.NotNil(ba.Error) + assert.EqualValues(ba.Args, txs[2]) +} + +func TestABCIApp(t *testing.T) { + assert, require := assert.New(t), require.New(t) + app := kvstore.NewKVStoreApplication() + m := mock.ABCIApp{app} + + // get some info + info, err := m.ABCIInfo() + require.Nil(err) + assert.Equal(`{"size":0}`, info.Response.GetData()) + + // add a key + key, value := "foo", "bar" + tx := fmt.Sprintf("%s=%s", key, value) + res, err := m.BroadcastTxCommit(types.Tx(tx)) + 
require.Nil(err)
+ assert.True(res.CheckTx.IsOK())
+ require.NotNil(res.DeliverTx)
+ assert.True(res.DeliverTx.IsOK())
+
+ // check the key
+ _qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Trusted: true})
+ qres := _qres.Response
+ require.Nil(err)
+ assert.EqualValues(value, qres.Value)
+}
diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go
new file mode 100644
index 000000000..955df6277
--- /dev/null
+++ b/rpc/client/mock/client.go
@@ -0,0 +1,135 @@
+/*
+Package mock provides a Client implementation that accepts mock
+implementations of its various methods.
+
+This implementation is useful in tests, when you don't need a real
+server, but want a high level of control over the server responses you
+mock (eg. error handling), or if you just want to record the calls to
+verify in your tests.
+
+For real clients, you probably want the "http" package. If you
+want to directly call a tendermint node in process, you can use the
+"local" package.
+*/
+package mock
+
+import (
+ "reflect"
+
+ "github.com/tendermint/tendermint/rpc/client"
+ "github.com/tendermint/tendermint/rpc/core"
+ ctypes "github.com/tendermint/tendermint/rpc/core/types"
+ "github.com/tendermint/tendermint/types"
+ cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// Client wraps arbitrary implementations of the various interfaces.
+//
+// We provide a few choices to mock out each one in this package.
+// Nothing is hidden here, so there is no New function: just construct
+// it from some parts, and swap them out during the tests.
+type Client struct {
+ client.ABCIClient
+ client.SignClient
+ client.HistoryClient
+ client.StatusClient
+ client.EventsClient
+ cmn.Service
+}
+
+var _ client.Client = Client{}
+
+// Call is used by recorders to save a call and response.
+// It can also be used to configure mock responses.
+type Call struct {
+ Name     string
+ Args     interface{}
+ Response interface{}
+ Error    error
+}
+
+// GetResponse will generate the appropriate response for us, when
+// using the Call struct to configure a Mock handler.
+//
+// When configuring a response, if only one of Response or Error is
+// set then that will always be returned. If both are set, then
+// we return Response if the Args match the set args, Error otherwise.
+func (c Call) GetResponse(args interface{}) (interface{}, error) {
+ // handle the case with no response
+ if c.Response == nil {
+  if c.Error == nil {
+   panic("Misconfigured call, you must set either Response or Error")
+  }
+  return nil, c.Error
+ }
+ // response without error
+ if c.Error == nil {
+  return c.Response, nil
+ }
+ // have both, so we must check the args...
+ if reflect.DeepEqual(args, c.Args) { + return c.Response, nil + } + return nil, c.Error +} + +func (c Client) Status() (*ctypes.ResultStatus, error) { + return core.Status() +} + +func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) { + return core.ABCIInfo() +} + +func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +} + +func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + return core.ABCIQuery(path, data, opts.Height, opts.Trusted) +} + +func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return core.BroadcastTxCommit(tx) +} + +func (c Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return core.BroadcastTxAsync(tx) +} + +func (c Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return core.BroadcastTxSync(tx) +} + +func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) { + return core.NetInfo() +} + +func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { + return core.UnsafeDialSeeds(seeds) +} + +func (c Client) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { + return core.UnsafeDialPeers(peers, persistent) +} + +func (c Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return core.BlockchainInfo(minHeight, maxHeight) +} + +func (c Client) Genesis() (*ctypes.ResultGenesis, error) { + return core.Genesis() +} + +func (c Client) Block(height *int64) (*ctypes.ResultBlock, error) { + return core.Block(height) +} + +func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { + return core.Commit(height) +} + +func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) { + return core.Validators(height) +} diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go new file mode 100644 index 000000000..58b29d573 --- /dev/null +++ b/rpc/client/mock/status.go @@ -0,0 +1,52 @@ +package mock + +import ( + "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" +) + +// StatusMock returns the result specified by the Call +type StatusMock struct { + Call +} + +var ( + _ client.StatusClient = (*StatusMock)(nil) + _ client.StatusClient = (*StatusRecorder)(nil) +) + +func (m *StatusMock) Status() (*ctypes.ResultStatus, error) { + res, err := m.GetResponse(nil) + if err != nil { + return nil, err + } + return res.(*ctypes.ResultStatus), nil +} + +// StatusRecorder can wrap another type (StatusMock, full client) +// and record the status calls +type StatusRecorder struct { + Client client.StatusClient + Calls []Call +} + +func NewStatusRecorder(client client.StatusClient) *StatusRecorder { + return &StatusRecorder{ + Client: client, + Calls: []Call{}, + } +} + +func (r *StatusRecorder) addCall(call Call) { + r.Calls = append(r.Calls, call) +} + +func (r *StatusRecorder) Status() (*ctypes.ResultStatus, error) { + res, err := r.Client.Status() + r.addCall(Call{ + Name: "status", + Response: res, + Error: err, + }) + return res, err +} diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go new file mode 100644 index 000000000..8e3c15061 --- /dev/null +++ b/rpc/client/mock/status_test.go @@ -0,0 +1,48 @@ +package mock_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/rpc/client/mock" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestStatus(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + m := &mock.StatusMock{ + Call: mock.Call{ + Response: &ctypes.ResultStatus{ + SyncInfo: ctypes.SyncInfo{ + LatestBlockHash: cmn.HexBytes("block"), + LatestAppHash: cmn.HexBytes("app"), + LatestBlockHeight: 10, + }, + }}, + } + + r := mock.NewStatusRecorder(m) + require.Equal(0, len(r.Calls)) + + // make sure response works proper + status, err := r.Status() + require.Nil(err, "%+v", err) + assert.EqualValues("block", status.SyncInfo.LatestBlockHash) + assert.EqualValues(10, status.SyncInfo.LatestBlockHeight) + + // make sure recorder works properly + require.Equal(1, len(r.Calls)) + rs := r.Calls[0] + assert.Equal("status", rs.Name) + assert.Nil(rs.Args) + assert.Nil(rs.Error) + require.NotNil(rs.Response) + st, ok := rs.Response.(*ctypes.ResultStatus) + require.True(ok) + assert.EqualValues("block", st.SyncInfo.LatestBlockHash) + assert.EqualValues(10, st.SyncInfo.LatestBlockHeight) +} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go new file mode 100644 index 000000000..e7e9042a7 --- /dev/null +++ b/rpc/client/rpc_test.go @@ -0,0 +1,366 @@ +package client_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/tendermint/tendermint/rpc/client" + rpctest "github.com/tendermint/tendermint/rpc/test" + "github.com/tendermint/tendermint/types" +) + +func getHTTPClient() *client.HTTP { + rpcAddr := rpctest.GetConfig().RPC.ListenAddress + return client.NewHTTP(rpcAddr, "/websocket") +} + +func getLocalClient() *client.Local { + return client.NewLocal(node) +} + +// GetClients returns a slice of clients for table-driven tests +func GetClients() []client.Client { + return []client.Client{ + getHTTPClient(), + getLocalClient(), + } +} + +// Make sure status is correct (we connect properly) +func TestStatus(t *testing.T) { + for i, c := range GetClients() { + moniker := rpctest.GetConfig().Moniker + status, err := c.Status() + require.Nil(t, err, "%d: %+v", i, err) + assert.Equal(t, moniker, status.NodeInfo.Moniker) + } +} + +// Make sure info is correct (we connect properly) +func TestInfo(t *testing.T) { + for i, c := range GetClients() { + // status, err := c.Status() + // require.Nil(t, err, "%+v", err) + info, err := c.ABCIInfo() + require.Nil(t, err, "%d: %+v", i, err) + // TODO: this is not correct - fix merkleeyes! 
+ // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) + assert.True(t, strings.Contains(info.Response.Data, "size")) + } +} + +func TestNetInfo(t *testing.T) { + for i, c := range GetClients() { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + netinfo, err := nc.NetInfo() + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, netinfo.Listening) + assert.Equal(t, 0, len(netinfo.Peers)) + } +} + +func TestDumpConsensusState(t *testing.T) { + for i, c := range GetClients() { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.DumpConsensusState() + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + assert.Empty(t, cons.Peers) + } +} + +func TestConsensusState(t *testing.T) { + for i, c := range GetClients() { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.ConsensusState() + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + } +} + +func TestHealth(t *testing.T) { + for i, c := range GetClients() { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + _, err := nc.Health() + require.Nil(t, err, "%d: %+v", i, err) + } +} + +func TestGenesisAndValidators(t *testing.T) { + for i, c := range GetClients() { + + // make sure this is the right genesis file + gen, err := c.Genesis() + require.Nil(t, err, "%d: %+v", i, err) + // get the genesis validator + require.Equal(t, 1, len(gen.Genesis.Validators)) + gval := gen.Genesis.Validators[0] + + // get the current validators + vals, err := c.Validators(nil) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, 1, len(vals.Validators)) + val := vals.Validators[0] + + // make sure the current set is also the genesis set + assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.PubKey, val.PubKey) + } +} + +func TestABCIQuery(t *testing.T) { + for i, c := range GetClients() { + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(tx) + require.Nil(t, err, "%d: %+v", i, err) + apph := bres.Height + 1 // this is where the tx will be applied to the state + + // wait before querying + client.WaitForHeight(c, apph, nil) + res, err := c.ABCIQuery("/key", k) + qres := res.Response + if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { + assert.EqualValues(t, v, qres.Value) + } + } +} + +// Make some app checks +func TestAppCalls(t *testing.T) { + assert, require := assert.New(t), require.New(t) + for i, c := range GetClients() { + + // get an offset of height to avoid racing and guessing + s, err := c.Status() + require.Nil(err, "%d: %+v", i, err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 2 + _, err = c.Block(&h) + assert.NotNil(err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(tx) + require.Nil(err, "%d: %+v", i, err) + require.True(bres.DeliverTx.IsOK()) + txh := bres.Height + apph := txh + 1 // this is where the tx will be applied to the state + + // wait before querying + if err := client.WaitForHeight(c, apph, nil); err != nil { + t.Error(err) + } + _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) + qres := _qres.Response + if assert.Nil(err) && assert.True(qres.IsOK()) { + // assert.Equal(k, data.GetKey()) // only returned for 
proofs + assert.EqualValues(v, qres.Value) + } + + // make sure we can lookup the tx with proof + ptx, err := c.Tx(bres.Hash, true) + require.Nil(err, "%d: %+v", i, err) + assert.EqualValues(txh, ptx.Height) + assert.EqualValues(tx, ptx.Tx) + + // and we can even check the block is added + block, err := c.Block(&apph) + require.Nil(err, "%d: %+v", i, err) + appHash := block.BlockMeta.Header.AppHash + assert.True(len(appHash) > 0) + assert.EqualValues(apph, block.BlockMeta.Header.Height) + + // now check the results + blockResults, err := c.BlockResults(&txh) + require.Nil(err, "%d: %+v", i, err) + assert.Equal(txh, blockResults.Height) + if assert.Equal(1, len(blockResults.Results.DeliverTx)) { + // check success code + assert.EqualValues(0, blockResults.Results.DeliverTx[0].Code) + } + + // check blockchain info, now that we know there is info + info, err := c.BlockchainInfo(apph, apph) + require.Nil(err, "%d: %+v", i, err) + assert.True(info.LastHeight >= apph) + if assert.Equal(1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(apph, lastMeta.Header.Height) + bMeta := block.BlockMeta + assert.Equal(bMeta.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(bMeta.BlockID, lastMeta.BlockID) + } + + // and get the corresponding commit with the same apphash + commit, err := c.Commit(&apph) + require.Nil(err, "%d: %+v", i, err) + cappHash := commit.Header.AppHash + assert.Equal(appHash, cappHash) + assert.NotNil(commit.Commit) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := c.Commit(&h) + require.Nil(err, "%d: %+v", i, err) + assert.Equal(block.Block.LastCommit, commit2.Commit) + + // and we got a proof that works! + _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) + pres := _pres.Response + assert.Nil(err) + assert.True(pres.IsOK()) + } +} + +func TestBroadcastTxSync(t *testing.T) { + require := require.New(t) + + mempool := node.MempoolReactor().Mempool + initMempoolSize := mempool.Size() + + for i, c := range GetClients() { + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxSync(tx) + require.Nil(err, "%d: %+v", i, err) + require.Equal(bres.Code, abci.CodeTypeOK) // FIXME + + require.Equal(initMempoolSize+1, mempool.Size()) + + txs := mempool.Reap(1) + require.EqualValues(tx, txs[0]) + mempool.Flush() + } +} + +func TestBroadcastTxCommit(t *testing.T) { + require := require.New(t) + + mempool := node.MempoolReactor().Mempool + for i, c := range GetClients() { + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(tx) + require.Nil(err, "%d: %+v", i, err) + require.True(bres.CheckTx.IsOK()) + require.True(bres.DeliverTx.IsOK()) + + require.Equal(0, mempool.Size()) + } +} + +func TestTx(t *testing.T) { + // first we broadcast a tx + c := getHTTPClient() + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(tx) + require.Nil(t, err, "%+v", err) + + txHeight := bres.Height + txHash := bres.Hash + + anotherTxHash := types.Tx("a different tx").Hash() + + cases := []struct { + valid bool + hash []byte + prove bool + }{ + // only valid if correct hash provided + {true, txHash, false}, + {true, txHash, true}, + {false, anotherTxHash, false}, + {false, anotherTxHash, true}, + {false, nil, false}, + {false, nil, true}, + } + + for i, c := range GetClients() { + for j, tc := range cases { + t.Logf("client %d, case %d", i, j) + + // now we query for the tx. + // since there's only one tx, we know index=0. 
+ ptx, err := c.Tx(tc.hash, tc.prove) + + if !tc.valid { + require.NotNil(t, err) + } else { + require.Nil(t, err, "%+v", err) + assert.EqualValues(t, txHeight, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, txHash, ptx.Hash) + + // time to verify the proof + proof := ptx.Proof + if tc.prove && assert.EqualValues(t, tx, proof.Data) { + assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + } + } + } + } +} + +func TestTxSearch(t *testing.T) { + // first we broadcast a tx + c := getHTTPClient() + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(tx) + require.Nil(t, err, "%+v", err) + + txHeight := bres.Height + txHash := bres.Hash + + anotherTxHash := types.Tx("a different tx").Hash() + + for i, c := range GetClients() { + t.Logf("client %d", i) + + // now we query for the tx. + // since there's only one tx, we know index=0. + result, err := c.TxSearch(fmt.Sprintf("tx.hash='%v'", txHash), true, 1, 30) + require.Nil(t, err, "%+v", err) + require.Len(t, result.Txs, 1) + + ptx := result.Txs[0] + assert.EqualValues(t, txHeight, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, txHash, ptx.Hash) + + // time to verify the proof + proof := ptx.Proof + if assert.EqualValues(t, tx, proof.Data) { + assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + } + + // we query for non existing tx + result, err = c.TxSearch(fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, 1, 30) + require.Nil(t, err, "%+v", err) + require.Len(t, result.Txs, 0) + + // we query using a tag (see kvstore application) + result, err = c.TxSearch("app.creator='jae'", false, 1, 30) + require.Nil(t, err, "%+v", err) + if len(result.Txs) == 0 { + t.Fatal("expected a lot of transactions") + } + } +} diff --git a/rpc/client/types.go b/rpc/client/types.go new file mode 100644 index 000000000..89bd2f98c --- /dev/null +++ b/rpc/client/types.go @@ -0,0 +1,12 @@ +package client + +// ABCIQueryOptions can be used to provide options for ABCIQuery call other +// than the DefaultABCIQueryOptions. +type ABCIQueryOptions struct { + Height int64 + Trusted bool +} + +// DefaultABCIQueryOptions are latest height (0) and trusted equal to false +// (which will result in a proof being returned). +var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Trusted: false} diff --git a/rpc/core/README.md b/rpc/core/README.md new file mode 100644 index 000000000..9547079b2 --- /dev/null +++ b/rpc/core/README.md @@ -0,0 +1,20 @@ +# Tendermint RPC + +## Generate markdown for [Slate](https://github.com/tendermint/slate) + +We are using [Slate](https://github.com/tendermint/slate) to power our RPC +documentation. For generating markdown use: + +```shell +go get github.com/davecheney/godoc2md + +godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$' +``` + +For more information see the [CI script for building the Slate docs](/scripts/slate.sh) + +## Pagination + +Requests that return multiple items will be paginated to 30 items by default. +You can specify further pages with the ?page parameter. You can also set a +custom page size up to 100 with the ?per_page parameter. 
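+
+For example, walking all pages of a `tx_search` result from the Go
+client (a sketch; `c` is assumed to be any `client.Client`, and the
+query is the tag example used in the tests):
+
+```go
+page, perPage := 1, 100
+for {
+	res, err := c.TxSearch("app.creator='jae'", false, page, perPage)
+	if err != nil {
+		fmt.Println(err)
+		break
+	}
+	for _, tx := range res.Txs {
+		fmt.Println(tx.Height, tx.Index)
+	}
+	if len(res.Txs) < perPage {
+		break // no more pages
+	}
+	page++
+}
+```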
diff --git a/rpc/core/abci.go b/rpc/core/abci.go new file mode 100644 index 000000000..a5eede3fc --- /dev/null +++ b/rpc/core/abci.go @@ -0,0 +1,95 @@ +package core + +import ( + abci "github.com/tendermint/tendermint/abci/types" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/version" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Query the application for some information. +// +// ```shell +// curl 'localhost:26657/abci_query?path=""&data="abcd"&trusted=false' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// result, err := client.ABCIQuery("", "abcd", true) +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "response": { +// "log": "exists", +// "height": 0, +// "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C", +// "value": "61626364", +// "key": "61626364", +// "index": -1, +// "code": 0 +// } +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +// +// ### Query Parameters +// +// | Parameter | Type | Default | Required | Description | +// |-----------+--------+---------+----------+------------------------------------------------| +// | path | string | false | false | Path to the data ("/a/b/c") | +// | data | []byte | false | true | Data | +// | height | int64 | 0 | false | Height (0 means latest) | +// | trusted | bool | false | false | Does not include a proof of the data inclusion | +func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) { + resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ + Path: path, + Data: data, + Height: height, + Prove: !trusted, + }) + if err != nil { + return nil, err + } + logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) + return &ctypes.ResultABCIQuery{*resQuery}, nil +} + +// Get some info about the application. +// +// ```shell +// curl 'localhost:26657/abci_info' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// info, err := client.ABCIInfo() +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "response": { +// "data": "{\"size\":3}" +// } +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +func ABCIInfo() (*ctypes.ResultABCIInfo, error) { + resInfo, err := proxyAppQuery.InfoSync(abci.RequestInfo{version.Version}) + if err != nil { + return nil, err + } + return &ctypes.ResultABCIInfo{*resInfo}, nil +} diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go new file mode 100644 index 000000000..0e8873152 --- /dev/null +++ b/rpc/core/blocks.go @@ -0,0 +1,364 @@ +package core + +import ( + "fmt" + + ctypes "github.com/tendermint/tendermint/rpc/core/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Get block headers for minHeight <= height <= maxHeight. +// Block headers are returned in descending order (highest first). 
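+//
+// At most 20 block metas are returned per call (see the limit constant
+// below), so a larger range must be walked in chunks. A sketch from the
+// Go client:
+//
+// ```go
+// max := int64(1000)
+// for max > 0 {
+// 	info, err := client.BlockchainInfo(1, max)
+// 	if err != nil {
+// 		break
+// 	}
+// 	for _, meta := range info.BlockMetas {
+// 		fmt.Println(meta.Header.Height)
+// 	}
+// 	max -= int64(len(info.BlockMetas))
+// }
+// ```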
+// +// ```shell +// curl 'localhost:26657/blockchain?minHeight=10&maxHeight=10' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// info, err := client.BlockchainInfo(10, 10) +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "block_metas": [ +// { +// "header": { +// "app_hash": "", +// "chain_id": "test-chain-6UTNIN", +// "height": 10, +// "time": "2017-05-29T15:05:53.877Z", +// "num_txs": 0, +// "last_block_id": { +// "parts": { +// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", +// "total": 1 +// }, +// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" +// }, +// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", +// "data_hash": "", +// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" +// }, +// "block_id": { +// "parts": { +// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", +// "total": 1 +// }, +// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" +// } +// } +// ], +// "last_height": 5493 +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +// +// +func BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + if minHeight == 0 { + minHeight = 1 + } + + if maxHeight == 0 { + maxHeight = blockStore.Height() + } else { + maxHeight = cmn.MinInt64(blockStore.Height(), maxHeight) + } + + // maximum 20 block metas + const limit int64 = 20 + minHeight = cmn.MaxInt64(minHeight, maxHeight-limit) + + logger.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) + + if minHeight > maxHeight { + return nil, fmt.Errorf("min height %d can't be greater than max height %d", minHeight, maxHeight) + } + + blockMetas := []*types.BlockMeta{} + for height := maxHeight; height >= minHeight; height-- { + blockMeta := blockStore.LoadBlockMeta(height) + blockMetas = append(blockMetas, blockMeta) + } + + return &ctypes.ResultBlockchainInfo{blockStore.Height(), blockMetas}, nil +} + +// Get block at a given height. +// If no height is provided, it will fetch the latest block. 
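+//
+// From the Go client, pass nil to get the latest block, or the address
+// of a height for a specific one (a sketch):
+//
+// ```go
+// latest, err := client.Block(nil)
+// h := int64(10)
+// tenth, err := client.Block(&h)
+// ```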
+// +// ```shell +// curl 'localhost:26657/block?height=10' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// info, err := client.Block(10) +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "block": { +// "last_commit": { +// "precommits": [ +// { +// "signature": { +// "data": "12C0D8893B8A38224488DC1DE6270DF76BB1A5E9DB1C68577706A6A97C6EC34FFD12339183D5CA8BC2F46148773823DE905B7F6F5862FD564038BB7AE03BF50D", +// "type": "ed25519" +// }, +// "block_id": { +// "parts": { +// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", +// "total": 1 +// }, +// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" +// }, +// "type": 2, +// "round": 0, +// "height": 9, +// "validator_index": 0, +// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" +// } +// ], +// "blockID": { +// "parts": { +// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", +// "total": 1 +// }, +// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" +// } +// }, +// "data": { +// "txs": [] +// }, +// "header": { +// "app_hash": "", +// "chain_id": "test-chain-6UTNIN", +// "height": 10, +// "time": "2017-05-29T15:05:53.877Z", +// "num_txs": 0, +// "last_block_id": { +// "parts": { +// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", +// "total": 1 +// }, +// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" +// }, +// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", +// "data_hash": "", +// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" +// } +// }, +// "block_meta": { +// "header": { +// "app_hash": "", +// "chain_id": "test-chain-6UTNIN", +// "height": 10, +// "time": "2017-05-29T15:05:53.877Z", +// "num_txs": 0, +// "last_block_id": { +// "parts": { +// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", +// "total": 1 +// }, +// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" +// }, +// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", +// "data_hash": "", +// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" +// }, +// "block_id": { +// "parts": { +// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", +// "total": 1 +// }, +// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" +// } +// } +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { + storeHeight := blockStore.Height() + height, err := getHeight(storeHeight, heightPtr) + if err != nil { + return nil, err + } + + blockMeta := blockStore.LoadBlockMeta(height) + block := blockStore.LoadBlock(height) + return &ctypes.ResultBlock{blockMeta, block}, nil +} + +// Get block commit at a given height. +// If no height is provided, it will fetch the commit for the latest block. 
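+//
+// Note that the commit for height h only becomes canonical once block
+// h+1 is committed, so asking for the latest height returns the node's
+// non-canonical "seen" commit (see the handler below). A sketch from
+// the Go client:
+//
+// ```go
+// h := int64(11)
+// res, err := client.Commit(&h)
+// fmt.Println(res.Header.AppHash, len(res.Commit.Precommits))
+// ```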
+// +// ```shell +// curl 'localhost:26657/commit?height=11' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// info, err := client.Commit(11) +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "canonical": true, +// "commit": { +// "precommits": [ +// { +// "signature": { +// "data": "00970429FEC652E9E21D106A90AE8C5413759A7488775CEF4A3F44DC46C7F9D941070E4FBE9ED54DF247FA3983359A0C3A238D61DE55C75C9116D72ABC9CF50F", +// "type": "ed25519" +// }, +// "block_id": { +// "parts": { +// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", +// "total": 1 +// }, +// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" +// }, +// "type": 2, +// "round": 0, +// "height": 11, +// "validator_index": 0, +// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" +// } +// ], +// "blockID": { +// "parts": { +// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", +// "total": 1 +// }, +// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" +// } +// }, +// "header": { +// "app_hash": "", +// "chain_id": "test-chain-6UTNIN", +// "height": 11, +// "time": "2017-05-29T15:05:54.893Z", +// "num_txs": 0, +// "last_block_id": { +// "parts": { +// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", +// "total": 1 +// }, +// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" +// }, +// "last_commit_hash": "3CE0C9727CE524BA9CB7C91E28F08E2B94001087", +// "data_hash": "", +// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" +// } +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { + storeHeight := blockStore.Height() + height, err := getHeight(storeHeight, heightPtr) + if err != nil { + return nil, err + } + + header := blockStore.LoadBlockMeta(height).Header + + // If the next block has not been committed yet, + // use a non-canonical commit + if height == storeHeight { + commit := blockStore.LoadSeenCommit(height) + return ctypes.NewResultCommit(header, commit, false), nil + } + + // Return the canonical commit (comes from the block at height+1) + commit := blockStore.LoadBlockCommit(height) + return ctypes.NewResultCommit(header, commit, true), nil +} + +// BlockResults gets ABCIResults at a given height. +// If no height is provided, it will fetch results for the latest block. +// +// Results are for the height of the block containing the txs. 
+// Thus response.results[5] is the results of executing getBlock(h).Txs[5] +// +// ```shell +// curl 'localhost:26657/block_results?height=10' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// info, err := client.BlockResults(10) +// ``` +// +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "height": 10, +// "results": [ +// { +// "code": 0, +// "data": "CAFE00F00D" +// }, +// { +// "code": 102, +// "data": "" +// } +// ] +// } +// ``` +func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { + storeHeight := blockStore.Height() + height, err := getHeight(storeHeight, heightPtr) + if err != nil { + return nil, err + } + + // load the results + results, err := sm.LoadABCIResponses(stateDB, height) + if err != nil { + return nil, err + } + + res := &ctypes.ResultBlockResults{ + Height: height, + Results: results, + } + return res, nil +} + +func getHeight(storeHeight int64, heightPtr *int64) (int64, error) { + if heightPtr != nil { + height := *heightPtr + if height <= 0 { + return 0, fmt.Errorf("Height must be greater than 0") + } + if height > storeHeight { + return 0, fmt.Errorf("Height must be less than or equal to the current blockchain height") + } + return height, nil + } + return storeHeight, nil +} diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go new file mode 100644 index 000000000..c026cd91f --- /dev/null +++ b/rpc/core/consensus.go @@ -0,0 +1,261 @@ +package core + +import ( + cm "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/p2p" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +// Get the validator set at the given block height. +// If no height is provided, it will fetch the current validator set. +// +// ```shell +// curl 'localhost:26657/validators' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// state, err := client.Validators() +// ``` +// +// The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "validators": [ +// { +// "accum": 0, +// "voting_power": 10, +// "pub_key": { +// "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", +// "type": "ed25519" +// }, +// "address": "E89A51D60F68385E09E716D353373B11F8FACD62" +// } +// ], +// "block_height": 5241 +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { + storeHeight := blockStore.Height() + height, err := getHeight(storeHeight, heightPtr) + if err != nil { + return nil, err + } + + validators, err := sm.LoadValidators(stateDB, height) + if err != nil { + return nil, err + } + return &ctypes.ResultValidators{height, validators.Validators}, nil +} + +// DumpConsensusState dumps consensus state. 
+// UNSTABLE +// +// ```shell +// curl 'localhost:26657/dump_consensus_state' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// state, err := client.DumpConsensusState() +// ``` +// +// The above command returns JSON structured like this: +// +// ```json +// { +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "round_state": { +// "height": 7185, +// "round": 0, +// "step": 1, +// "start_time": "2018-05-12T13:57:28.440293621-07:00", +// "commit_time": "2018-05-12T13:57:27.440293621-07:00", +// "validators": { +// "validators": [ +// { +// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", +// "pub_key": { +// "type": "tendermint/PubKeyEd25519", +// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" +// }, +// "voting_power": 10, +// "accum": 0 +// } +// ], +// "proposer": { +// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", +// "pub_key": { +// "type": "tendermint/PubKeyEd25519", +// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" +// }, +// "voting_power": 10, +// "accum": 0 +// } +// }, +// "proposal": null, +// "proposal_block": null, +// "proposal_block_parts": null, +// "locked_round": 0, +// "locked_block": null, +// "locked_block_parts": null, +// "valid_round": 0, +// "valid_block": null, +// "valid_block_parts": null, +// "votes": [ +// { +// "round": 0, +// "prevotes": "_", +// "precommits": "_" +// } +// ], +// "commit_round": -1, +// "last_commit": { +// "votes": [ +// "Vote{0:B5B3D40BE539 7184/00/2(Precommit) 14F946FA7EF0 /702B1B1A602A.../ @ 2018-05-12T20:57:27.342Z}" +// ], +// "votes_bit_array": "x", +// "peer_maj_23s": {} +// }, +// "last_validators": { +// "validators": [ +// { +// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", +// "pub_key": { +// "type": "tendermint/PubKeyEd25519", +// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" +// }, +// "voting_power": 10, +// "accum": 0 +// } +// ], +// "proposer": { +// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", +// "pub_key": { +// "type": "tendermint/PubKeyEd25519", +// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" +// }, +// "voting_power": 10, +// "accum": 0 +// } +// } +// }, +// "peers": [ +// { +// "node_address": "30ad1854af22506383c3f0e57fb3c7f90984c5e8@172.16.63.221:26656", +// "peer_state": { +// "round_state": { +// "height": 7185, +// "round": 0, +// "step": 1, +// "start_time": "2018-05-12T13:57:27.438039872-07:00", +// "proposal": false, +// "proposal_block_parts_header": { +// "total": 0, +// "hash": "" +// }, +// "proposal_block_parts": null, +// "proposal_pol_round": -1, +// "proposal_pol": "_", +// "prevotes": "_", +// "precommits": "_", +// "last_commit_round": 0, +// "last_commit": "x", +// "catchup_commit_round": -1, +// "catchup_commit": "_" +// }, +// "stats": { +// "last_vote_height": 7184, +// "votes": 255, +// "last_block_part_height": 7184, +// "block_parts": 255 +// } +// } +// } +// ] +// } +// } +// ``` +func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { + // Get Peer consensus states. + peers := p2pSwitch.Peers().List() + peerStates := make([]ctypes.PeerStateInfo, len(peers)) + for i, peer := range peers { + peerState := peer.Get(types.PeerStateKey).(*cm.PeerState) + peerStateJSON, err := peerState.ToJSON() + if err != nil { + return nil, err + } + peerStates[i] = ctypes.PeerStateInfo{ + // Peer basic info. + NodeAddress: p2p.IDAddressString(peer.ID(), peer.NodeInfo().ListenAddr), + // Peer consensus state. + PeerState: peerStateJSON, + } + } + // Get self round state. 
+ roundState, err := consensusState.GetRoundStateJSON() + if err != nil { + return nil, err + } + return &ctypes.ResultDumpConsensusState{roundState, peerStates}, nil +} + +// ConsensusState returns a concise summary of the consensus state. +// UNSTABLE +// +// ```shell +// curl 'localhost:26657/consensus_state' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// state, err := client.ConsensusState() +// ``` +// +// The above command returns JSON structured like this: +// +// ```json +//{ +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "round_state": { +// "height/round/step": "9336/0/1", +// "start_time": "2018-05-14T10:25:45.72595357-04:00", +// "proposal_block_hash": "", +// "locked_block_hash": "", +// "valid_block_hash": "", +// "height_vote_set": [ +// { +// "round": 0, +// "prevotes": [ +// "nil-Vote" +// ], +// "prevotes_bit_array": "BA{1:_} 0/10 = 0.00", +// "precommits": [ +// "nil-Vote" +// ], +// "precommits_bit_array": "BA{1:_} 0/10 = 0.00" +// } +// ] +// } +// } +//} +//``` +func ConsensusState() (*ctypes.ResultConsensusState, error) { + // Get self round state. + bz, err := consensusState.GetRoundStateSimpleJSON() + return &ctypes.ResultConsensusState{bz}, err +} diff --git a/rpc/core/dev.go b/rpc/core/dev.go new file mode 100644 index 000000000..0b5154769 --- /dev/null +++ b/rpc/core/dev.go @@ -0,0 +1,51 @@ +package core + +import ( + "os" + "runtime/pprof" + + ctypes "github.com/tendermint/tendermint/rpc/core/types" +) + +func UnsafeFlushMempool() (*ctypes.ResultUnsafeFlushMempool, error) { + mempool.Flush() + return &ctypes.ResultUnsafeFlushMempool{}, nil +} + +var profFile *os.File + +func UnsafeStartCPUProfiler(filename string) (*ctypes.ResultUnsafeProfile, error) { + var err error + profFile, err = os.Create(filename) + if err != nil { + return nil, err + } + err = pprof.StartCPUProfile(profFile) + if err != nil { + return nil, err + } + return &ctypes.ResultUnsafeProfile{}, nil +} + +func UnsafeStopCPUProfiler() (*ctypes.ResultUnsafeProfile, error) { + pprof.StopCPUProfile() + if err := profFile.Close(); err != nil { + return nil, err + } + return &ctypes.ResultUnsafeProfile{}, nil +} + +func UnsafeWriteHeapProfile(filename string) (*ctypes.ResultUnsafeProfile, error) { + memProfFile, err := os.Create(filename) + if err != nil { + return nil, err + } + if err := pprof.WriteHeapProfile(memProfFile); err != nil { + return nil, err + } + if err := memProfFile.Close(); err != nil { + return nil, err + } + + return &ctypes.ResultUnsafeProfile{}, nil +} diff --git a/rpc/core/doc.go b/rpc/core/doc.go new file mode 100644 index 000000000..d076b3ecd --- /dev/null +++ b/rpc/core/doc.go @@ -0,0 +1,109 @@ +/* +# Introduction + +Tendermint supports the following RPC protocols: + +* URI over HTTP +* JSONRPC over HTTP +* JSONRPC over websockets + +Tendermint RPC is built using [our own RPC library](https://github.com/tendermint/tendermint/tree/master/rpc/lib) which contains its own set of documentation and tests. + +## Configuration + +Set the `laddr` config parameter under `[rpc]` table in the `$TMHOME/config/config.toml` file or the `--rpc.laddr` command-line flag to the desired protocol://host:port setting. Default: `tcp://0.0.0.0:26657`. + +## Arguments + +Arguments which expect strings or byte arrays may be passed as quoted strings, like `"abc"` or as `0x`-prefixed strings, like `0x616263`. 
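+
+For example, both of these send the same three bytes (a sketch using only
+the Go standard library against the default local RPC address):
+
+```go
+// "abc" as a quoted string
+http.Get(`http://localhost:26657/broadcast_tx_sync?tx="abc"`)
+// the same bytes as 0x-prefixed hex (0x616263 == "abc")
+http.Get("http://localhost:26657/broadcast_tx_sync?tx=0x616263")
+```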
+
+## URI/HTTP
+
+```bash
+curl 'localhost:26657/broadcast_tx_sync?tx="abc"'
+```
+
+> Response:
+
+```json
+{
+  "error": "",
+  "result": {
+    "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
+    "log": "",
+    "data": "",
+    "code": 0
+  },
+  "id": "",
+  "jsonrpc": "2.0"
+}
+```
+
+The `result` object is the JSON encoding of a `ResultBroadcastTx`; see [responses.go](https://github.com/tendermint/tendermint/blob/master/rpc/core/types/responses.go) for a complete overview of the response types.
+
+## JSONRPC/HTTP
+
+JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g. `http://localhost:26657/`).
+
+```json
+{
+  "method": "broadcast_tx_sync",
+  "jsonrpc": "2.0",
+  "params": [ "abc" ],
+  "id": "dontcare"
+}
+```
+
+## JSONRPC/websockets
+
+JSONRPC requests can be made via websocket. The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. Asynchronous RPC functions like event `subscribe` and `unsubscribe` are only available via websockets.
+
+
+## More Examples
+
+See the various bash tests using curl in `test/`, and examples using the `Go` API in `rpc/client/`.
+
+## Get the list
+
+An HTTP GET request to the root RPC endpoint shows a list of available endpoints.
+
+```bash
+curl 'localhost:26657'
+```
+
+> Response:
+
+```plain
+Available endpoints:
+/abci_info
+/dump_consensus_state
+/genesis
+/net_info
+/num_unconfirmed_txs
+/status
+/health
+/unconfirmed_txs
+/unsafe_flush_mempool
+/unsafe_stop_cpu_profiler
+/validators
+
+Endpoints that require arguments:
+/abci_query?path=_&data=_&prove=_
+/block?height=_
+/blockchain?minHeight=_&maxHeight=_
+/broadcast_tx_async?tx=_
+/broadcast_tx_commit?tx=_
+/broadcast_tx_sync?tx=_
+/commit?height=_
+/dial_seeds?seeds=_
+/dial_peers?peers=_&persistent=_
+/subscribe?event=_
+/tx?hash=_&prove=_
+/unsafe_start_cpu_profiler?filename=_
+/unsafe_write_heap_profile?filename=_
+/unsubscribe?event=_
+```
+
+# Endpoints
+*/
+package core
diff --git a/rpc/core/doc_template.txt b/rpc/core/doc_template.txt
new file mode 100644
index 000000000..896d0c271
--- /dev/null
+++ b/rpc/core/doc_template.txt
@@ -0,0 +1,8 @@
+{{with .PDoc}}
+{{comment_md .Doc}}
+{{example_html $ ""}}
+
+{{range .Funcs}}{{$name_html := html .Name}}## [{{$name_html}}]({{posLink_url $ .Decl}})
+{{comment_md .Doc}}{{end}}
+{{end}}
+---
diff --git a/rpc/core/events.go b/rpc/core/events.go
new file mode 100644
index 000000000..6f679e33d
--- /dev/null
+++ b/rpc/core/events.go
@@ -0,0 +1,188 @@
+package core
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+
+	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
+	tmtypes "github.com/tendermint/tendermint/types"
+)
+
+// Subscribe for events via WebSocket.
+//
+// To tell which events you want, you need to provide a query. query is a
+// string, which has the form: "condition AND condition ..." (no OR at the
+// moment). condition has the form: "key operation operand". key is a string
+// with a restricted set of possible symbols ( \t\n\r\\()"'=>< are not
+// allowed). operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand
+// can be a string (escaped with single quotes), number, date or time.
+//
+// Examples:
+// tm.event = 'NewBlock' # new blocks
+// tm.event = 'CompleteProposal' # node got a complete proposal
+// tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction
+// tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block
+// tx.height = 5 # all txs of the fifth block
+//
+// Tendermint provides a few predefined keys: tm.event, tx.hash and tx.height.
+// Note that for transactions, you can define additional keys by providing
+// tags in the DeliverTx response, for example (illustrative syntax; Tags is
+// a slice of key-value pairs):
+//
+// DeliverTx{
+// 	Tags: []cmn.KVPair{
+// 		{Key: []byte("agent.name"), Value: []byte("K")},
+// 	},
+// }
+//
+// tm.event = 'Tx' AND agent.name = 'K'
+// tm.event = 'Tx' AND account.created_at >= TIME 2013-05-03T14:45:00Z
+// tm.event = 'Tx' AND contract.sign_date = DATE 2017-01-01
+// tm.event = 'Tx' AND account.owner CONTAINS 'Igor'
+//
+// See list of all possible events here
+// https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants
+//
+// For complete query syntax, check out
+// https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query.
+//
+// ```go
+// import "github.com/tendermint/tendermint/libs/pubsub/query"
+// import "github.com/tendermint/tendermint/types"
+//
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// ctx, cancel := context.WithTimeout(context.Background(), timeout)
+// defer cancel()
+// query := query.MustParse("tm.event = 'Tx' AND tx.height = 3")
+// txs := make(chan interface{})
+// err := client.Subscribe(ctx, "test-client", query, txs)
+//
+// go func() {
+// 	for e := range txs {
+// 		fmt.Println("got ", e.(types.EventDataTx))
+// 	}
+// }()
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {},
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+//
+// ### Query Parameters
+//
+// | Parameter | Type   | Default | Required | Description |
+// |-----------+--------+---------+----------+-------------|
+// | query     | string | ""      | true     | Query       |
+//
+//
+func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscribe, error) {
+	addr := wsCtx.GetRemoteAddr()
+	logger.Info("Subscribe to query", "remote", addr, "query", query)
+
+	q, err := tmquery.New(query)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse query")
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout)
+	defer cancel()
+	ch := make(chan interface{})
+	err = eventBusFor(wsCtx).Subscribe(ctx, addr, q, ch)
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for event := range ch {
+			tmResult := &ctypes.ResultEvent{query, event.(tmtypes.TMEventData)}
+			wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Codec(), wsCtx.Request.ID+"#event", tmResult))
+		}
+	}()
+
+	return &ctypes.ResultSubscribe{}, nil
+}
+
+// Unsubscribe from events via WebSocket.
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// err := client.Unsubscribe("test-client", query)
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {},
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+//
+// ### Query Parameters
+//
+// | Parameter | Type   | Default | Required | Description |
+// |-----------+--------+---------+----------+-------------|
+// | query     | string | ""      | true     | Query       |
+//
+//
+func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsubscribe, error) {
+	addr := wsCtx.GetRemoteAddr()
+	logger.Info("Unsubscribe from query", "remote", addr, "query", query)
+	q, err := tmquery.New(query)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse query")
+	}
+	err = eventBusFor(wsCtx).Unsubscribe(context.Background(), addr, q)
+	if err != nil {
+		return nil, err
+	}
+	return &ctypes.ResultUnsubscribe{}, nil
+}
+
+// Unsubscribe from all events via WebSocket.
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// err := client.UnsubscribeAll("test-client")
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {},
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+//
+//
+func UnsubscribeAll(wsCtx rpctypes.WSRPCContext) (*ctypes.ResultUnsubscribe, error) {
+	addr := wsCtx.GetRemoteAddr()
+	logger.Info("Unsubscribe from all", "remote", addr)
+	err := eventBusFor(wsCtx).UnsubscribeAll(context.Background(), addr)
+	if err != nil {
+		return nil, err
+	}
+	return &ctypes.ResultUnsubscribe{}, nil
+}
+
+func eventBusFor(wsCtx rpctypes.WSRPCContext) tmtypes.EventBusSubscriber {
+	es := wsCtx.GetEventSubscriber()
+	if es == nil {
+		es = eventBus
+	}
+	return es
+}
diff --git a/rpc/core/health.go b/rpc/core/health.go
new file mode 100644
index 000000000..0ec4b5b4a
--- /dev/null
+++ b/rpc/core/health.go
@@ -0,0 +1,31 @@
+package core
+
+import (
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+)
+
+// Get node health. Returns an empty result (200 OK) on success, and no
+// response at all in case of an error.
+//
+// ```shell
+// curl 'localhost:26657/health'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// result, err := client.Health()
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {},
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+func Health() (*ctypes.ResultHealth, error) {
+	return &ctypes.ResultHealth{}, nil
+}
diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go
new file mode 100644
index 000000000..ecc41ce12
--- /dev/null
+++ b/rpc/core/mempool.go
@@ -0,0 +1,276 @@
+package core
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+//-----------------------------------------------------------------------------
+// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!)
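+
+// A rough, client-side sketch of how the three broadcast methods below differ
+// (assumes the rpc/client HTTP client used in the examples; not part of this file):
+//
+// ```go
+// // Async: returns immediately; only the hash of the tx is known.
+// resAsync, _ := client.BroadcastTxAsync(types.Tx("key=val"))
+//
+// // Sync: waits for CheckTx; inspect resSync.Code for mempool acceptance.
+// resSync, _ := client.BroadcastTxSync(types.Tx("key=val"))
+//
+// // Commit: waits until the tx is included in a block (or times out).
+// resCommit, _ := client.BroadcastTxCommit(types.Tx("key=val"))
+// _ = resCommit.Height // height of the block that included the tx
+// _, _ = resAsync, resSync
+// ```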
+ +// Returns right away, with no response +// +// ```shell +// curl 'localhost:26657/broadcast_tx_async?tx="123"' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// result, err := client.BroadcastTxAsync("123") +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", +// "log": "", +// "data": "", +// "code": 0 +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +// +// ### Query Parameters +// +// | Parameter | Type | Default | Required | Description | +// |-----------+------+---------+----------+-----------------| +// | tx | Tx | nil | true | The transaction | +func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + err := mempool.CheckTx(tx, nil) + if err != nil { + return nil, fmt.Errorf("Error broadcasting transaction: %v", err) + } + return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil +} + +// Returns with the response from CheckTx. +// +// ```shell +// curl 'localhost:26657/broadcast_tx_sync?tx="456"' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// result, err := client.BroadcastTxSync("456") +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "code": 0, +// "data": "", +// "log": "", +// "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" +// }, +// "error": "" +// } +// ``` +// +// ### Query Parameters +// +// | Parameter | Type | Default | Required | Description | +// |-----------+------+---------+----------+-----------------| +// | tx | Tx | nil | true | The transaction | +func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + resCh := make(chan *abci.Response, 1) + err := mempool.CheckTx(tx, func(res *abci.Response) { + resCh <- res + }) + if err != nil { + return nil, fmt.Errorf("Error broadcasting transaction: %v", err) + } + res := <-resCh + r := res.GetCheckTx() + return &ctypes.ResultBroadcastTx{ + Code: r.Code, + Data: r.Data, + Log: r.Log, + Hash: tx.Hash(), + }, nil +} + +// CONTRACT: only returns error if mempool.BroadcastTx errs (ie. problem with the app) +// or if we timeout waiting for tx to commit. +// If CheckTx or DeliverTx fail, no error will be returned, but the returned result +// will contain a non-OK ABCI code. 
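+//
+// In caller code that contract typically looks like this (a hedged sketch,
+// assuming the rpc/client HTTP client from the examples):
+//
+// ```go
+// res, err := client.BroadcastTxCommit(tx)
+// if err != nil {
+// 	// broadcast problem, or timeout waiting for the block
+// } else if res.CheckTx.Code != abci.CodeTypeOK {
+// 	// rejected by CheckTx; the tx never made it into a block
+// } else if res.DeliverTx.Code != abci.CodeTypeOK {
+// 	// included in a block, but the application rejected it
+// }
+// ```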
+// +// ```shell +// curl 'localhost:26657/broadcast_tx_commit?tx="789"' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// result, err := client.BroadcastTxCommit("789") +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "error": "", +// "result": { +// "height": 26682, +// "hash": "75CA0F856A4DA078FC4911580360E70CEFB2EBEE", +// "deliver_tx": { +// "log": "", +// "data": "", +// "code": 0 +// }, +// "check_tx": { +// "log": "", +// "data": "", +// "code": 0 +// } +// }, +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +// +// ### Query Parameters +// +// | Parameter | Type | Default | Required | Description | +// |-----------+------+---------+----------+-----------------| +// | tx | Tx | nil | true | The transaction | +func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + // subscribe to tx being committed in block + ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) + defer cancel() + deliverTxResCh := make(chan interface{}) + q := types.EventQueryTxFor(tx) + err := eventBus.Subscribe(ctx, "mempool", q, deliverTxResCh) + if err != nil { + err = errors.Wrap(err, "failed to subscribe to tx") + logger.Error("Error on broadcastTxCommit", "err", err) + return nil, fmt.Errorf("Error on broadcastTxCommit: %v", err) + } + defer eventBus.Unsubscribe(context.Background(), "mempool", q) + + // broadcast the tx and register checktx callback + checkTxResCh := make(chan *abci.Response, 1) + err = mempool.CheckTx(tx, func(res *abci.Response) { + checkTxResCh <- res + }) + if err != nil { + logger.Error("Error on broadcastTxCommit", "err", err) + return nil, fmt.Errorf("Error on broadcastTxCommit: %v", err) + } + checkTxRes := <-checkTxResCh + checkTxR := checkTxRes.GetCheckTx() + if checkTxR.Code != abci.CodeTypeOK { + // CheckTx failed! + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxR, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, nil + } + + // Wait for the tx to be included in a block, + // timeout after something reasonable. + // TODO: configurable? + timer := time.NewTimer(60 * 2 * time.Second) + select { + case deliverTxResMsg := <-deliverTxResCh: + deliverTxRes := deliverTxResMsg.(types.EventDataTx) + // The tx was included in a block. + deliverTxR := deliverTxRes.Result + logger.Info("DeliverTx passed ", "tx", cmn.HexBytes(tx), "response", deliverTxR) + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxR, + DeliverTx: deliverTxR, + Hash: tx.Hash(), + Height: deliverTxRes.Height, + }, nil + case <-timer.C: + logger.Error("failed to include tx") + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxR, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, fmt.Errorf("Timed out waiting for transaction to be included in a block") + } +} + +// Get unconfirmed transactions (maximum ?limit entries) including their number. 
+
+// ```shell
+// curl 'localhost:26657/unconfirmed_txs'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// result, err := client.UnconfirmedTxs(10)
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {
+//     "txs": [],
+//     "n_txs": 0
+//   },
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+//
+// ### Query Parameters
+//
+// | Parameter | Type | Default | Required | Description                          |
+// |-----------+------+---------+----------+--------------------------------------|
+// | limit     | int  | 30      | false    | Maximum number of entries (max: 100) |
+func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
+	// reuse per_page validator
+	limit = validatePerPage(limit)
+
+	txs := mempool.Reap(limit)
+	return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil
+}
+
+// Get number of unconfirmed transactions.
+//
+// ```shell
+// curl 'localhost:26657/num_unconfirmed_txs'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// result, err := client.NumUnconfirmedTxs()
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {
+//     "txs": null,
+//     "n_txs": 0
+//   },
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+func NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
+	return &ctypes.ResultUnconfirmedTxs{N: mempool.Size()}, nil
+}
diff --git a/rpc/core/net.go b/rpc/core/net.go
new file mode 100644
index 000000000..ba9753d81
--- /dev/null
+++ b/rpc/core/net.go
@@ -0,0 +1,127 @@
+package core
+
+import (
+	"github.com/pkg/errors"
+
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+)
+
+// Get network info.
+//
+// ```shell
+// curl 'localhost:26657/net_info'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// info, err := client.NetInfo()
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {
+//     "n_peers": 0,
+//     "peers": [],
+//     "listeners": [
+//       "Listener(@10.0.2.15:26656)"
+//     ],
+//     "listening": true
+//   },
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+func NetInfo() (*ctypes.ResultNetInfo, error) {
+	listening := p2pSwitch.IsListening()
+	listeners := []string{}
+	for _, listener := range p2pSwitch.Listeners() {
+		listeners = append(listeners, listener.String())
+	}
+	peers := []ctypes.Peer{}
+	for _, peer := range p2pSwitch.Peers().List() {
+		peers = append(peers, ctypes.Peer{
+			NodeInfo:         peer.NodeInfo(),
+			IsOutbound:       peer.IsOutbound(),
+			ConnectionStatus: peer.Status(),
+		})
+	}
+	// TODO: Should we include PersistentPeers and Seeds in here?
+	// PRO: useful info
+	// CON: privacy
+	return &ctypes.ResultNetInfo{
+		Listening: listening,
+		Listeners: listeners,
+		NPeers:    len(peers),
+		Peers:     peers,
+	}, nil
+}
+
+func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
+	if len(seeds) == 0 {
+		return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided")
+	}
+	// starts go routines to dial each peer after random delays
+	logger.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds)
+	err := p2pSwitch.DialPeersAsync(addrBook, seeds, false)
+	if err != nil {
+		return &ctypes.ResultDialSeeds{}, err
+	}
+	return &ctypes.ResultDialSeeds{"Dialing seeds in progress. 
See /net_info for details"}, nil
+}
+
+func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
+	if len(peers) == 0 {
+		return &ctypes.ResultDialPeers{}, errors.New("No peers provided")
+	}
+	// starts go routines to dial each peer after random delays
+	logger.Info("DialPeers", "addrBook", addrBook, "peers", peers, "persistent", persistent)
+	err := p2pSwitch.DialPeersAsync(addrBook, peers, persistent)
+	if err != nil {
+		return &ctypes.ResultDialPeers{}, err
+	}
+	return &ctypes.ResultDialPeers{"Dialing peers in progress. See /net_info for details"}, nil
+}
+
+// Get genesis file.
+//
+// ```shell
+// curl 'localhost:26657/genesis'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// genesis, err := client.Genesis()
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {
+//     "genesis": {
+//       "app_hash": "",
+//       "validators": [
+//         {
+//           "name": "",
+//           "power": 10,
+//           "pub_key": {
+//             "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D",
+//             "type": "ed25519"
+//           }
+//         }
+//       ],
+//       "chain_id": "test-chain-6UTNIN",
+//       "genesis_time": "2017-05-29T15:05:41.671Z"
+//     }
+//   },
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+func Genesis() (*ctypes.ResultGenesis, error) {
+	return &ctypes.ResultGenesis{genDoc}, nil
+}
diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go
new file mode 100644
index 000000000..128b3e9a7
--- /dev/null
+++ b/rpc/core/pipe.go
@@ -0,0 +1,147 @@
+package core
+
+import (
+	"time"
+
+	"github.com/tendermint/tendermint/consensus"
+	crypto "github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/proxy"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/state/txindex"
+	"github.com/tendermint/tendermint/types"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+const (
+	// see README
+	defaultPerPage = 30
+	maxPerPage     = 100
+)
+
+var subscribeTimeout = 5 * time.Second
+
+//----------------------------------------------
+// These interfaces are used by RPC and must be thread safe
+
+type Consensus interface {
+	GetState() sm.State
+	GetValidators() (int64, []*types.Validator)
+	GetRoundStateJSON() ([]byte, error)
+	GetRoundStateSimpleJSON() ([]byte, error)
+}
+
+type P2P interface {
+	Listeners() []p2p.Listener
+	Peers() p2p.IPeerSet
+	NumPeers() (outbound, inbound, dialing int)
+	NodeInfo() p2p.NodeInfo
+	IsListening() bool
+	DialPeersAsync(p2p.AddrBook, []string, bool) error
+}
+
+//----------------------------------------------
+// These package level globals come with setters
+// that are expected to be called only once, on startup
+
+var (
+	// external, thread safe interfaces
+	proxyAppQuery proxy.AppConnQuery
+
+	// interfaces defined in types and above
+	stateDB        dbm.DB
+	blockStore     sm.BlockStore
+	mempool        sm.Mempool
+	evidencePool   sm.EvidencePool
+	consensusState Consensus
+	p2pSwitch      P2P
+
+	// objects
+	pubKey           crypto.PubKey
+	genDoc           *types.GenesisDoc // cache the genesis structure
+	addrBook         p2p.AddrBook
+	txIndexer        txindex.TxIndexer
+	consensusReactor *consensus.ConsensusReactor
+	eventBus         *types.EventBus // thread safe
+
+	logger log.Logger
+)
+
+func SetStateDB(db dbm.DB) {
+	stateDB = db
+}
+
+func SetBlockStore(bs sm.BlockStore) {
+	blockStore = bs
+}
+
+func SetMempool(mem sm.Mempool) {
+	mempool = mem
+}
+
+func SetEvidencePool(evpool sm.EvidencePool) {
evidencePool = evpool +} + +func SetConsensusState(cs Consensus) { + consensusState = cs +} + +func SetSwitch(sw P2P) { + p2pSwitch = sw +} + +func SetPubKey(pk crypto.PubKey) { + pubKey = pk +} + +func SetGenesisDoc(doc *types.GenesisDoc) { + genDoc = doc +} + +func SetAddrBook(book p2p.AddrBook) { + addrBook = book +} + +func SetProxyAppQuery(appConn proxy.AppConnQuery) { + proxyAppQuery = appConn +} + +func SetTxIndexer(indexer txindex.TxIndexer) { + txIndexer = indexer +} + +func SetConsensusReactor(conR *consensus.ConsensusReactor) { + consensusReactor = conR +} + +func SetLogger(l log.Logger) { + logger = l +} + +func SetEventBus(b *types.EventBus) { + eventBus = b +} + +func validatePage(page, perPage, totalCount int) int { + if perPage < 1 { + return 1 + } + + pages := ((totalCount - 1) / perPage) + 1 + if page < 1 { + page = 1 + } else if page > pages { + page = pages + } + + return page +} + +func validatePerPage(perPage int) int { + if perPage < 1 || perPage > maxPerPage { + return defaultPerPage + } + return perPage +} diff --git a/rpc/core/pipe_test.go b/rpc/core/pipe_test.go new file mode 100644 index 000000000..225e36492 --- /dev/null +++ b/rpc/core/pipe_test.go @@ -0,0 +1,69 @@ +package core + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPaginationPage(t *testing.T) { + + cases := []struct { + totalCount int + perPage int + page int + newPage int + }{ + {0, 0, 1, 1}, + + {0, 10, 0, 1}, + {0, 10, 1, 1}, + {0, 10, 2, 1}, + + {5, 10, -1, 1}, + {5, 10, 0, 1}, + {5, 10, 1, 1}, + {5, 10, 2, 1}, + {5, 10, 2, 1}, + + {5, 5, 1, 1}, + {5, 5, 2, 1}, + {5, 5, 3, 1}, + + {5, 3, 2, 2}, + {5, 3, 3, 2}, + + {5, 2, 2, 2}, + {5, 2, 3, 3}, + {5, 2, 4, 3}, + } + + for _, c := range cases { + p := validatePage(c.page, c.perPage, c.totalCount) + assert.Equal(t, c.newPage, p, fmt.Sprintf("%v", c)) + } + +} + +func TestPaginationPerPage(t *testing.T) { + + cases := []struct { + totalCount int + perPage int + newPerPage int + }{ + {5, 0, defaultPerPage}, + {5, 1, 1}, + {5, 2, 2}, + {5, defaultPerPage, defaultPerPage}, + {5, maxPerPage - 1, maxPerPage - 1}, + {5, maxPerPage, maxPerPage}, + {5, maxPerPage + 1, defaultPerPage}, + } + + for _, c := range cases { + p := validatePerPage(c.perPage) + assert.Equal(t, c.newPerPage, p, fmt.Sprintf("%v", c)) + } +} diff --git a/rpc/core/routes.go b/rpc/core/routes.go new file mode 100644 index 000000000..f26fadb62 --- /dev/null +++ b/rpc/core/routes.go @@ -0,0 +1,52 @@ +package core + +import ( + rpc "github.com/tendermint/tendermint/rpc/lib/server" +) + +// TODO: better system than "unsafe" prefix +// NOTE: Amino is registered in rpc/core/types/wire.go. +var Routes = map[string]*rpc.RPCFunc{ + // subscribe/unsubscribe are reserved for websocket events. 
+ "subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), + "unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "query"), + "unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""), + + // info API + "health": rpc.NewRPCFunc(Health, ""), + "status": rpc.NewRPCFunc(Status, ""), + "net_info": rpc.NewRPCFunc(NetInfo, ""), + "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"), + "genesis": rpc.NewRPCFunc(Genesis, ""), + "block": rpc.NewRPCFunc(Block, "height"), + "block_results": rpc.NewRPCFunc(BlockResults, "height"), + "commit": rpc.NewRPCFunc(Commit, "height"), + "tx": rpc.NewRPCFunc(Tx, "hash,prove"), + "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page"), + "validators": rpc.NewRPCFunc(Validators, "height"), + "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), + "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), + "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), + "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), + + // broadcast API + "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"), + "broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"), + "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), + + // abci API + "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,trusted"), + "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), +} + +func AddUnsafeRoutes() { + // control API + Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds") + Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent") + Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "") + + // profiler API + Routes["unsafe_start_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStartCPUProfiler, "filename") + Routes["unsafe_stop_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStopCPUProfiler, "") + Routes["unsafe_write_heap_profile"] = rpc.NewRPCFunc(UnsafeWriteHeapProfile, "filename") +} diff --git a/rpc/core/status.go b/rpc/core/status.go new file mode 100644 index 000000000..63e62b2c7 --- /dev/null +++ b/rpc/core/status.go @@ -0,0 +1,133 @@ +package core + +import ( + "bytes" + "time" + + ctypes "github.com/tendermint/tendermint/rpc/core/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Get Tendermint status including node info, pubkey, latest block +// hash, app hash, block height and time. 
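+//
+// One practical use (a sketch, assuming the rpc/client HTTP client from the
+// examples) is to poll Status until the node has caught up with the chain:
+//
+// ```go
+// for {
+// 	s, err := client.Status()
+// 	if err == nil && !s.SyncInfo.CatchingUp {
+// 		break // synced to the chain tip
+// 	}
+// 	time.Sleep(time.Second)
+// }
+// ```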
+// +// ```shell +// curl 'localhost:26657/status' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// result, err := client.Status() +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +//{ +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "node_info": { +// "id": "562dd7f579f0ecee8c94a11a3c1e378c1876f433", +// "listen_addr": "192.168.1.2:26656", +// "network": "test-chain-I6zScH", +// "version": "0.19.0", +// "channels": "4020212223303800", +// "moniker": "Ethans-MacBook-Pro.local", +// "other": [ +// "amino_version=0.9.8", +// "p2p_version=0.5.0", +// "consensus_version=v1/0.2.2", +// "rpc_version=0.7.0/3", +// "tx_index=on", +// "rpc_addr=tcp://0.0.0.0:26657" +// ] +// }, +// "sync_info": { +// "latest_block_hash": "2D4D7055BE685E3CB2410603C92AD37AE557AC59", +// "latest_app_hash": "0000000000000000", +// "latest_block_height": 231, +// "latest_block_time": "2018-04-27T23:18:08.459766485-04:00", +// "catching_up": false +// }, +// "validator_info": { +// "address": "5875562FF0FFDECC895C20E32FC14988952E99E7", +// "pub_key": { +// "type": "tendermint/PubKeyEd25519", +// "value": "PpDJRUrLG2RgFqYYjawfn/AcAgacSXpLFrmfYYQnuzE=" +// }, +// "voting_power": 10 +// } +// } +//} +// ``` +func Status() (*ctypes.ResultStatus, error) { + latestHeight := blockStore.Height() + var ( + latestBlockMeta *types.BlockMeta + latestBlockHash cmn.HexBytes + latestAppHash cmn.HexBytes + latestBlockTimeNano int64 + ) + if latestHeight != 0 { + latestBlockMeta = blockStore.LoadBlockMeta(latestHeight) + latestBlockHash = latestBlockMeta.BlockID.Hash + latestAppHash = latestBlockMeta.Header.AppHash + latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() + } + + latestBlockTime := time.Unix(0, latestBlockTimeNano) + + var votingPower int64 + if val := validatorAtHeight(latestHeight); val != nil { + votingPower = val.VotingPower + } + + result := &ctypes.ResultStatus{ + NodeInfo: p2pSwitch.NodeInfo(), + SyncInfo: ctypes.SyncInfo{ + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, + LatestBlockHeight: latestHeight, + LatestBlockTime: latestBlockTime, + CatchingUp: consensusReactor.FastSync(), + }, + ValidatorInfo: ctypes.ValidatorInfo{ + Address: pubKey.Address(), + PubKey: pubKey, + VotingPower: votingPower, + }, + } + + return result, nil +} + +func validatorAtHeight(h int64) *types.Validator { + lastBlockHeight, vals := consensusState.GetValidators() + + privValAddress := pubKey.Address() + + // if we're still at height h, search in the current validator set + if lastBlockHeight == h { + for _, val := range vals { + if bytes.Equal(val.Address, privValAddress) { + return val + } + } + } + + // if we've moved to the next height, retrieve the validator set from DB + if lastBlockHeight > h { + vals, err := sm.LoadValidators(stateDB, h) + if err != nil { + // should not happen + return nil + } + _, val := vals.GetByAddress(privValAddress) + return val + } + + return nil +} diff --git a/rpc/core/tx.go b/rpc/core/tx.go new file mode 100644 index 000000000..f53d82f14 --- /dev/null +++ b/rpc/core/tx.go @@ -0,0 +1,219 @@ +package core + +import ( + "fmt" + + cmn "github.com/tendermint/tendermint/libs/common" + + tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/types" +) + +// Tx allows you to query the transaction results. 
`nil` could mean the
+// transaction is in the mempool, invalidated, or was not sent in the first
+// place.
+//
+// ```shell
+// curl "localhost:26657/tx?hash=0x2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// hashBytes, err := hex.DecodeString("2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF")
+// tx, err := client.Tx(hashBytes, true)
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+//   "error": "",
+//   "result": {
+//     "proof": {
+//       "Proof": {
+//         "aunts": []
+//       },
+//       "Data": "YWJjZA==",
+//       "RootHash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
+//       "Total": 1,
+//       "Index": 0
+//     },
+//     "tx": "YWJjZA==",
+//     "tx_result": {
+//       "log": "",
+//       "data": "",
+//       "code": 0
+//     },
+//     "index": 0,
+//     "height": 52,
+//     "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
+//   },
+//   "id": "",
+//   "jsonrpc": "2.0"
+// }
+// ```
+//
+// Returns a transaction matching the given transaction hash.
+//
+// ### Query Parameters
+//
+// | Parameter | Type   | Default | Required | Description                                                |
+// |-----------+--------+---------+----------+------------------------------------------------------------|
+// | hash      | []byte | nil     | true     | The transaction hash                                       |
+// | prove     | bool   | false   | false    | Include a proof of the transaction inclusion in the block |
+//
+// ### Returns
+//
+// - `proof`: the `types.TxProof` object
+// - `tx`: `[]byte` - the transaction
+// - `tx_result`: the `abci.Result` object
+// - `index`: `int` - index of the transaction
+// - `height`: `int` - height of the block where this transaction was in
+// - `hash`: `[]byte` - hash of the transaction
+func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
+
+	// if index is disabled, return error
+	if _, ok := txIndexer.(*null.TxIndex); ok {
+		return nil, fmt.Errorf("Transaction indexing is disabled")
+	}
+
+	r, err := txIndexer.Get(hash)
+	if err != nil {
+		return nil, err
+	}
+
+	if r == nil {
+		return nil, fmt.Errorf("Tx (%X) not found", hash)
+	}
+
+	height := r.Height
+	index := r.Index
+
+	var proof types.TxProof
+	if prove {
+		block := blockStore.LoadBlock(height)
+		proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines
+	}
+
+	return &ctypes.ResultTx{
+		Hash:     hash,
+		Height:   height,
+		Index:    uint32(index),
+		TxResult: r.Result,
+		Tx:       r.Tx,
+		Proof:    proof,
+	}, nil
+}
+
+// TxSearch allows you to query for multiple transaction results. It returns a
+// list of transactions (maximum ?per_page entries) and the total count.
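+//
+// Paging through a large result set looks roughly like this (a sketch,
+// assuming the rpc/client HTTP client from the examples; pages are 1-based):
+//
+// ```go
+// page, perPage := 1, 30
+// for {
+// 	res, err := client.TxSearch("account.owner='Ivan'", false, page, perPage)
+// 	if err != nil || len(res.Txs) == 0 {
+// 		break
+// 	}
+// 	for _, tx := range res.Txs {
+// 		fmt.Println(tx.Height, tx.Hash)
+// 	}
+// 	if page*perPage >= res.TotalCount {
+// 		break
+// 	}
+// 	page++
+// }
+// ```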
+// +// ```shell +// curl "localhost:26657/tx_search?query=\"account.owner='Ivan'\"&prove=true" +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// q, err := tmquery.New("account.owner='Ivan'") +// tx, err := client.TxSearch(q, true) +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "txs": [ +// { +// "proof": { +// "Proof": { +// "aunts": [ +// "J3LHbizt806uKnABNLwG4l7gXCA=", +// "iblMO/M1TnNtlAefJyNCeVhjAb0=", +// "iVk3ryurVaEEhdeS0ohAJZ3wtB8=", +// "5hqMkTeGqpct51ohX0lZLIdsn7Q=", +// "afhsNxFnLlZgFDoyPpdQSe0bR8g=" +// ] +// }, +// "Data": "mvZHHa7HhZ4aRT0xMDA=", +// "RootHash": "F6541223AA46E428CB1070E9840D2C3DF3B6D776", +// "Total": 32, +// "Index": 31 +// }, +// "tx": "mvZHHa7HhZ4aRT0xMDA=", +// "tx_result": {}, +// "index": 31, +// "height": 12, +// "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" +// } +// ], +// "total_count": 1 +// } +// } +// ``` +// +// ### Query Parameters +// +// | Parameter | Type | Default | Required | Description | +// |-----------+--------+---------+----------+-----------------------------------------------------------| +// | query | string | "" | true | Query | +// | prove | bool | false | false | Include proofs of the transactions inclusion in the block | +// | page | int | 1 | false | Page number (1-based) | +// | per_page | int | 30 | false | Number of entries per page (max: 100) | +// +// ### Returns +// +// - `proof`: the `types.TxProof` object +// - `tx`: `[]byte` - the transaction +// - `tx_result`: the `abci.Result` object +// - `index`: `int` - index of the transaction +// - `height`: `int` - height of the block where this transaction was in +// - `hash`: `[]byte` - hash of the transaction +func TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { + // if index is disabled, return error + if _, ok := txIndexer.(*null.TxIndex); ok { + return nil, fmt.Errorf("Transaction indexing is disabled") + } + + q, err := tmquery.New(query) + if err != nil { + return nil, err + } + + results, err := txIndexer.Search(q) + if err != nil { + return nil, err + } + + totalCount := len(results) + perPage = validatePerPage(perPage) + page = validatePage(page, perPage, totalCount) + skipCount := (page - 1) * perPage + + apiResults := make([]*ctypes.ResultTx, cmn.MinInt(perPage, totalCount-skipCount)) + var proof types.TxProof + for i := 0; i < len(apiResults); i++ { + r := results[skipCount+i] + height := r.Height + index := r.Index + + if prove { + block := blockStore.LoadBlock(height) + proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + } + + apiResults[i] = &ctypes.ResultTx{ + Hash: r.Tx.Hash(), + Height: height, + Index: index, + TxResult: r.Result, + Tx: r.Tx, + Proof: proof, + } + } + + return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil +} diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go new file mode 100644 index 000000000..4fec416ed --- /dev/null +++ b/rpc/core/types/responses.go @@ -0,0 +1,210 @@ +package core_types + +import ( + "encoding/json" + "strings" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +// List of blocks +type ResultBlockchainInfo struct { + 
LastHeight int64 `json:"last_height"` + BlockMetas []*types.BlockMeta `json:"block_metas"` +} + +// Genesis file +type ResultGenesis struct { + Genesis *types.GenesisDoc `json:"genesis"` +} + +// Single block (with meta) +type ResultBlock struct { + BlockMeta *types.BlockMeta `json:"block_meta"` + Block *types.Block `json:"block"` +} + +// Commit and Header +type ResultCommit struct { + // SignedHeader is header and commit, embedded so we only have + // one level in the json output + types.SignedHeader + CanonicalCommit bool `json:"canonical"` +} + +// ABCI results from a block +type ResultBlockResults struct { + Height int64 `json:"height"` + Results *state.ABCIResponses `json:"results"` +} + +// NewResultCommit is a helper to initialize the ResultCommit with +// the embedded struct +func NewResultCommit(header *types.Header, commit *types.Commit, + canonical bool) *ResultCommit { + + return &ResultCommit{ + SignedHeader: types.SignedHeader{ + Header: header, + Commit: commit, + }, + CanonicalCommit: canonical, + } +} + +// Info about the node's syncing state +type SyncInfo struct { + LatestBlockHash cmn.HexBytes `json:"latest_block_hash"` + LatestAppHash cmn.HexBytes `json:"latest_app_hash"` + LatestBlockHeight int64 `json:"latest_block_height"` + LatestBlockTime time.Time `json:"latest_block_time"` + CatchingUp bool `json:"catching_up"` +} + +// Info about the node's validator +type ValidatorInfo struct { + Address cmn.HexBytes `json:"address"` + PubKey crypto.PubKey `json:"pub_key"` + VotingPower int64 `json:"voting_power"` +} + +// Node Status +type ResultStatus struct { + NodeInfo p2p.NodeInfo `json:"node_info"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` +} + +// Is TxIndexing enabled +func (s *ResultStatus) TxIndexEnabled() bool { + if s == nil { + return false + } + for _, s := range s.NodeInfo.Other { + info := strings.Split(s, "=") + if len(info) == 2 && info[0] == "tx_index" { + return info[1] == "on" + } + } + return false +} + +// Info about peer connections +type ResultNetInfo struct { + Listening bool `json:"listening"` + Listeners []string `json:"listeners"` + NPeers int `json:"n_peers"` + Peers []Peer `json:"peers"` +} + +// Log from dialing seeds +type ResultDialSeeds struct { + Log string `json:"log"` +} + +// Log from dialing peers +type ResultDialPeers struct { + Log string `json:"log"` +} + +// A peer +type Peer struct { + p2p.NodeInfo `json:"node_info"` + IsOutbound bool `json:"is_outbound"` + ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` +} + +// Validators for a height +type ResultValidators struct { + BlockHeight int64 `json:"block_height"` + Validators []*types.Validator `json:"validators"` +} + +// Info about the consensus state. 
+// UNSTABLE +type ResultDumpConsensusState struct { + RoundState json.RawMessage `json:"round_state"` + Peers []PeerStateInfo `json:"peers"` +} + +// UNSTABLE +type PeerStateInfo struct { + NodeAddress string `json:"node_address"` + PeerState json.RawMessage `json:"peer_state"` +} + +// UNSTABLE +type ResultConsensusState struct { + RoundState json.RawMessage `json:"round_state"` +} + +// CheckTx result +type ResultBroadcastTx struct { + Code uint32 `json:"code"` + Data cmn.HexBytes `json:"data"` + Log string `json:"log"` + + Hash cmn.HexBytes `json:"hash"` +} + +// CheckTx and DeliverTx results +type ResultBroadcastTxCommit struct { + CheckTx abci.ResponseCheckTx `json:"check_tx"` + DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"` + Hash cmn.HexBytes `json:"hash"` + Height int64 `json:"height"` +} + +// Result of querying for a tx +type ResultTx struct { + Hash cmn.HexBytes `json:"hash"` + Height int64 `json:"height"` + Index uint32 `json:"index"` + TxResult abci.ResponseDeliverTx `json:"tx_result"` + Tx types.Tx `json:"tx"` + Proof types.TxProof `json:"proof,omitempty"` +} + +// Result of searching for txs +type ResultTxSearch struct { + Txs []*ResultTx `json:"txs"` + TotalCount int `json:"total_count"` +} + +// List of mempool txs +type ResultUnconfirmedTxs struct { + N int `json:"n_txs"` + Txs []types.Tx `json:"txs"` +} + +// Info abci msg +type ResultABCIInfo struct { + Response abci.ResponseInfo `json:"response"` +} + +// Query abci msg +type ResultABCIQuery struct { + Response abci.ResponseQuery `json:"response"` +} + +// empty results +type ( + ResultUnsafeFlushMempool struct{} + ResultUnsafeProfile struct{} + ResultSubscribe struct{} + ResultUnsubscribe struct{} + ResultHealth struct{} +) + +// Event data from a subscription +type ResultEvent struct { + Query string `json:"query"` + Data types.TMEventData `json:"data"` +} diff --git a/rpc/core/types/responses_test.go b/rpc/core/types/responses_test.go new file mode 100644 index 000000000..e410d47ae --- /dev/null +++ b/rpc/core/types/responses_test.go @@ -0,0 +1,39 @@ +package core_types + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/p2p" +) + +func TestStatusIndexer(t *testing.T) { + assert := assert.New(t) + + var status *ResultStatus + assert.False(status.TxIndexEnabled()) + + status = &ResultStatus{} + assert.False(status.TxIndexEnabled()) + + status.NodeInfo = p2p.NodeInfo{} + assert.False(status.TxIndexEnabled()) + + cases := []struct { + expected bool + other []string + }{ + {false, nil}, + {false, []string{}}, + {false, []string{"a=b"}}, + {false, []string{"tx_indexiskv", "some=dood"}}, + {true, []string{"tx_index=on", "tx_index=other"}}, + {true, []string{"^(*^(", "tx_index=on", "a=n=b=d="}}, + } + + for _, tc := range cases { + status.NodeInfo.Other = tc.other + assert.Equal(tc.expected, status.TxIndexEnabled()) + } +} diff --git a/rpc/core/types/wire.go b/rpc/core/types/wire.go new file mode 100644 index 000000000..d3a31dc35 --- /dev/null +++ b/rpc/core/types/wire.go @@ -0,0 +1,13 @@ +package core_types + +import ( + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" +) + +func RegisterAmino(cdc *amino.Codec) { + types.RegisterEventDatas(cdc) + types.RegisterEvidences(cdc) + crypto.RegisterAmino(cdc) +} diff --git a/rpc/core/version.go b/rpc/core/version.go new file mode 100644 index 000000000..e283de479 --- /dev/null +++ b/rpc/core/version.go @@ -0,0 +1,5 @@ +package core + +// a single 
integer is sufficient here
+
+const Version = "3" // rpc routes for profiling, setting config
diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go
new file mode 100644
index 000000000..0b840e3e9
--- /dev/null
+++ b/rpc/grpc/api.go
@@ -0,0 +1,36 @@
+package core_grpc
+
+import (
+	"context"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	core "github.com/tendermint/tendermint/rpc/core"
+)
+
+type broadcastAPI struct {
+}
+
+func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) {
+	// a no-op, so clients can check that the server is up
+	return &ResponsePing{}, nil
+}
+
+func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) {
+	res, err := core.BroadcastTxCommit(req.Tx)
+	if err != nil {
+		return nil, err
+	}
+	return &ResponseBroadcastTx{
+
+		CheckTx: &abci.ResponseCheckTx{
+			Code: res.CheckTx.Code,
+			Data: res.CheckTx.Data,
+			Log:  res.CheckTx.Log,
+		},
+		DeliverTx: &abci.ResponseDeliverTx{
+			Code: res.DeliverTx.Code,
+			Data: res.DeliverTx.Data,
+			Log:  res.DeliverTx.Log,
+		},
+	}, nil
+}
diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go
new file mode 100644
index 000000000..c88989685
--- /dev/null
+++ b/rpc/grpc/client_server.go
@@ -0,0 +1,56 @@
+package core_grpc
+
+import (
+	"fmt"
+	"net"
+	"strings"
+	"time"
+
+	"golang.org/x/net/netutil"
+	"google.golang.org/grpc"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// Config is a gRPC server configuration.
+type Config struct {
+	MaxOpenConnections int
+}
+
+// StartGRPCServer starts a new gRPC BroadcastAPIServer, listening on
+// protoAddr, in a goroutine. Returns a listener and an error if it fails to
+// parse an address.
+func StartGRPCServer(protoAddr string, config Config) (net.Listener, error) {
+	parts := strings.SplitN(protoAddr, "://", 2)
+	if len(parts) != 2 {
+		return nil, fmt.Errorf("Invalid listen address for grpc server (did you forget a tcp:// prefix?) : %s", protoAddr)
+	}
+	proto, addr := parts[0], parts[1]
+	ln, err := net.Listen(proto, addr)
+	if err != nil {
+		return nil, err
+	}
+	if config.MaxOpenConnections > 0 {
+		ln = netutil.LimitListener(ln, config.MaxOpenConnections)
+	}
+
+	grpcServer := grpc.NewServer()
+	RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
+	go grpcServer.Serve(ln) // nolint: errcheck
+
+	return ln, nil
+}
+
+// StartGRPCClient dials the gRPC server using protoAddr and returns a new
+// BroadcastAPIClient.
+func StartGRPCClient(protoAddr string) BroadcastAPIClient {
+	conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+	if err != nil {
+		panic(err)
+	}
+	return NewBroadcastAPIClient(conn)
+}
+
+func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+	return cmn.Connect(addr)
+}
diff --git a/rpc/grpc/compile.sh b/rpc/grpc/compile.sh
new file mode 100644
index 000000000..2c4629c8e
--- /dev/null
+++ b/rpc/grpc/compile.sh
@@ -0,0 +1,3 @@
+#! /bin/bash
+
+protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . 
types.proto
diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go
new file mode 100644
index 000000000..fe979c549
--- /dev/null
+++ b/rpc/grpc/grpc_test.go
@@ -0,0 +1,33 @@
+package core_grpc_test
+
+import (
+	"context"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/rpc/grpc"
+	"github.com/tendermint/tendermint/rpc/test"
+)
+
+func TestMain(m *testing.M) {
+	// start a tendermint node in the background to test against
+	app := kvstore.NewKVStoreApplication()
+	node := rpctest.StartTendermint(app)
+	code := m.Run()
+
+	// and shut down properly at the end
+	node.Stop()
+	node.Wait()
+	os.Exit(code)
+}
+
+func TestBroadcastTx(t *testing.T) {
+	require := require.New(t)
+	res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")})
+	require.Nil(err, "%+v", err)
+	require.EqualValues(0, res.CheckTx.Code)
+	require.EqualValues(0, res.DeliverTx.Code)
+}
diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go
new file mode 100644
index 000000000..be16b711a
--- /dev/null
+++ b/rpc/grpc/types.pb.go
@@ -0,0 +1,230 @@
+// Code generated by protoc-gen-go.
+// source: types.proto
+// DO NOT EDIT!
+
+/*
+Package core_grpc is a generated protocol buffer package.
+
+It is generated from these files:
+	types.proto
+
+It has these top-level messages:
+	RequestPing
+	RequestBroadcastTx
+	ResponsePing
+	ResponseBroadcastTx
+*/
+package core_grpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import types "github.com/tendermint/tendermint/abci/types"
+
+import (
+	"context"
+
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RequestPing struct { +} + +func (m *RequestPing) Reset() { *m = RequestPing{} } +func (m *RequestPing) String() string { return proto.CompactTextString(m) } +func (*RequestPing) ProtoMessage() {} +func (*RequestPing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type RequestBroadcastTx struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } +func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*RequestBroadcastTx) ProtoMessage() {} +func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *RequestBroadcastTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +type ResponsePing struct { +} + +func (m *ResponsePing) Reset() { *m = ResponsePing{} } +func (m *ResponsePing) String() string { return proto.CompactTextString(m) } +func (*ResponsePing) ProtoMessage() {} +func (*ResponsePing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type ResponseBroadcastTx struct { + CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` + DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` +} + +func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } +func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*ResponseBroadcastTx) ProtoMessage() {} +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { + if m != nil { + return m.CheckTx + } + return nil +} + +func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { + if m != nil { + return m.DeliverTx + } + return nil +} + +func init() { + proto.RegisterType((*RequestPing)(nil), "core_grpc.RequestPing") + proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx") + proto.RegisterType((*ResponsePing)(nil), "core_grpc.ResponsePing") + proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for BroadcastAPI service + +type BroadcastAPIClient interface { + Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) + BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +} + +type broadcastAPIClient struct { + cc *grpc.ClientConn +} + +func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { + return &broadcastAPIClient{cc} +} + +func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { + out := new(ResponsePing) + err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/Ping", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { + out := new(ResponseBroadcastTx) + err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BroadcastAPI service + +type BroadcastAPIServer interface { + Ping(context.Context, *RequestPing) (*ResponsePing, error) + BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) +} + +func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { + s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +} + +func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/core_grpc.BroadcastAPI/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) + } + return interceptor(ctx, in, info, handler) +} + +func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBroadcastTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/core_grpc.BroadcastAPI/BroadcastTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) + } + return interceptor(ctx, in, info, handler) +} + +var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "core_grpc.BroadcastAPI", + HandlerType: (*BroadcastAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _BroadcastAPI_Ping_Handler, + }, + { + MethodName: "BroadcastTx", + Handler: _BroadcastAPI_BroadcastTx_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "types.proto", +} + +func init() { proto.RegisterFile("types.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 264 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, + 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4c, 0xce, 0x2f, 0x4a, 0x8d, 0x4f, 0x2f, + 0x2a, 0x48, 0x96, 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f, + 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x4f, 0x4c, 0x4a, 0xce, 0xd4, 0x07, + 0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xc4, 0xcb, 0xc5, 0x1d, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x12, + 0x90, 0x99, 0x97, 0xae, 0xa4, 0xc2, 0x25, 0x04, 0xe5, 0x3a, 0x15, 0xe5, 0x27, 0xa6, 0x24, 0x27, + 0x16, 0x97, 0x84, 0x54, 0x08, 0xf1, 0x71, 0x31, 0x95, 0x54, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, + 0x04, 0x31, 0x95, 0x54, 0x28, 0xf1, 0x71, 0xf1, 0x04, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, + 0x82, 0x75, 0x35, 0x32, 0x72, 0x09, 0xc3, 0x04, 0x90, 0xf5, 0x19, 0x72, 0x71, 0x24, 0x67, 0xa4, + 0x26, 0x67, 0xc7, 0x43, 0x75, 0x73, 0x1b, 0x89, 0xe9, 0x41, 0x2c, 0x87, 0xa9, 0x76, 0x06, 0x49, + 
0x87, 0x54, 0x04, 0xb1, 0x27, 0x43, 0x18, 0x42, 0xe6, 0x5c, 0x5c, 0x29, 0xa9, 0x39, 0x99, 0x65, + 0xa9, 0x45, 0x20, 0x4d, 0x4c, 0x60, 0x4d, 0x12, 0x68, 0x9a, 0x5c, 0x20, 0x0a, 0x42, 0x2a, 0x82, + 0x38, 0x53, 0x60, 0x4c, 0xa3, 0xa9, 0x8c, 0x5c, 0x3c, 0x70, 0xbb, 0x1d, 0x03, 0x3c, 0x85, 0xcc, + 0xb9, 0x58, 0x40, 0x8e, 0x13, 0x12, 0xd3, 0x83, 0x87, 0x8d, 0x1e, 0x92, 0x57, 0xa5, 0xc4, 0x51, + 0xc4, 0x11, 0xbe, 0x11, 0xf2, 0xe1, 0xe2, 0x46, 0xf6, 0x84, 0x2c, 0xa6, 0x7e, 0x24, 0x69, 0x29, + 0x39, 0x2c, 0xc6, 0x20, 0xc9, 0x27, 0xb1, 0x81, 0xc3, 0xd9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, + 0x92, 0x29, 0xd9, 0x42, 0xaf, 0x01, 0x00, 0x00, +} diff --git a/rpc/grpc/types.proto b/rpc/grpc/types.proto new file mode 100644 index 000000000..d7980d5e0 --- /dev/null +++ b/rpc/grpc/types.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; +package core_grpc; + +import "github.com/tendermint/tendermint/abci/types/types.proto"; + +//---------------------------------------- +// Message types + +//---------------------------------------- +// Request types + +message RequestPing { +} + +message RequestBroadcastTx { + bytes tx = 1; +} + +//---------------------------------------- +// Response types + +message ResponsePing{ +} + +message ResponseBroadcastTx{ + types.ResponseCheckTx check_tx = 1; + types.ResponseDeliverTx deliver_tx = 2; +} + +//---------------------------------------- +// Service Definition + +service BroadcastAPI { + rpc Ping(RequestPing) returns (ResponsePing) ; + rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ; +} diff --git a/rpc/lib/client/args_test.go b/rpc/lib/client/args_test.go new file mode 100644 index 000000000..cb7a56bd5 --- /dev/null +++ b/rpc/lib/client/args_test.go @@ -0,0 +1,43 @@ +package rpcclient + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/go-amino" +) + +type Tx []byte + +type Foo struct { + Bar int + Baz string +} + +func TestArgToJSON(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + cases := []struct { + input interface{} + expected string + }{ + {[]byte("1234"), "0x31323334"}, + {Tx("654"), "0x363534"}, + {Foo{7, "hello"}, `{"Bar":"7","Baz":"hello"}`}, + } + + cdc := amino.NewCodec() + + for i, tc := range cases { + args := map[string]interface{}{"data": tc.input} + err := argsToJSON(cdc, args) + require.Nil(err, "%d: %+v", i, err) + require.Equal(1, len(args), "%d", i) + data, ok := args["data"].(string) + require.True(ok, "%d: %#v", i, args["data"]) + assert.Equal(tc.expected, data, "%d", i) + } +} diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go new file mode 100644 index 000000000..bd440289b --- /dev/null +++ b/rpc/lib/client/http_client.go @@ -0,0 +1,235 @@ +package rpcclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/pkg/errors" + "github.com/tendermint/go-amino" + + types "github.com/tendermint/tendermint/rpc/lib/types" +) + +const ( + protoHTTP = "http" + protoHTTPS = "https" + protoWSS = "wss" + protoWS = "ws" + protoTCP = "tcp" +) + +// HTTPClient is a common interface for JSONRPCClient and URIClient. 
+type HTTPClient interface {
+	Call(method string, params map[string]interface{}, result interface{}) (interface{}, error)
+	Codec() *amino.Codec
+	SetCodec(*amino.Codec)
+}
+
+// TODO: Deprecate support for IP:PORT or /path/to/socket
+func makeHTTPDialer(remoteAddr string) (string, string, func(string, string) (net.Conn, error)) {
+	// protocol to use for http operations, to support both http and https
+	clientProtocol := protoHTTP
+
+	parts := strings.SplitN(remoteAddr, "://", 2)
+	var protocol, address string
+	if len(parts) == 1 {
+		// default to tcp if nothing specified
+		protocol, address = protoTCP, remoteAddr
+	} else if len(parts) == 2 {
+		protocol, address = parts[0], parts[1]
+	} else {
+		// return an invalid-address message and a dialer that always fails
+		msg := fmt.Sprintf("Invalid addr: %s", remoteAddr)
+		return clientProtocol, msg, func(_ string, _ string) (net.Conn, error) {
+			return nil, errors.New(msg)
+		}
+	}
+
+	// accept http as an alias for tcp and set the client protocol
+	switch protocol {
+	case protoHTTP, protoHTTPS:
+		clientProtocol = protocol
+		protocol = protoTCP
+	case protoWS, protoWSS:
+		clientProtocol = protocol
+	}
+
+	// replace / with . for http requests (kvstore domain)
+	trimmedAddress := strings.Replace(address, "/", ".", -1)
+	return clientProtocol, trimmedAddress, func(proto, addr string) (net.Conn, error) {
+		return net.Dial(protocol, address)
+	}
+}
+
+// We overwrite the http.Client.Dial so we can do http over tcp or unix.
+// remoteAddr should be fully featured (e.g. with tcp:// or unix://).
+func makeHTTPClient(remoteAddr string) (string, *http.Client) {
+	protocol, address, dialer := makeHTTPDialer(remoteAddr)
+	return protocol + "://" + address, &http.Client{
+		Transport: &http.Transport{
+			Dial: dialer,
+		},
+	}
+}
+
+//------------------------------------------------------------------------------------
+
+// JSONRPCClient takes params as a map and sends them in the body of a
+// JSONRPC POST request.
+type JSONRPCClient struct {
+	address string
+	client  *http.Client
+	cdc     *amino.Codec
+}
+
+// NewJSONRPCClient returns a JSONRPCClient pointed at the given address.
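+// The remote address may use any of the schemes handled by makeHTTPDialer;
+// a few sketches (addresses are illustrative, not defaults):
+//
+//	cl := NewJSONRPCClient("tcp://localhost:26657")    // http over tcp
+//	cl = NewJSONRPCClient("unix:///var/run/node.sock") // http over a unix socket
+//	cl = NewJSONRPCClient("https://node.example.com")  // https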
+func NewJSONRPCClient(remote string) *JSONRPCClient { + address, client := makeHTTPClient(remote) + return &JSONRPCClient{ + address: address, + client: client, + cdc: amino.NewCodec(), + } +} + +func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { + request, err := types.MapToRequest(c.cdc, "jsonrpc-client", method, params) + if err != nil { + return nil, err + } + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + // log.Info(string(requestBytes)) + requestBuf := bytes.NewBuffer(requestBytes) + // log.Info(Fmt("RPC request to %v (%v): %v", c.remote, method, string(requestBytes))) + httpResponse, err := c.client.Post(c.address, "text/json", requestBuf) + if err != nil { + return nil, err + } + defer httpResponse.Body.Close() // nolint: errcheck + + responseBytes, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return nil, err + } + // log.Info(Fmt("RPC response: %v", string(responseBytes))) + return unmarshalResponseBytes(c.cdc, responseBytes, result) +} + +func (c *JSONRPCClient) Codec() *amino.Codec { + return c.cdc +} + +func (c *JSONRPCClient) SetCodec(cdc *amino.Codec) { + c.cdc = cdc +} + +//------------------------------------------------------------- + +// URI takes params as a map +type URIClient struct { + address string + client *http.Client + cdc *amino.Codec +} + +func NewURIClient(remote string) *URIClient { + address, client := makeHTTPClient(remote) + return &URIClient{ + address: address, + client: client, + cdc: amino.NewCodec(), + } +} + +func (c *URIClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { + values, err := argsToURLValues(c.cdc, params) + if err != nil { + return nil, err + } + // log.Info(Fmt("URI request to %v (%v): %v", c.address, method, values)) + resp, err := c.client.PostForm(c.address+"/"+method, values) + if err != nil { + return nil, err + } + defer resp.Body.Close() // nolint: errcheck + + responseBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return unmarshalResponseBytes(c.cdc, responseBytes, result) +} + +func (c *URIClient) Codec() *amino.Codec { + return c.cdc +} + +func (c *URIClient) SetCodec(cdc *amino.Codec) { + c.cdc = cdc +} + +//------------------------------------------------ + +func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, result interface{}) (interface{}, error) { + // Read response. If rpc/core/types is imported, the result will unmarshal + // into the correct type. + // log.Notice("response", "response", string(responseBytes)) + var err error + response := &types.RPCResponse{} + err = json.Unmarshal(responseBytes, response) + if err != nil { + return nil, errors.Errorf("Error unmarshalling rpc response: %v", err) + } + if response.Error != nil { + return nil, errors.Errorf("Response error: %v", response.Error) + } + // Unmarshal the RawMessage into the result. 
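+	// NOTE: result must be a pointer (callers pass e.g. new(ResultEcho)) so
+	// amino can fill in its fields; a non-pointer value here would make
+	// UnmarshalJSON fail.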
+ err = cdc.UnmarshalJSON(response.Result, result) + if err != nil { + return nil, errors.Errorf("Error unmarshalling rpc response result: %v", err) + } + return result, nil +} + +func argsToURLValues(cdc *amino.Codec, args map[string]interface{}) (url.Values, error) { + values := make(url.Values) + if len(args) == 0 { + return values, nil + } + err := argsToJSON(cdc, args) + if err != nil { + return nil, err + } + for key, val := range args { + values.Set(key, val.(string)) + } + return values, nil +} + +func argsToJSON(cdc *amino.Codec, args map[string]interface{}) error { + for k, v := range args { + rt := reflect.TypeOf(v) + isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 + if isByteSlice { + bytes := reflect.ValueOf(v).Bytes() + args[k] = fmt.Sprintf("0x%X", bytes) + continue + } + + data, err := cdc.MarshalJSON(v) + if err != nil { + return err + } + args[k] = string(data) + } + return nil +} diff --git a/rpc/lib/client/integration_test.go b/rpc/lib/client/integration_test.go new file mode 100644 index 000000000..93a32388f --- /dev/null +++ b/rpc/lib/client/integration_test.go @@ -0,0 +1,66 @@ +// +build release + +// The code in here is comprehensive as an integration +// test and is long, hence is only run before releases. + +package rpcclient + +import ( + "bytes" + "errors" + "net" + "regexp" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" +) + +func TestWSClientReconnectWithJitter(t *testing.T) { + n := 8 + maxReconnectAttempts := 3 + // Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ... + maxSleepTime := time.Second * time.Duration(((1< c.maxReconnectAttempts { + return errors.Wrap(err, "reached maximum reconnect attempts") + } + } +} + +func (c *WSClient) startReadWriteRoutines() { + c.wg.Add(2) + c.readRoutineQuit = make(chan struct{}) + go c.readRoutine() + go c.writeRoutine() +} + +func (c *WSClient) processBacklog() error { + select { + case request := <-c.backlog: + if c.writeWait > 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } + } + if err := c.conn.WriteJSON(request); err != nil { + c.Logger.Error("failed to resend request", "err", err) + c.reconnectAfter <- err + // requeue request + c.backlog <- request + return err + } + c.Logger.Info("resend a request", "req", request) + default: + } + return nil +} + +func (c *WSClient) reconnectRoutine() { + for { + select { + case originalError := <-c.reconnectAfter: + // wait until writeRoutine and readRoutine finish + c.wg.Wait() + if err := c.reconnect(); err != nil { + c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError) + c.Stop() + return + } + // drain reconnectAfter + LOOP: + for { + select { + case <-c.reconnectAfter: + default: + break LOOP + } + } + err := c.processBacklog() + if err == nil { + c.startReadWriteRoutines() + } + + case <-c.Quit(): + return + } + } +} + +// The client ensures that there is at most one writer to a connection by +// executing all writes from this goroutine. 
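+// Requests, pings and the close message all funnel through the select below.
+// On a write error the failed request is pushed onto c.backlog so that
+// processBacklog can resend it once reconnectRoutine has re-established the
+// connection.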
+func (c *WSClient) writeRoutine() { + var ticker *time.Ticker + if c.pingPeriod > 0 { + // ticker with a predefined period + ticker = time.NewTicker(c.pingPeriod) + } else { + // ticker that never fires + ticker = &time.Ticker{C: make(<-chan time.Time)} + } + + defer func() { + ticker.Stop() + if err := c.conn.Close(); err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + } + c.wg.Done() + }() + + for { + select { + case request := <-c.send: + if c.writeWait > 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } + } + if err := c.conn.WriteJSON(request); err != nil { + c.Logger.Error("failed to send request", "err", err) + c.reconnectAfter <- err + // add request to the backlog, so we don't lose it + c.backlog <- request + return + } + case <-ticker.C: + if c.writeWait > 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } + } + if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { + c.Logger.Error("failed to write ping", "err", err) + c.reconnectAfter <- err + return + } + c.mtx.Lock() + c.sentLastPingAt = time.Now() + c.mtx.Unlock() + c.Logger.Debug("sent ping") + case <-c.readRoutineQuit: + return + case <-c.Quit(): + if err := c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil { + c.Logger.Error("failed to write message", "err", err) + } + return + } + } +} + +// The client ensures that there is at most one reader to a connection by +// executing all reads from this goroutine. +func (c *WSClient) readRoutine() { + defer func() { + if err := c.conn.Close(); err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + } + c.wg.Done() + }() + + c.conn.SetPongHandler(func(string) error { + // gather latency stats + c.mtx.RLock() + t := c.sentLastPingAt + c.mtx.RUnlock() + c.PingPongLatencyTimer.UpdateSince(t) + + c.Logger.Debug("got pong") + return nil + }) + + for { + // reset deadline for every message type (control or data) + if c.readWait > 0 { + if err := c.conn.SetReadDeadline(time.Now().Add(c.readWait)); err != nil { + c.Logger.Error("failed to set read deadline", "err", err) + } + } + _, data, err := c.conn.ReadMessage() + if err != nil { + if !websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure) { + return + } + + c.Logger.Error("failed to read response", "err", err) + close(c.readRoutineQuit) + c.reconnectAfter <- err + return + } + + var response types.RPCResponse + err = json.Unmarshal(data, &response) + if err != nil { + c.Logger.Error("failed to parse response", "err", err, "data", string(data)) + continue + } + c.Logger.Info("got response", "resp", response.Result) + // Combine a non-blocking read on BaseService.Quit with a non-blocking write on ResponsesCh to avoid blocking + // c.wg.Wait() in c.Stop(). Note we rely on Quit being closed so that it sends unlimited Quit signals to stop + // both readRoutine and writeRoutine + select { + case <-c.Quit(): + case c.ResponsesCh <- response: + } + } +} + +/////////////////////////////////////////////////////////////////////////////// +// Predefined methods + +// Subscribe to a query. Note the server must have a "subscribe" route +// defined. 
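+// A short sketch (the query language is whatever the server-side route
+// accepts; the event query below is only an example):
+//
+//	err := c.Subscribe(context.Background(), "tm.event = 'NewBlock'")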
+func (c *WSClient) Subscribe(ctx context.Context, query string) error {
+	params := map[string]interface{}{"query": query}
+	return c.Call(ctx, "subscribe", params)
+}
+
+// Unsubscribe from a query. Note the server must have an "unsubscribe" route
+// defined.
+func (c *WSClient) Unsubscribe(ctx context.Context, query string) error {
+	params := map[string]interface{}{"query": query}
+	return c.Call(ctx, "unsubscribe", params)
+}
+
+// UnsubscribeAll unsubscribes from all queries. Note the server must have an
+// "unsubscribe_all" route defined.
+func (c *WSClient) UnsubscribeAll(ctx context.Context) error {
+	params := map[string]interface{}{}
+	return c.Call(ctx, "unsubscribe_all", params)
+}
diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go
new file mode 100644
index 000000000..e902fe21a
--- /dev/null
+++ b/rpc/lib/client/ws_client_test.go
@@ -0,0 +1,224 @@
+package rpcclient
+
+import (
+	"context"
+	"encoding/json"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/gorilla/websocket"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/libs/log"
+
+	types "github.com/tendermint/tendermint/rpc/lib/types"
+)
+
+var wsCallTimeout = 5 * time.Second
+
+type myHandler struct {
+	closeConnAfterRead bool
+	mtx                sync.RWMutex
+}
+
+var upgrader = websocket.Upgrader{
+	ReadBufferSize:  1024,
+	WriteBufferSize: 1024,
+}
+
+func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	conn, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		panic(err)
+	}
+	defer conn.Close() // nolint: errcheck
+	for {
+		messageType, _, err := conn.ReadMessage()
+		if err != nil {
+			return
+		}
+
+		h.mtx.RLock()
+		if h.closeConnAfterRead {
+			if err := conn.Close(); err != nil {
+				panic(err)
+			}
+		}
+		h.mtx.RUnlock()
+
+		res := json.RawMessage(`{}`)
+		emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res})
+		if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil {
+			return
+		}
+	}
+}
+
+func TestWSClientReconnectsAfterReadFailure(t *testing.T) {
+	var wg sync.WaitGroup
+
+	// start server
+	h := &myHandler{}
+	s := httptest.NewServer(h)
+	defer s.Close()
+
+	c := startClient(t, s.Listener.Addr())
+	defer c.Stop()
+
+	wg.Add(1)
+	go callWgDoneOnResult(t, c, &wg)
+
+	h.mtx.Lock()
+	h.closeConnAfterRead = true
+	h.mtx.Unlock()
+
+	// results in WS read error, no send retry because write succeeded
+	call(t, "a", c)
+
+	// expect to reconnect almost immediately
+	time.Sleep(10 * time.Millisecond)
+	h.mtx.Lock()
+	h.closeConnAfterRead = false
+	h.mtx.Unlock()
+
+	// should succeed
+	call(t, "b", c)
+
+	wg.Wait()
+}
+
+func TestWSClientReconnectsAfterWriteFailure(t *testing.T) {
+	var wg sync.WaitGroup
+
+	// start server
+	h := &myHandler{}
+	s := httptest.NewServer(h)
+
+	c := startClient(t, s.Listener.Addr())
+	defer c.Stop()
+
+	wg.Add(2)
+	go callWgDoneOnResult(t, c, &wg)
+
+	// hacky way to abort the connection before write
+	if err := c.conn.Close(); err != nil {
+		t.Error(err)
+	}
+
+	// results in WS write error, the client should resend on reconnect
+	call(t, "a", c)
+
+	// expect to reconnect almost immediately
+	time.Sleep(10 * time.Millisecond)
+
+	// should succeed
+	call(t, "b", c)
+
+	wg.Wait()
+}
+
+func TestWSClientReconnectFailure(t *testing.T) {
+	// start server
+	h := &myHandler{}
+	s := httptest.NewServer(h)
+
+	c := startClient(t, s.Listener.Addr())
+	defer c.Stop()
+
+	go func() {
+		for {
+			select {
+			case <-c.ResponsesCh:
+			case <-c.Quit():
+				return
+			}
+		}
+	}()
+
+	// hacky way to abort the connection before write
+	if err := c.conn.Close(); err != nil {
+		t.Error(err)
+	}
+	s.Close()
+
+	// results in WS write error
+	// provide timeout to avoid blocking
+	ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout)
+	defer cancel()
+	if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil {
+		t.Error(err)
+	}
+
+	// expect to reconnect almost immediately
+	time.Sleep(10 * time.Millisecond)
+
+	done := make(chan struct{})
+	go func() {
+		// client should block on this
+		call(t, "b", c)
+		close(done)
+	}()
+
+	// test that client blocks on the second send
+	select {
+	case <-done:
+		t.Fatal("client should block on calling 'b' during reconnect")
+	case <-time.After(5 * time.Second):
+		t.Log("All good")
+	}
+}
+
+func TestNotBlockingOnStop(t *testing.T) {
+	timeout := 2 * time.Second
+	s := httptest.NewServer(&myHandler{})
+	c := startClient(t, s.Listener.Addr())
+	c.Call(context.Background(), "a", make(map[string]interface{}))
+	// Let the readRoutine get around to blocking
+	time.Sleep(time.Second)
+	passCh := make(chan struct{})
+	go func() {
+		// Unless we have a non-blocking write to ResponsesCh from readRoutine
+		// this blocks forever on the waitgroup
+		c.Stop()
+		passCh <- struct{}{}
+	}()
+	select {
+	case <-passCh:
+		// Pass
+	case <-time.After(timeout):
+		t.Fatalf("WSClient failed to stop within %v seconds - is one of the read/write routines blocking?",
+			timeout.Seconds())
+	}
+}
+
+func startClient(t *testing.T, addr net.Addr) *WSClient {
+	c := NewWSClient(addr.String(), "/websocket")
+	err := c.Start()
+	require.Nil(t, err)
+	c.SetLogger(log.TestingLogger())
+	return c
+}
+
+func call(t *testing.T, method string, c *WSClient) {
+	err := c.Call(context.Background(), method, make(map[string]interface{}))
+	require.NoError(t, err)
+}
+
+func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) {
+	for {
+		select {
+		case resp := <-c.ResponsesCh:
+			if resp.Error != nil {
+				t.Fatalf("unexpected error: %v", resp.Error)
+			}
+			if resp.Result != nil {
+				wg.Done()
+			}
+		case <-c.Quit():
+			return
+		}
+	}
+}
diff --git a/rpc/lib/doc.go b/rpc/lib/doc.go
new file mode 100644
index 000000000..2bc438593
--- /dev/null
+++ b/rpc/lib/doc.go
@@ -0,0 +1,103 @@
+/*
+HTTP RPC server supporting calls via uri params, jsonrpc, and jsonrpc over websockets
+
+# Client Requests
+
+Suppose we want to expose the rpc function `HelloWorld(name string, num int)`.
+
+## GET (URI)
+
+As a GET request, it would have URI encoded parameters, and look like:
+
+```
+curl 'http://localhost:8008/hello_world?name="my_world"&num=5'
+```
+
+Note the `'` around the url, which is just so bash doesn't strip the quotes around `"my_world"`.
+This should also work:
+
+```
+curl http://localhost:8008/hello_world?name=\"my_world\"&num=5
+```
+
+A GET request to `/` returns a list of available endpoints.
+For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be.
+
+## POST (JSONRPC)
+
+As a POST request, we use JSONRPC. For instance, the same request would have this as the body:
+
+```
+{
+  "jsonrpc": "2.0",
+  "id": "anything",
+  "method": "hello_world",
+  "params": {
+    "name": "my_world",
+    "num": 5
+  }
+}
+```
+
+With the above saved in file `data.json`, we can make the request with
+
+```
+curl --data @data.json http://localhost:8008
+```
+
+## WebSocket (JSONRPC)
+
+All requests are exposed over websocket in the same form as the POST JSONRPC.
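+For example, sending the following frame over an established websocket
+connection is equivalent to the POST body above:
+
+```
+{
+  "jsonrpc": "2.0",
+  "id": "anything",
+  "method": "hello_world",
+  "params": {
+    "name": "my_world",
+    "num": 5
+  }
+}
+```
+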
+Websocket connections are available at their own endpoint, typically `/websocket`,
+though this is configurable when starting the server.
+
+# Server Definition
+
+Define some types and routes:
+
+```
+type ResultStatus struct {
+	Value string
+}
+
+// Define some routes
+var Routes = map[string]*rpcserver.RPCFunc{
+	"status": rpcserver.NewRPCFunc(Status, "arg"),
+}
+
+// an rpc function
+func Status(v string) (*ResultStatus, error) {
+	return &ResultStatus{v}, nil
+}
+```
+
+Now start the server:
+
+```
+cdc := amino.NewCodec()
+logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+mux := http.NewServeMux()
+rpcserver.RegisterRPCFuncs(mux, Routes, cdc, logger)
+wm := rpcserver.NewWebsocketManager(Routes, cdc)
+mux.HandleFunc("/websocket", wm.WebsocketHandler)
+go func() {
+	_, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger, rpcserver.Config{})
+	if err != nil {
+		panic(err)
+	}
+}()
+```
+
+Note that unix sockets are supported as well (e.g. `/path/to/socket` instead of `0.0.0.0:8008`).
+
+Now see all available endpoints by sending a GET request to `0.0.0.0:8008`.
+Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets.
+
+# Examples
+
+* [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go)
+* [tm-monitor](https://github.com/tendermint/tools/blob/master/tm-monitor/rpc.go)
+*/
+package rpc
diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go
new file mode 100644
index 000000000..31839dcab
--- /dev/null
+++ b/rpc/lib/rpc_test.go
@@ -0,0 +1,378 @@
+package rpc
+
+import (
+	"bytes"
+	"context"
+	crand "crypto/rand"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"os"
+	"os/exec"
+	"testing"
+	"time"
+
+	"github.com/go-kit/kit/log/term"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	amino "github.com/tendermint/go-amino"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+
+	client "github.com/tendermint/tendermint/rpc/lib/client"
+	server "github.com/tendermint/tendermint/rpc/lib/server"
+	types "github.com/tendermint/tendermint/rpc/lib/types"
+)
+
+// Client and Server should work over tcp or unix sockets
+const (
+	tcpAddr = "tcp://0.0.0.0:47768"
+
+	unixSocket = "/tmp/rpc_test.sock"
+	unixAddr   = "unix://" + unixSocket
+
+	websocketEndpoint = "/websocket/endpoint"
+)
+
+type ResultEcho struct {
+	Value string `json:"value"`
+}
+
+type ResultEchoInt struct {
+	Value int `json:"value"`
+}
+
+type ResultEchoBytes struct {
+	Value []byte `json:"value"`
+}
+
+type ResultEchoDataBytes struct {
+	Value cmn.HexBytes `json:"value"`
+}
+
+// Define some routes
+var Routes = map[string]*server.RPCFunc{
+	"echo":            server.NewRPCFunc(EchoResult, "arg"),
+	"echo_ws":         server.NewWSRPCFunc(EchoWSResult, "arg"),
+	"echo_bytes":      server.NewRPCFunc(EchoBytesResult, "arg"),
+	"echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"),
+	"echo_int":        server.NewRPCFunc(EchoIntResult, "arg"),
+}
+
+// Amino codec required to encode/decode everything above.
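+// A plain codec suffices because every result above is a concrete struct. If
+// a route returned an interface, its implementations would have to be
+// registered first, along the lines of (hypothetical types shown):
+//
+//	RoutesCdc.RegisterInterface((*Animal)(nil), nil)
+//	RoutesCdc.RegisterConcrete(Dog{}, "rpc_test/Dog", nil)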
+var RoutesCdc = amino.NewCodec() + +func EchoResult(v string) (*ResultEcho, error) { + return &ResultEcho{v}, nil +} + +func EchoWSResult(wsCtx types.WSRPCContext, v string) (*ResultEcho, error) { + return &ResultEcho{v}, nil +} + +func EchoIntResult(v int) (*ResultEchoInt, error) { + return &ResultEchoInt{v}, nil +} + +func EchoBytesResult(v []byte) (*ResultEchoBytes, error) { + return &ResultEchoBytes{v}, nil +} + +func EchoDataBytesResult(v cmn.HexBytes) (*ResultEchoDataBytes, error) { + return &ResultEchoDataBytes{v}, nil +} + +func TestMain(m *testing.M) { + setup() + code := m.Run() + os.Exit(code) +} + +var colorFn = func(keyvals ...interface{}) term.FgBgColor { + for i := 0; i < len(keyvals)-1; i += 2 { + if keyvals[i] == "socket" { + if keyvals[i+1] == "tcp" { + return term.FgBgColor{Fg: term.DarkBlue} + } else if keyvals[i+1] == "unix" { + return term.FgBgColor{Fg: term.DarkCyan} + } + } + } + return term.FgBgColor{} +} + +// launch unix and tcp servers +func setup() { + logger := log.NewTMLoggerWithColorFn(log.NewSyncWriter(os.Stdout), colorFn) + + cmd := exec.Command("rm", "-f", unixSocket) + err := cmd.Start() + if err != nil { + panic(err) + } + if err = cmd.Wait(); err != nil { + panic(err) + } + + tcpLogger := logger.With("socket", "tcp") + mux := http.NewServeMux() + server.RegisterRPCFuncs(mux, Routes, RoutesCdc, tcpLogger) + wm := server.NewWebsocketManager(Routes, RoutesCdc, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) + wm.SetLogger(tcpLogger) + mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) + go func() { + _, err := server.StartHTTPServer(tcpAddr, mux, tcpLogger, server.Config{}) + if err != nil { + panic(err) + } + }() + + unixLogger := logger.With("socket", "unix") + mux2 := http.NewServeMux() + server.RegisterRPCFuncs(mux2, Routes, RoutesCdc, unixLogger) + wm = server.NewWebsocketManager(Routes, RoutesCdc) + wm.SetLogger(unixLogger) + mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) + go func() { + _, err := server.StartHTTPServer(unixAddr, mux2, unixLogger, server.Config{}) + if err != nil { + panic(err) + } + }() + + // wait for servers to start + time.Sleep(time.Second * 2) +} + +func echoViaHTTP(cl client.HTTPClient, val string) (string, error) { + params := map[string]interface{}{ + "arg": val, + } + result := new(ResultEcho) + if _, err := cl.Call("echo", params, result); err != nil { + return "", err + } + return result.Value, nil +} + +func echoIntViaHTTP(cl client.HTTPClient, val int) (int, error) { + params := map[string]interface{}{ + "arg": val, + } + result := new(ResultEchoInt) + if _, err := cl.Call("echo_int", params, result); err != nil { + return 0, err + } + return result.Value, nil +} + +func echoBytesViaHTTP(cl client.HTTPClient, bytes []byte) ([]byte, error) { + params := map[string]interface{}{ + "arg": bytes, + } + result := new(ResultEchoBytes) + if _, err := cl.Call("echo_bytes", params, result); err != nil { + return []byte{}, err + } + return result.Value, nil +} + +func echoDataBytesViaHTTP(cl client.HTTPClient, bytes cmn.HexBytes) (cmn.HexBytes, error) { + params := map[string]interface{}{ + "arg": bytes, + } + result := new(ResultEchoDataBytes) + if _, err := cl.Call("echo_data_bytes", params, result); err != nil { + return []byte{}, err + } + return result.Value, nil +} + +func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { + val := "acbd" + got, err := echoViaHTTP(cl, val) + require.Nil(t, err) + assert.Equal(t, got, val) + + val2 := randBytes(t) + got2, err := echoBytesViaHTTP(cl, 
val2)
+	require.Nil(t, err)
+	assert.Equal(t, got2, val2)
+
+	val3 := cmn.HexBytes(randBytes(t))
+	got3, err := echoDataBytesViaHTTP(cl, val3)
+	require.Nil(t, err)
+	assert.Equal(t, got3, val3)
+
+	val4 := rand.Intn(10000)
+	got4, err := echoIntViaHTTP(cl, val4)
+	require.Nil(t, err)
+	assert.Equal(t, got4, val4)
+}
+
+func echoViaWS(cl *client.WSClient, val string) (string, error) {
+	params := map[string]interface{}{
+		"arg": val,
+	}
+	err := cl.Call(context.Background(), "echo", params)
+	if err != nil {
+		return "", err
+	}
+
+	msg := <-cl.ResponsesCh
+	if msg.Error != nil {
+		return "", msg.Error
+	}
+	result := new(ResultEcho)
+	err = json.Unmarshal(msg.Result, result)
+	if err != nil {
+		return "", err
+	}
+	return result.Value, nil
+}
+
+func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) {
+	params := map[string]interface{}{
+		"arg": bytes,
+	}
+	err := cl.Call(context.Background(), "echo_bytes", params)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	msg := <-cl.ResponsesCh
+	if msg.Error != nil {
+		return []byte{}, msg.Error
+	}
+	result := new(ResultEchoBytes)
+	err = json.Unmarshal(msg.Result, result)
+	if err != nil {
+		return []byte{}, err
+	}
+	return result.Value, nil
+}
+
+func testWithWSClient(t *testing.T, cl *client.WSClient) {
+	val := "acbd"
+	got, err := echoViaWS(cl, val)
+	require.Nil(t, err)
+	assert.Equal(t, got, val)
+
+	val2 := randBytes(t)
+	got2, err := echoBytesViaWS(cl, val2)
+	require.Nil(t, err)
+	assert.Equal(t, got2, val2)
+}
+
+//-------------
+
+func TestServersAndClientsBasic(t *testing.T) {
+	serverAddrs := [...]string{tcpAddr, unixAddr}
+	for _, addr := range serverAddrs {
+		cl1 := client.NewURIClient(addr)
+		fmt.Printf("=== testing server on %s using URI client\n", addr)
+		testWithHTTPClient(t, cl1)
+
+		cl2 := client.NewJSONRPCClient(addr)
+		fmt.Printf("=== testing server on %s using JSONRPC client\n", addr)
+		testWithHTTPClient(t, cl2)
+
+		cl3 := client.NewWSClient(addr, websocketEndpoint)
+		cl3.SetLogger(log.TestingLogger())
+		err := cl3.Start()
+		require.Nil(t, err)
+		fmt.Printf("=== testing server on %s using WS client\n", addr)
+		testWithWSClient(t, cl3)
+		cl3.Stop()
+	}
+}
+
+func TestHexStringArg(t *testing.T) {
+	cl := client.NewURIClient(tcpAddr)
+	// should NOT be handled as hex
+	val := "0xabc"
+	got, err := echoViaHTTP(cl, val)
+	require.Nil(t, err)
+	assert.Equal(t, got, val)
+}
+
+func TestQuotedStringArg(t *testing.T) {
+	cl := client.NewURIClient(tcpAddr)
+	// should NOT be unquoted
+	val := "\"abc\""
+	got, err := echoViaHTTP(cl, val)
+	require.Nil(t, err)
+	assert.Equal(t, got, val)
+}
+
+func TestWSNewWSRPCFunc(t *testing.T) {
+	cl := client.NewWSClient(tcpAddr, websocketEndpoint)
+	cl.SetLogger(log.TestingLogger())
+	err := cl.Start()
+	require.Nil(t, err)
+	defer cl.Stop()
+
+	val := "acbd"
+	params := map[string]interface{}{
+		"arg": val,
+	}
+	err = cl.Call(context.Background(), "echo_ws", params)
+	require.Nil(t, err)
+
+	msg := <-cl.ResponsesCh
+	if msg.Error != nil {
+		t.Fatal(msg.Error)
+	}
+	result := new(ResultEcho)
+	err = json.Unmarshal(msg.Result, result)
+	require.Nil(t, err)
+	got := result.Value
+	assert.Equal(t, got, val)
+}
+
+func TestWSHandlesArrayParams(t *testing.T) {
+	cl := client.NewWSClient(tcpAddr, websocketEndpoint)
+	cl.SetLogger(log.TestingLogger())
+	err := cl.Start()
+	require.Nil(t, err)
+	defer cl.Stop()
+
+	val := "acbd"
+	params := []interface{}{val}
+	err = cl.CallWithArrayParams(context.Background(), "echo_ws", params)
+	require.Nil(t, err)
+
+	msg := <-cl.ResponsesCh
+	if msg.Error != nil {
+		t.Fatalf("%+v", msg.Error)
+	}
+	result := new(ResultEcho)
+	err = json.Unmarshal(msg.Result, result)
+	require.Nil(t, err)
+	got := result.Value
+	assert.Equal(t, got, val)
+}
+
+// TestWSClientPingPong checks that a client & server exchange pings
+// & pongs so connection stays alive.
+func TestWSClientPingPong(t *testing.T) {
+	cl := client.NewWSClient(tcpAddr, websocketEndpoint)
+	cl.SetLogger(log.TestingLogger())
+	err := cl.Start()
+	require.Nil(t, err)
+	defer cl.Stop()
+
+	time.Sleep(6 * time.Second)
+}
+
+func randBytes(t *testing.T) []byte {
+	n := rand.Intn(10) + 2
+	buf := make([]byte, n)
+	_, err := crand.Read(buf)
+	require.Nil(t, err)
+	return bytes.Replace(buf, []byte("="), []byte{100}, -1)
+}
diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go
new file mode 100644
index 000000000..3ec5f81e3
--- /dev/null
+++ b/rpc/lib/server/handlers.go
@@ -0,0 +1,838 @@
+package rpcserver
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"runtime/debug"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/gorilla/websocket"
+	"github.com/pkg/errors"
+
+	amino "github.com/tendermint/go-amino"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+	types "github.com/tendermint/tendermint/rpc/lib/types"
+)
+
+// RegisterRPCFuncs adds a route for each function in the funcMap, as well as
+// general jsonrpc and websocket handlers for all functions. The codec is used
+// to encode the result carried by every RPCResponse.
+func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) {
+	// HTTP endpoints
+	for funcName, rpcFunc := range funcMap {
+		mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, cdc, logger))
+	}
+
+	// JSONRPC endpoints
+	mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, cdc, logger)))
+}
+
+//-------------------------------------
+// function introspection
+
+// RPCFunc contains the introspected type information for a function
+type RPCFunc struct {
+	f        reflect.Value  // underlying rpc function
+	args     []reflect.Type // type of each function arg
+	returns  []reflect.Type // type of each return arg
+	argNames []string       // name of each argument
+	ws       bool           // websocket only
+}
+
+// NewRPCFunc wraps a function for introspection.
+// f is the function, args are comma-separated argument names
+func NewRPCFunc(f interface{}, args string) *RPCFunc {
+	return newRPCFunc(f, args, false)
+}
+
+// NewWSRPCFunc wraps a function for introspection and use in the websockets.
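+// Unlike NewRPCFunc, the wrapped function must take a types.WSRPCContext as
+// its first argument; the server injects it, so it is not listed in args.
+// For instance (mirroring the echo_ws route in rpc_test.go):
+//
+//	func EchoWSResult(wsCtx types.WSRPCContext, v string) (*ResultEcho, error) {
+//		return &ResultEcho{v}, nil
+//	}
+//
+//	rpcFunc := NewWSRPCFunc(EchoWSResult, "arg")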
+func NewWSRPCFunc(f interface{}, args string) *RPCFunc { + return newRPCFunc(f, args, true) +} + +func newRPCFunc(f interface{}, args string, ws bool) *RPCFunc { + var argNames []string + if args != "" { + argNames = strings.Split(args, ",") + } + return &RPCFunc{ + f: reflect.ValueOf(f), + args: funcArgTypes(f), + returns: funcReturnTypes(f), + argNames: argNames, + ws: ws, + } +} + +// return a function's argument types +func funcArgTypes(f interface{}) []reflect.Type { + t := reflect.TypeOf(f) + n := t.NumIn() + typez := make([]reflect.Type, n) + for i := 0; i < n; i++ { + typez[i] = t.In(i) + } + return typez +} + +// return a function's return types +func funcReturnTypes(f interface{}) []reflect.Type { + t := reflect.TypeOf(f) + n := t.NumOut() + typez := make([]reflect.Type, n) + for i := 0; i < n; i++ { + typez[i] = t.Out(i) + } + return typez +} + +// function introspection +//----------------------------------------------------------------------------- +// rpc.json + +// jsonrpc calls grab the given method's function info and runs reflect.Call +func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + WriteRPCResponseHTTP(w, types.RPCInvalidRequestError("", errors.Wrap(err, "Error reading request body"))) + return + } + // if its an empty request (like from a browser), + // just display a list of functions + if len(b) == 0 { + writeListOfEndpoints(w, r, funcMap) + return + } + + var request types.RPCRequest + err = json.Unmarshal(b, &request) + if err != nil { + WriteRPCResponseHTTP(w, types.RPCParseError("", errors.Wrap(err, "Error unmarshalling request"))) + return + } + // A Notification is a Request object without an "id" member. + // The Server MUST NOT reply to a Notification, including those that are within a batch request. + if request.ID == "" { + logger.Debug("HTTPJSONRPC received a notification, skipping... 
(please send a non-empty ID if you want to call a method)") + return + } + if len(r.URL.Path) > 1 { + WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(request.ID, errors.Errorf("Path %s is invalid", r.URL.Path))) + return + } + rpcFunc := funcMap[request.Method] + if rpcFunc == nil || rpcFunc.ws { + WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(request.ID)) + return + } + var args []reflect.Value + if len(request.Params) > 0 { + args, err = jsonParamsToArgsRPC(rpcFunc, cdc, request.Params) + if err != nil { + WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) + return + } + } + returns := rpcFunc.f.Call(args) + logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) + result, err := unreflectResult(returns) + if err != nil { + WriteRPCResponseHTTP(w, types.RPCInternalError(request.ID, err)) + return + } + WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, request.ID, result)) + } +} + +func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Since the pattern "/" matches all paths not matched by other registered patterns we check whether the path is indeed + // "/", otherwise return a 404 error + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + next(w, r) + } +} + +func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json.RawMessage, argsOffset int) ([]reflect.Value, error) { + values := make([]reflect.Value, len(rpcFunc.argNames)) + for i, argName := range rpcFunc.argNames { + argType := rpcFunc.args[i+argsOffset] + + if p, ok := params[argName]; ok && p != nil && len(p) > 0 { + val := reflect.New(argType) + err := cdc.UnmarshalJSON(p, val.Interface()) + if err != nil { + return nil, err + } + values[i] = val.Elem() + } else { // use default for that type + values[i] = reflect.Zero(argType) + } + } + + return values, nil +} + +func arrayParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params []json.RawMessage, argsOffset int) ([]reflect.Value, error) { + if len(rpcFunc.argNames) != len(params) { + return nil, errors.Errorf("Expected %v parameters (%v), got %v (%v)", + len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) + } + + values := make([]reflect.Value, len(params)) + for i, p := range params { + argType := rpcFunc.args[i+argsOffset] + val := reflect.New(argType) + err := cdc.UnmarshalJSON(p, val.Interface()) + if err != nil { + return nil, err + } + values[i] = val.Elem() + } + return values, nil +} + +// `raw` is unparsed json (from json.RawMessage) encoding either a map or an array. +// `argsOffset` should be 0 for RPC calls, and 1 for WS requests, where len(rpcFunc.args) != len(rpcFunc.argNames). +// +// Example: +// rpcFunc.args = [rpctypes.WSRPCContext string] +// rpcFunc.argNames = ["arg"] +func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte, argsOffset int) ([]reflect.Value, error) { + + // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? + // First, try to get the map. + var m map[string]json.RawMessage + err := json.Unmarshal(raw, &m) + if err == nil { + return mapParamsToArgs(rpcFunc, cdc, m, argsOffset) + } + + // Otherwise, try an array. + var a []json.RawMessage + err = json.Unmarshal(raw, &a) + if err == nil { + return arrayParamsToArgs(rpcFunc, cdc, a, argsOffset) + } + + // Otherwise, bad format, we cannot parse + return nil, errors.Errorf("Unknown type for JSON params: %v. 
Expected map or array", err)
+}
+
+// Convert a []interface{} OR a map[string]interface{} to properly typed values
+func jsonParamsToArgsRPC(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage) ([]reflect.Value, error) {
+	return jsonParamsToArgs(rpcFunc, cdc, params, 0)
+}
+
+// Same as above, but with the first param the websocket connection
+func jsonParamsToArgsWS(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage, wsCtx types.WSRPCContext) ([]reflect.Value, error) {
+	values, err := jsonParamsToArgs(rpcFunc, cdc, params, 1)
+	if err != nil {
+		return nil, err
+	}
+	return append([]reflect.Value{reflect.ValueOf(wsCtx)}, values...), nil
+}
+
+// rpc.json
+//-----------------------------------------------------------------------------
+// rpc.http
+
+// convert from a function name to the http handler
+func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func(http.ResponseWriter, *http.Request) {
+	// Exception for websocket endpoints
+	if rpcFunc.ws {
+		return func(w http.ResponseWriter, r *http.Request) {
+			WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(""))
+		}
+	}
+	// All other endpoints
+	return func(w http.ResponseWriter, r *http.Request) {
+		logger.Debug("HTTP HANDLER", "req", r)
+		args, err := httpParamsToArgs(rpcFunc, cdc, r)
+		if err != nil {
+			WriteRPCResponseHTTP(w, types.RPCInvalidParamsError("", errors.Wrap(err, "Error converting http params to arguments")))
+			return
+		}
+		returns := rpcFunc.f.Call(args)
+		logger.Info("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns)
+		result, err := unreflectResult(returns)
+		if err != nil {
+			WriteRPCResponseHTTP(w, types.RPCInternalError("", err))
+			return
+		}
+		WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, "", result))
+	}
+}
+
+// Convert an http query to a list of properly typed values.
+// To be properly decoded the arg must be a concrete type from tendermint (if it's an interface).
+func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]reflect.Value, error) {
+	values := make([]reflect.Value, len(rpcFunc.args))
+
+	for i, name := range rpcFunc.argNames {
+		argType := rpcFunc.args[i]
+
+		values[i] = reflect.Zero(argType) // set default for that type
+
+		arg := GetParam(r, name)
+		// log.Notice("param to arg", "argType", argType, "name", name, "arg", arg)
+
+		if "" == arg {
+			continue
+		}
+
+		v, err, ok := nonJSONStringToArg(cdc, argType, arg)
+		if err != nil {
+			return nil, err
+		}
+		if ok {
+			values[i] = v
+			continue
+		}
+
+		values[i], err = jsonStringToArg(cdc, argType, arg)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return values, nil
+}
+
+func jsonStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error) {
+	rv := reflect.New(rt)
+	err := cdc.UnmarshalJSON([]byte(arg), rv.Interface())
+	if err != nil {
+		return rv, err
+	}
+	rv = rv.Elem()
+	return rv, nil
+}
+
+func nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) {
+	if rt.Kind() == reflect.Ptr {
+		rv_, err, ok := nonJSONStringToArg(cdc, rt.Elem(), arg)
+		if err != nil {
+			return reflect.Value{}, err, false
+		} else if ok {
+			rv := reflect.New(rt.Elem())
+			rv.Elem().Set(rv_)
+			return rv, nil, true
+		} else {
+			return reflect.Value{}, nil, false
+		}
+	} else {
+		return _nonJSONStringToArg(cdc, rt, arg)
+	}
+}
+
+// NOTE: rt.Kind() isn't a pointer.
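+// The heuristics below mean that, for example (expected Go kinds shown in
+// parentheses):
+//
+//	123        -> quoted and decoded as a number (int and uint kinds)
+//	0xDEADBEEF -> hex-decoded into the raw bytes (string or []byte)
+//	"abc"      -> unquoted to its inner bytes ([]byte)
+//
+// Anything else falls through with ok == false so the caller can try plain
+// JSON decoding.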
+func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) { + isIntString := RE_INT.Match([]byte(arg)) + isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) + isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") + + var expectingString, expectingByteSlice, expectingInt bool + switch rt.Kind() { + case reflect.Int, reflect.Uint, reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64: + expectingInt = true + case reflect.String: + expectingString = true + case reflect.Slice: + expectingByteSlice = rt.Elem().Kind() == reflect.Uint8 + } + + if isIntString && expectingInt { + qarg := `"` + arg + `"` + // jsonStringToArg + rv, err := jsonStringToArg(cdc, rt, qarg) + if err != nil { + return rv, err, false + } else { + return rv, nil, true + } + } + + if isHexString { + if !expectingString && !expectingByteSlice { + err := errors.Errorf("Got a hex string arg, but expected '%s'", + rt.Kind().String()) + return reflect.ValueOf(nil), err, false + } + + var value []byte + value, err := hex.DecodeString(arg[2:]) + if err != nil { + return reflect.ValueOf(nil), err, false + } + if rt.Kind() == reflect.String { + return reflect.ValueOf(string(value)), nil, true + } + return reflect.ValueOf([]byte(value)), nil, true + } + + if isQuotedString && expectingByteSlice { + v := reflect.New(reflect.TypeOf("")) + err := cdc.UnmarshalJSON([]byte(arg), v.Interface()) + if err != nil { + return reflect.ValueOf(nil), err, false + } + v = v.Elem() + return reflect.ValueOf([]byte(v.String())), nil, true + } + + return reflect.ValueOf(nil), nil, false +} + +// rpc.http +//----------------------------------------------------------------------------- +// rpc.websocket + +const ( + defaultWSWriteChanCapacity = 1000 + defaultWSWriteWait = 10 * time.Second + defaultWSReadWait = 30 * time.Second + defaultWSPingPeriod = (defaultWSReadWait * 9) / 10 +) + +// A single websocket connection contains listener id, underlying ws +// connection, and the event switch for subscribing to events. +// +// In case of an error, the connection is stopped. +type wsConnection struct { + cmn.BaseService + + remoteAddr string + baseConn *websocket.Conn + writeChan chan types.RPCResponse + + funcMap map[string]*RPCFunc + cdc *amino.Codec + + // write channel capacity + writeChanCapacity int + + // each write times out after this. + writeWait time.Duration + + // Connection times out if we haven't received *anything* in this long, not even pings. + readWait time.Duration + + // Send pings to server with this period. Must be less than readWait, but greater than zero. + pingPeriod time.Duration + + // object that is used to subscribe / unsubscribe from events + eventSub types.EventSubscriber +} + +// NewWSConnection wraps websocket.Conn. +// +// See the commentary on the func(*wsConnection) functions for a detailed +// description of how to configure ping period and pong wait time. NOTE: if the +// write buffer is full, pongs may be dropped, which may cause clients to +// disconnect. 
see https://github.com/gorilla/websocket/issues/97 +func NewWSConnection( + baseConn *websocket.Conn, + funcMap map[string]*RPCFunc, + cdc *amino.Codec, + options ...func(*wsConnection), +) *wsConnection { + baseConn.SetReadLimit(maxBodyBytes) + wsc := &wsConnection{ + remoteAddr: baseConn.RemoteAddr().String(), + baseConn: baseConn, + funcMap: funcMap, + cdc: cdc, + writeWait: defaultWSWriteWait, + writeChanCapacity: defaultWSWriteChanCapacity, + readWait: defaultWSReadWait, + pingPeriod: defaultWSPingPeriod, + } + for _, option := range options { + option(wsc) + } + wsc.BaseService = *cmn.NewBaseService(nil, "wsConnection", wsc) + return wsc +} + +// EventSubscriber sets object that is used to subscribe / unsubscribe from +// events - not Goroutine-safe. If none given, default node's eventBus will be +// used. +func EventSubscriber(eventSub types.EventSubscriber) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.eventSub = eventSub + } +} + +// WriteWait sets the amount of time to wait before a websocket write times out. +// It should only be used in the constructor - not Goroutine-safe. +func WriteWait(writeWait time.Duration) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.writeWait = writeWait + } +} + +// WriteChanCapacity sets the capacity of the websocket write channel. +// It should only be used in the constructor - not Goroutine-safe. +func WriteChanCapacity(cap int) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.writeChanCapacity = cap + } +} + +// ReadWait sets the amount of time to wait before a websocket read times out. +// It should only be used in the constructor - not Goroutine-safe. +func ReadWait(readWait time.Duration) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.readWait = readWait + } +} + +// PingPeriod sets the duration for sending websocket pings. +// It should only be used in the constructor - not Goroutine-safe. +func PingPeriod(pingPeriod time.Duration) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.pingPeriod = pingPeriod + } +} + +// OnStart implements cmn.Service by starting the read and write routines. It +// blocks until the connection closes. +func (wsc *wsConnection) OnStart() error { + wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) + + // Read subscriptions/unsubscriptions to events + go wsc.readRoutine() + // Write responses, BLOCKING. + wsc.writeRoutine() + + return nil +} + +// OnStop implements cmn.Service by unsubscribing remoteAddr from all subscriptions. +func (wsc *wsConnection) OnStop() { + // Both read and write loops close the websocket connection when they exit their loops. + // The writeChan is never closed, to allow WriteRPCResponse() to fail. + if wsc.eventSub != nil { + wsc.eventSub.UnsubscribeAll(context.TODO(), wsc.remoteAddr) + } +} + +// GetRemoteAddr returns the remote address of the underlying connection. +// It implements WSRPCConnection +func (wsc *wsConnection) GetRemoteAddr() string { + return wsc.remoteAddr +} + +// GetEventSubscriber implements WSRPCConnection by returning event subscriber. +func (wsc *wsConnection) GetEventSubscriber() types.EventSubscriber { + return wsc.eventSub +} + +// WriteRPCResponse pushes a response to the writeChan, and blocks until it is accepted. +// It implements WSRPCConnection. It is Goroutine-safe. 
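+// If the connection is shutting down, the response is silently dropped; see
+// TryWriteRPCResponse below for a non-blocking variant that also drops the
+// response when the write channel is full.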
+func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) { + select { + case <-wsc.Quit(): + return + case wsc.writeChan <- resp: + } +} + +// TryWriteRPCResponse attempts to push a response to the writeChan, but does not block. +// It implements WSRPCConnection. It is Goroutine-safe +func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { + select { + case <-wsc.Quit(): + return false + case wsc.writeChan <- resp: + return true + default: + return false + } +} + +// Codec returns an amino codec used to decode parameters and encode results. +// It implements WSRPCConnection. +func (wsc *wsConnection) Codec() *amino.Codec { + return wsc.cdc +} + +// Read from the socket and subscribe to or unsubscribe from events +func (wsc *wsConnection) readRoutine() { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("WSJSONRPC: %v", r) + } + wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) + wsc.WriteRPCResponse(types.RPCInternalError("unknown", err)) + go wsc.readRoutine() + } else { + wsc.baseConn.Close() // nolint: errcheck + } + }() + + wsc.baseConn.SetPongHandler(func(m string) error { + return wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)) + }) + + for { + select { + case <-wsc.Quit(): + return + default: + // reset deadline for every type of message (control or data) + if err := wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)); err != nil { + wsc.Logger.Error("failed to set read deadline", "err", err) + } + var in []byte + _, in, err := wsc.baseConn.ReadMessage() + if err != nil { + if websocket.IsCloseError(err, websocket.CloseNormalClosure) { + wsc.Logger.Info("Client closed the connection") + } else { + wsc.Logger.Error("Failed to read request", "err", err) + } + wsc.Stop() + return + } + + var request types.RPCRequest + err = json.Unmarshal(in, &request) + if err != nil { + wsc.WriteRPCResponse(types.RPCParseError("", errors.Wrap(err, "Error unmarshaling request"))) + continue + } + + // A Notification is a Request object without an "id" member. + // The Server MUST NOT reply to a Notification, including those that are within a batch request. + if request.ID == "" { + wsc.Logger.Debug("WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)") + continue + } + + // Now, fetch the RPCFunc and execute it. 
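+			// WS-only funcs get the connection's context prepended as their first
+			// argument (see jsonParamsToArgsWS); plain funcs receive only the
+			// decoded params.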
+ + rpcFunc := wsc.funcMap[request.Method] + if rpcFunc == nil { + wsc.WriteRPCResponse(types.RPCMethodNotFoundError(request.ID)) + continue + } + var args []reflect.Value + if rpcFunc.ws { + wsCtx := types.WSRPCContext{Request: request, WSRPCConnection: wsc} + if len(request.Params) > 0 { + args, err = jsonParamsToArgsWS(rpcFunc, wsc.cdc, request.Params, wsCtx) + } + } else { + if len(request.Params) > 0 { + args, err = jsonParamsToArgsRPC(rpcFunc, wsc.cdc, request.Params) + } + } + if err != nil { + wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) + continue + } + returns := rpcFunc.f.Call(args) + + // TODO: Need to encode args/returns to string if we want to log them + wsc.Logger.Info("WSJSONRPC", "method", request.Method) + + result, err := unreflectResult(returns) + if err != nil { + wsc.WriteRPCResponse(types.RPCInternalError(request.ID, err)) + continue + } + + wsc.WriteRPCResponse(types.NewRPCSuccessResponse(wsc.cdc, request.ID, result)) + } + } +} + +// receives on a write channel and writes out on the socket +func (wsc *wsConnection) writeRoutine() { + pingTicker := time.NewTicker(wsc.pingPeriod) + defer func() { + pingTicker.Stop() + if err := wsc.baseConn.Close(); err != nil { + wsc.Logger.Error("Error closing connection", "err", err) + } + }() + + // https://github.com/gorilla/websocket/issues/97 + pongs := make(chan string, 1) + wsc.baseConn.SetPingHandler(func(m string) error { + select { + case pongs <- m: + default: + } + return nil + }) + + for { + select { + case m := <-pongs: + err := wsc.writeMessageWithDeadline(websocket.PongMessage, []byte(m)) + if err != nil { + wsc.Logger.Info("Failed to write pong (client may disconnect)", "err", err) + } + case <-pingTicker.C: + err := wsc.writeMessageWithDeadline(websocket.PingMessage, []byte{}) + if err != nil { + wsc.Logger.Error("Failed to write ping", "err", err) + wsc.Stop() + return + } + case msg := <-wsc.writeChan: + jsonBytes, err := json.MarshalIndent(msg, "", " ") + if err != nil { + wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) + } else { + if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { + wsc.Logger.Error("Failed to write response", "err", err) + wsc.Stop() + return + } + } + case <-wsc.Quit(): + return + } + } +} + +// All writes to the websocket must (re)set the write deadline. +// If some writes don't set it while others do, they may timeout incorrectly (https://github.com/tendermint/tendermint/issues/553) +func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error { + if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil { + return err + } + return wsc.baseConn.WriteMessage(msgType, msg) +} + +//---------------------------------------- + +// WebsocketManager provides a WS handler for incoming connections and passes a +// map of functions along with any additional params to new connections. +// NOTE: The websocket path is defined externally, e.g. in node/node.go +type WebsocketManager struct { + websocket.Upgrader + + funcMap map[string]*RPCFunc + cdc *amino.Codec + logger log.Logger + wsConnOptions []func(*wsConnection) +} + +// NewWebsocketManager returns a new WebsocketManager that passes a map of +// functions, connection options and logger to new WS connections. 
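+// Typical wiring, mirroring rpc_test.go (the mux, codec and endpoint path are
+// up to the caller):
+//
+//	wm := NewWebsocketManager(Routes, cdc, ReadWait(5*time.Second))
+//	wm.SetLogger(logger)
+//	mux.HandleFunc("/websocket", wm.WebsocketHandler)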
+func NewWebsocketManager(funcMap map[string]*RPCFunc, cdc *amino.Codec, wsConnOptions ...func(*wsConnection)) *WebsocketManager { + return &WebsocketManager{ + funcMap: funcMap, + cdc: cdc, + Upgrader: websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + // TODO ??? + return true + }, + }, + logger: log.NewNopLogger(), + wsConnOptions: wsConnOptions, + } +} + +// SetLogger sets the logger. +func (wm *WebsocketManager) SetLogger(l log.Logger) { + wm.logger = l +} + +// WebsocketHandler upgrades the request/response (via http.Hijack) and starts +// the wsConnection. +func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Request) { + wsConn, err := wm.Upgrade(w, r, nil) + if err != nil { + // TODO - return http error + wm.logger.Error("Failed to upgrade to websocket connection", "err", err) + return + } + + // register connection + con := NewWSConnection(wsConn, wm.funcMap, wm.cdc, wm.wsConnOptions...) + con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) + wm.logger.Info("New websocket connection", "remote", con.remoteAddr) + err = con.Start() // Blocking + if err != nil { + wm.logger.Error("Error starting connection", "err", err) + } +} + +// rpc.websocket +//----------------------------------------------------------------------------- + +// NOTE: assume returns is result struct and error. If error is not nil, return it +func unreflectResult(returns []reflect.Value) (interface{}, error) { + errV := returns[1] + if errV.Interface() != nil { + return nil, errors.Errorf("%v", errV.Interface()) + } + rv := returns[0] + // the result is a registered interface, + // we need a pointer to it so we can marshal with type byte + rvp := reflect.New(rv.Type()) + rvp.Elem().Set(rv) + return rvp.Interface(), nil +} + +// writes a list of available rpc endpoints as an html page +func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[string]*RPCFunc) { + noArgNames := []string{} + argNames := []string{} + for name, funcData := range funcMap { + if len(funcData.args) == 0 { + noArgNames = append(noArgNames, name) + } else { + argNames = append(argNames, name) + } + } + sort.Strings(noArgNames) + sort.Strings(argNames) + buf := new(bytes.Buffer) + buf.WriteString("") + buf.WriteString("
Available endpoints:
") + + for _, name := range noArgNames { + link := fmt.Sprintf("//%s/%s", r.Host, name) + buf.WriteString(fmt.Sprintf("%s
", link, link)) + } + + buf.WriteString("
Endpoints that require arguments:
") + for _, name := range argNames { + link := fmt.Sprintf("//%s/%s?", r.Host, name) + funcData := funcMap[name] + for i, argName := range funcData.argNames { + link += argName + "=_" + if i < len(funcData.argNames)-1 { + link += "&" + } + } + buf.WriteString(fmt.Sprintf("%s
", link, link)) + } + buf.WriteString("") + w.Header().Set("Content-Type", "text/html") + w.WriteHeader(200) + w.Write(buf.Bytes()) // nolint: errcheck +} diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go new file mode 100644 index 000000000..3471eb791 --- /dev/null +++ b/rpc/lib/server/handlers_test.go @@ -0,0 +1,159 @@ +package rpcserver_test + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + amino "github.com/tendermint/go-amino" + rs "github.com/tendermint/tendermint/rpc/lib/server" + types "github.com/tendermint/tendermint/rpc/lib/types" + "github.com/tendermint/tendermint/libs/log" +) + +////////////////////////////////////////////////////////////////////////////// +// HTTP REST API +// TODO + +////////////////////////////////////////////////////////////////////////////// +// JSON-RPC over HTTP + +func testMux() *http.ServeMux { + funcMap := map[string]*rs.RPCFunc{ + "c": rs.NewRPCFunc(func(s string, i int) (string, error) { return "foo", nil }, "s,i"), + } + cdc := amino.NewCodec() + mux := http.NewServeMux() + buf := new(bytes.Buffer) + logger := log.NewTMLogger(buf) + rs.RegisterRPCFuncs(mux, funcMap, cdc, logger) + + return mux +} + +func statusOK(code int) bool { return code >= 200 && code <= 299 } + +// Ensure that nefarious/unintended inputs to `params` +// do not crash our RPC handlers. +// See Issue https://github.com/tendermint/tendermint/issues/708. +func TestRPCParams(t *testing.T) { + mux := testMux() + tests := []struct { + payload string + wantErr string + }{ + // bad + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found"}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found"}, + {`{"method": "c", "id": "0", "params": a}`, "invalid character"}, + {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1"}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character"}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string"}, + + // good + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, ""}, + {`{"method": "c", "id": "0", "params": {}}`, ""}, + {`{"method": "c", "id": "0", "params": ["a", "10"]}`, ""}, + } + + for i, tt := range tests { + req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + // Always expecting back a JSONRPCResponse + assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) + blob, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("#%d: err reading body: %v", i, err) + continue + } + + recv := new(types.RPCResponse) + assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + + if tt.wantErr == "" { + assert.Nil(t, recv.Error, "#%d: not expecting an error", i) + } else { + assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) + // The wanted error is either in the message or the data + assert.Contains(t, recv.Error.Message+recv.Error.Data, tt.wantErr, "#%d: expected substring", i) + } + } +} + +func TestRPCNotification(t *testing.T) { + mux := testMux() + body := strings.NewReader(`{"jsonrpc": "2.0"}`) + req, _ := http.NewRequest("POST", "http://localhost/", body) + 
+	rec := httptest.NewRecorder()
+	mux.ServeHTTP(rec, req)
+	res := rec.Result()
+
+	// Even a notification gets back a 2XX status, but with an empty body
+	require.True(t, statusOK(res.StatusCode), "should always return 2XX")
+	blob, err := ioutil.ReadAll(res.Body)
+	require.Nil(t, err, "reading from the body should not give back an error")
+	require.Equal(t, 0, len(blob), "a notification SHOULD NOT be responded to by the server")
+}
+
+func TestUnknownRPCPath(t *testing.T) {
+	mux := testMux()
+	req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil)
+	rec := httptest.NewRecorder()
+	mux.ServeHTTP(rec, req)
+	res := rec.Result()
+
+	// Always expecting back a 404 error
+	require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404")
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// JSON-RPC over WEBSOCKETS
+
+func TestWebsocketManagerHandler(t *testing.T) {
+	s := newWSServer()
+	defer s.Close()
+
+	// check upgrader works
+	d := websocket.Dialer{}
+	c, dialResp, err := d.Dial("ws://"+s.Listener.Addr().String()+"/websocket", nil)
+	require.NoError(t, err)
+
+	if got, want := dialResp.StatusCode, http.StatusSwitchingProtocols; got != want {
+		t.Errorf("dialResp.StatusCode = %d, want %d", got, want)
+	}
+
+	// check basic functionality works
+	req, err := types.MapToRequest(amino.NewCodec(), "TestWebsocketManager", "c", map[string]interface{}{"s": "a", "i": 10})
+	require.NoError(t, err)
+	err = c.WriteJSON(req)
+	require.NoError(t, err)
+
+	var resp types.RPCResponse
+	err = c.ReadJSON(&resp)
+	require.NoError(t, err)
+	require.Nil(t, resp.Error)
+}
+
+func newWSServer() *httptest.Server {
+	funcMap := map[string]*rs.RPCFunc{
+		"c": rs.NewWSRPCFunc(func(wsCtx types.WSRPCContext, s string, i int) (string, error) { return "foo", nil }, "s,i"),
+	}
+	wm := rs.NewWebsocketManager(funcMap, amino.NewCodec())
+	wm.SetLogger(log.TestingLogger())
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/websocket", wm.WebsocketHandler)
+
+	return httptest.NewServer(mux)
+}
diff --git a/rpc/lib/server/http_params.go b/rpc/lib/server/http_params.go
new file mode 100644
index 000000000..3c948c0ba
--- /dev/null
+++ b/rpc/lib/server/http_params.go
@@ -0,0 +1,91 @@
+package rpcserver
+
+import (
+	"encoding/hex"
+	"net/http"
+	"regexp"
+	"strconv"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	// Parts of regular expressions
+	atom    = "[A-Z0-9!#$%&'*+\\-/=?^_`{|}~]+"
+	dotAtom = atom + `(?:\.` + atom + `)*`
+	domain  = `[A-Z0-9.-]+\.[A-Z]{2,4}`
+
+	RE_INT     = regexp.MustCompile(`^-?[0-9]+$`)
+	RE_HEX     = regexp.MustCompile(`^(?i)[a-f0-9]+$`)
+	RE_EMAIL   = regexp.MustCompile(`^(?i)(` + dotAtom + `)@(` + dotAtom + `)$`)
+	RE_ADDRESS = regexp.MustCompile(`^(?i)[a-z0-9]{25,34}$`)
+	RE_HOST    = regexp.MustCompile(`^(?i)(` + domain + `)$`)
+
+	//RE_ID12 = regexp.MustCompile(`^[a-zA-Z0-9]{12}$`)
+)
+
+func GetParam(r *http.Request, param string) string {
+	s := r.URL.Query().Get(param)
+	if s == "" {
+		s = r.FormValue(param)
+	}
+	return s
+}
+
+func GetParamByteSlice(r *http.Request, param string) ([]byte, error) {
+	s := GetParam(r, param)
+	return hex.DecodeString(s)
+}
+
+func GetParamInt64(r *http.Request, param string) (int64, error) {
+	s := GetParam(r, param)
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, errors.Wrap(err, param)
+	}
+	return i, nil
+}
+
+func GetParamInt32(r *http.Request, param string) (int32, error) {
+	s := GetParam(r, param)
+	i, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		return 0, errors.Wrap(err, param)
+	}
+	return int32(i), nil
+}
+
+func GetParamUint64(r *http.Request, param string) (uint64, error) {
+	s := GetParam(r, param)
+	i, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		return 0, errors.Wrap(err, param)
+	}
+	return i, nil
+}
+
+func GetParamUint(r *http.Request, param string) (uint, error) {
+	s := GetParam(r, param)
+	i, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		return 0, errors.Wrap(err, param)
+	}
+	return uint(i), nil
+}
+
+func GetParamRegexp(r *http.Request, param string, re *regexp.Regexp) (string, error) {
+	s := GetParam(r, param)
+	if !re.MatchString(s) {
+		return "", errors.Errorf("%s: did not match regular expression %v", param, re.String())
+	}
+	return s, nil
+}
+
+func GetParamFloat64(r *http.Request, param string) (float64, error) {
+	s := GetParam(r, param)
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return 0, errors.Wrap(err, param)
+	}
+	return f, nil
+}
diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go
new file mode 100644
index 000000000..5d816ef22
--- /dev/null
+++ b/rpc/lib/server/http_server.go
@@ -0,0 +1,220 @@
+// Commons for HTTP handling
+package rpcserver
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"runtime/debug"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"golang.org/x/net/netutil"
+
+	types "github.com/tendermint/tendermint/rpc/lib/types"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+// Config is an RPC server configuration.
+type Config struct {
+	MaxOpenConnections int
+}
+
+const (
+	// maxBodyBytes controls the maximum number of bytes the
+	// server will read parsing the request body.
+	maxBodyBytes = int64(1000000) // 1MB
+)
+
+// StartHTTPServer starts an HTTP server on listenAddr with the given handler.
+// It wraps handler with RecoverAndLogHandler.
+func StartHTTPServer(
+	listenAddr string,
+	handler http.Handler,
+	logger log.Logger,
+	config Config,
+) (listener net.Listener, err error) {
+	var proto, addr string
+	parts := strings.SplitN(listenAddr, "://", 2)
+	if len(parts) != 2 {
+		return nil, errors.Errorf(
+			"Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)",
+			listenAddr,
+		)
+	}
+	proto, addr = parts[0], parts[1]
+
+	logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listenAddr))
+	listener, err = net.Listen(proto, addr)
+	if err != nil {
+		return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
+	}
+	if config.MaxOpenConnections > 0 {
+		listener = netutil.LimitListener(listener, config.MaxOpenConnections)
+	}
+
+	go func() {
+		err := http.Serve(
+			listener,
+			RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger),
+		)
+		logger.Error("RPC HTTP server stopped", "err", err)
+	}()
+	return listener, nil
+}
+
+// StartHTTPAndTLSServer starts an HTTPS server on listenAddr with the given
+// handler.
+// It wraps handler with RecoverAndLogHandler.
+func StartHTTPAndTLSServer( + listenAddr string, + handler http.Handler, + certFile, keyFile string, + logger log.Logger, + config Config, +) (listener net.Listener, err error) { + var proto, addr string + parts := strings.SplitN(listenAddr, "://", 2) + if len(parts) != 2 { + return nil, errors.Errorf( + "Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", + listenAddr, + ) + } + proto, addr = parts[0], parts[1] + + logger.Info( + fmt.Sprintf( + "Starting RPC HTTPS server on %s (cert: %q, key: %q)", + listenAddr, + certFile, + keyFile, + ), + ) + listener, err = net.Listen(proto, addr) + if err != nil { + return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err) + } + if config.MaxOpenConnections > 0 { + listener = netutil.LimitListener(listener, config.MaxOpenConnections) + } + + go func() { + err := http.ServeTLS( + listener, + RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), + certFile, + keyFile, + ) + logger.Error("RPC HTTPS server stopped", "err", err) + }() + return listener, nil +} + +func WriteRPCResponseHTTPError( + w http.ResponseWriter, + httpCode int, + res types.RPCResponse, +) { + jsonBytes, err := json.MarshalIndent(res, "", " ") + if err != nil { + panic(err) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(httpCode) + w.Write(jsonBytes) // nolint: errcheck, gas +} + +func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { + jsonBytes, err := json.MarshalIndent(res, "", " ") + if err != nil { + panic(err) + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + w.Write(jsonBytes) // nolint: errcheck, gas +} + +//----------------------------------------------------------------------------- + +// Wraps an HTTP handler, adding error logging. +// If the inner function panics, the outer function recovers, logs, sends an +// HTTP 500 error response. +func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Wrap the ResponseWriter to remember the status + rww := &ResponseWriterWrapper{-1, w} + begin := time.Now() + + // Common headers + origin := r.Header.Get("Origin") + rww.Header().Set("Access-Control-Allow-Origin", origin) + rww.Header().Set("Access-Control-Allow-Credentials", "true") + rww.Header().Set("Access-Control-Expose-Headers", "X-Server-Time") + rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) + + defer func() { + // Send a 500 error if a panic happens during a handler. + // Without this, Chrome & Firefox were retrying aborted ajax requests, + // at least to my localhost. + if e := recover(); e != nil { + + // If RPCResponse + if res, ok := e.(types.RPCResponse); ok { + WriteRPCResponseHTTP(rww, res) + } else { + // For the rest, + logger.Error( + "Panic in RPC HTTP handler", "err", e, "stack", + string(debug.Stack()), + ) + rww.WriteHeader(http.StatusInternalServerError) + WriteRPCResponseHTTP(rww, types.RPCInternalError("", e.(error))) + } + } + + // Finally, log. 
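+			// durationMS is the handler's wall-clock time in milliseconds (ns / 1e6).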
+ durationMS := time.Since(begin).Nanoseconds() / 1000000 + if rww.Status == -1 { + rww.Status = 200 + } + logger.Info("Served RPC HTTP response", + "method", r.Method, "url", r.URL, + "status", rww.Status, "duration", durationMS, + "remoteAddr", r.RemoteAddr, + ) + }() + + handler.ServeHTTP(rww, r) + }) +} + +// Remember the status for logging +type ResponseWriterWrapper struct { + Status int + http.ResponseWriter +} + +func (w *ResponseWriterWrapper) WriteHeader(status int) { + w.Status = status + w.ResponseWriter.WriteHeader(status) +} + +// implements http.Hijacker +func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return w.ResponseWriter.(http.Hijacker).Hijack() +} + +type maxBytesHandler struct { + h http.Handler + n int64 +} + +func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, h.n) + h.h.ServeHTTP(w, r) +} diff --git a/rpc/lib/server/http_server_test.go b/rpc/lib/server/http_server_test.go new file mode 100644 index 000000000..3cbe0d906 --- /dev/null +++ b/rpc/lib/server/http_server_test.go @@ -0,0 +1,62 @@ +package rpcserver + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/tendermint/tendermint/libs/log" +) + +func TestMaxOpenConnections(t *testing.T) { + const max = 5 // max simultaneous connections + + // Start the server. + var open int32 + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > int32(max) { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + }) + l, err := StartHTTPServer("tcp://127.0.0.1:0", mux, log.TestingLogger(), Config{MaxOpenConnections: max}) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + // Make N GET calls to the server. + attempts := max * 2 + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Log(err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the server's accept queue is filled, + // but most should succeed. 
+ if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go new file mode 100644 index 000000000..7b0aacdbe --- /dev/null +++ b/rpc/lib/server/parse_test.go @@ -0,0 +1,221 @@ +package rpcserver + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestParseJSONMap(t *testing.T) { + assert := assert.New(t) + + input := []byte(`{"value":"1234","height":22}`) + + // naive is float,string + var p1 map[string]interface{} + err := json.Unmarshal(input, &p1) + if assert.Nil(err) { + h, ok := p1["height"].(float64) + if assert.True(ok, "%#v", p1["height"]) { + assert.EqualValues(22, h) + } + v, ok := p1["value"].(string) + if assert.True(ok, "%#v", p1["value"]) { + assert.EqualValues("1234", v) + } + } + + // preloading map with values doesn't help + tmp := 0 + p2 := map[string]interface{}{ + "value": &cmn.HexBytes{}, + "height": &tmp, + } + err = json.Unmarshal(input, &p2) + if assert.Nil(err) { + h, ok := p2["height"].(float64) + if assert.True(ok, "%#v", p2["height"]) { + assert.EqualValues(22, h) + } + v, ok := p2["value"].(string) + if assert.True(ok, "%#v", p2["value"]) { + assert.EqualValues("1234", v) + } + } + + // preload here with *pointers* to the desired types + // struct has unknown types, but hard-coded keys + tmp = 0 + p3 := struct { + Value interface{} `json:"value"` + Height interface{} `json:"height"` + }{ + Height: &tmp, + Value: &cmn.HexBytes{}, + } + err = json.Unmarshal(input, &p3) + if assert.Nil(err) { + h, ok := p3.Height.(*int) + if assert.True(ok, "%#v", p3.Height) { + assert.Equal(22, *h) + } + v, ok := p3.Value.(*cmn.HexBytes) + if assert.True(ok, "%#v", p3.Value) { + assert.EqualValues([]byte{0x12, 0x34}, *v) + } + } + + // simplest solution, but hard-coded + p4 := struct { + Value cmn.HexBytes `json:"value"` + Height int `json:"height"` + }{} + err = json.Unmarshal(input, &p4) + if assert.Nil(err) { + assert.EqualValues(22, p4.Height) + assert.EqualValues([]byte{0x12, 0x34}, p4.Value) + } + + // so, let's use this trick... 
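+	// a map of json.RawMessage defers decoding, so each value below can be
+	// unmarshalled into its proper type individually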
+ // dynamic keys on map, and we can deserialize to the desired types + var p5 map[string]*json.RawMessage + err = json.Unmarshal(input, &p5) + if assert.Nil(err) { + var h int + err = json.Unmarshal(*p5["height"], &h) + if assert.Nil(err) { + assert.Equal(22, h) + } + + var v cmn.HexBytes + err = json.Unmarshal(*p5["value"], &v) + if assert.Nil(err) { + assert.Equal(cmn.HexBytes{0x12, 0x34}, v) + } + } +} + +func TestParseJSONArray(t *testing.T) { + assert := assert.New(t) + + input := []byte(`["1234",22]`) + + // naive is float,string + var p1 []interface{} + err := json.Unmarshal(input, &p1) + if assert.Nil(err) { + v, ok := p1[0].(string) + if assert.True(ok, "%#v", p1[0]) { + assert.EqualValues("1234", v) + } + h, ok := p1[1].(float64) + if assert.True(ok, "%#v", p1[1]) { + assert.EqualValues(22, h) + } + } + + // preloading map with values helps here (unlike map - p2 above) + tmp := 0 + p2 := []interface{}{&cmn.HexBytes{}, &tmp} + err = json.Unmarshal(input, &p2) + if assert.Nil(err) { + v, ok := p2[0].(*cmn.HexBytes) + if assert.True(ok, "%#v", p2[0]) { + assert.EqualValues([]byte{0x12, 0x34}, *v) + } + h, ok := p2[1].(*int) + if assert.True(ok, "%#v", p2[1]) { + assert.EqualValues(22, *h) + } + } +} + +func TestParseJSONRPC(t *testing.T) { + assert := assert.New(t) + + demo := func(height int, name string) {} + call := NewRPCFunc(demo, "height,name") + cdc := amino.NewCodec() + + cases := []struct { + raw string + height int64 + name string + fail bool + }{ + // should parse + {`["7", "flew"]`, 7, "flew", false}, + {`{"name": "john", "height": "22"}`, 22, "john", false}, + // defaults + {`{"name": "solo", "unused": "stuff"}`, 0, "solo", false}, + // should fail - wrong types/length + {`["flew", 7]`, 0, "", true}, + {`[7,"flew",100]`, 0, "", true}, + {`{"name": -12, "height": "fred"}`, 0, "", true}, + } + for idx, tc := range cases { + i := strconv.Itoa(idx) + data := []byte(tc.raw) + vals, err := jsonParamsToArgs(call, cdc, data, 0) + if tc.fail { + assert.NotNil(err, i) + } else { + assert.Nil(err, "%s: %+v", i, err) + if assert.Equal(2, len(vals), i) { + assert.Equal(tc.height, vals[0].Int(), i) + assert.Equal(tc.name, vals[1].String(), i) + } + } + + } +} + +func TestParseURI(t *testing.T) { + + demo := func(height int, name string) {} + call := NewRPCFunc(demo, "height,name") + cdc := amino.NewCodec() + + cases := []struct { + raw []string + height int64 + name string + fail bool + }{ + // can parse numbers unquoted and strings quoted + {[]string{"7", `"flew"`}, 7, "flew", false}, + {[]string{"22", `"john"`}, 22, "john", false}, + {[]string{"-10", `"bob"`}, -10, "bob", false}, + // can parse numbers quoted, too + {[]string{`"7"`, `"flew"`}, 7, "flew", false}, + {[]string{`"-10"`, `"bob"`}, -10, "bob", false}, + // cant parse strings uquoted + {[]string{`"-10"`, `bob`}, -10, "bob", true}, + } + for idx, tc := range cases { + i := strconv.Itoa(idx) + // data := []byte(tc.raw) + url := fmt.Sprintf( + "test.com/method?height=%v&name=%v", + tc.raw[0], tc.raw[1]) + req, err := http.NewRequest("GET", url, nil) + assert.NoError(t, err) + vals, err := httpParamsToArgs(call, cdc, req) + if tc.fail { + assert.NotNil(t, err, i) + } else { + assert.Nil(t, err, "%s: %+v", i, err) + if assert.Equal(t, 2, len(vals), i) { + assert.Equal(t, tc.height, vals[0].Int(), i) + assert.Equal(t, tc.name, vals[1].String(), i) + } + } + + } +} diff --git a/rpc/lib/test/data.json b/rpc/lib/test/data.json new file mode 100644 index 000000000..83283ec33 --- /dev/null +++ b/rpc/lib/test/data.json @@ -0,0 +1,9 
@@ +{ + "jsonrpc": "2.0", + "id": "", + "method": "hello_world", + "params": { + "name": "my_world", + "num": 5 + } +} diff --git a/rpc/lib/test/integration_test.sh b/rpc/lib/test/integration_test.sh new file mode 100755 index 000000000..7c23be7d3 --- /dev/null +++ b/rpc/lib/test/integration_test.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash +set -e + +# Get the directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +# Change into that dir because we expect that. +pushd "$DIR" + +echo "==> Building the server" +go build -o rpcserver main.go + +echo "==> (Re)starting the server" +PID=$(pgrep rpcserver || echo "") +if [[ $PID != "" ]]; then + kill -9 "$PID" +fi +./rpcserver & +PID=$! +sleep 2 + +echo "==> simple request" +R1=$(curl -s 'http://localhost:8008/hello_world?name="my_world"&num=5') +R2=$(curl -s --data @data.json http://localhost:8008) +if [[ "$R1" != "$R2" ]]; then + echo "responses are not identical:" + echo "R1: $R1" + echo "R2: $R2" + echo "FAIL" + exit 1 +else + echo "OK" +fi + +echo "==> request with 0x-prefixed hex string arg" +R1=$(curl -s 'http://localhost:8008/hello_world?name=0x41424344&num=123') +R2='{"jsonrpc":"2.0","id":"","result":{"Result":"hi ABCD 123"},"error":""}' +if [[ "$R1" != "$R2" ]]; then + echo "responses are not identical:" + echo "R1: $R1" + echo "R2: $R2" + echo "FAIL" + exit 1 +else + echo "OK" +fi + +echo "==> request with missing params" +R1=$(curl -s 'http://localhost:8008/hello_world') +R2='{"jsonrpc":"2.0","id":"","result":{"Result":"hi 0"},"error":""}' +if [[ "$R1" != "$R2" ]]; then + echo "responses are not identical:" + echo "R1: $R1" + echo "R2: $R2" + echo "FAIL" + exit 1 +else + echo "OK" +fi + +echo "==> request with unquoted string arg" +R1=$(curl -s 'http://localhost:8008/hello_world?name=abcd&num=123') +R2="{\"jsonrpc\":\"2.0\",\"id\":\"\",\"result\":null,\"error\":\"Error converting http params to args: invalid character 'a' looking for beginning of value\"}" +if [[ "$R1" != "$R2" ]]; then + echo "responses are not identical:" + echo "R1: $R1" + echo "R2: $R2" + echo "FAIL" + exit 1 +else + echo "OK" +fi + +echo "==> request with string type when expecting number arg" +R1=$(curl -s 'http://localhost:8008/hello_world?name="abcd"&num=0xabcd') +R2="{\"jsonrpc\":\"2.0\",\"id\":\"\",\"result\":null,\"error\":\"Error converting http params to args: Got a hex string arg, but expected 'int'\"}" +if [[ "$R1" != "$R2" ]]; then + echo "responses are not identical:" + echo "R1: $R1" + echo "R2: $R2" + echo "FAIL" + exit 1 +else + echo "OK" +fi + +echo "==> Stopping the server" +kill -9 $PID + +rm -f rpcserver + +popd +exit 0 diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go new file mode 100644 index 000000000..cb9560e12 --- /dev/null +++ b/rpc/lib/test/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + "net/http" + "os" + + amino "github.com/tendermint/go-amino" + rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +var routes = map[string]*rpcserver.RPCFunc{ + "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num"), +} + +func HelloWorld(name string, num int) (Result, error) { + return Result{fmt.Sprintf("hi %s %d", name, num)}, nil +} + +type Result struct { + Result string +} + +func main() { + mux := http.NewServeMux() + cdc := amino.NewCodec() + logger := 
log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + rpcserver.RegisterRPCFuncs(mux, routes, cdc, logger) + _, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger, rpcserver.Config{}) + if err != nil { + cmn.Exit(err.Error()) + } + + // Wait forever + cmn.TrapSignal(func() { + }) + +} diff --git a/rpc/lib/types/types.go b/rpc/lib/types/types.go new file mode 100644 index 000000000..fe9a92531 --- /dev/null +++ b/rpc/lib/types/types.go @@ -0,0 +1,187 @@ +package rpctypes + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/pkg/errors" + + amino "github.com/tendermint/go-amino" + + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" +) + +//---------------------------------------- +// REQUEST + +type RPCRequest struct { + JSONRPC string `json:"jsonrpc"` + ID string `json:"id"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} +} + +func NewRPCRequest(id string, method string, params json.RawMessage) RPCRequest { + return RPCRequest{ + JSONRPC: "2.0", + ID: id, + Method: method, + Params: params, + } +} + +func (req RPCRequest) String() string { + return fmt.Sprintf("[%s %s]", req.ID, req.Method) +} + +func MapToRequest(cdc *amino.Codec, id string, method string, params map[string]interface{}) (RPCRequest, error) { + var params_ = make(map[string]json.RawMessage, len(params)) + for name, value := range params { + valueJSON, err := cdc.MarshalJSON(value) + if err != nil { + return RPCRequest{}, err + } + params_[name] = valueJSON + } + payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet. + if err != nil { + return RPCRequest{}, err + } + request := NewRPCRequest(id, method, payload) + return request, nil +} + +func ArrayToRequest(cdc *amino.Codec, id string, method string, params []interface{}) (RPCRequest, error) { + var params_ = make([]json.RawMessage, len(params)) + for i, value := range params { + valueJSON, err := cdc.MarshalJSON(value) + if err != nil { + return RPCRequest{}, err + } + params_[i] = valueJSON + } + payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet. 
+	if err != nil {
+		return RPCRequest{}, err
+	}
+	request := NewRPCRequest(id, method, payload)
+	return request, nil
+}
+
+//----------------------------------------
+// RESPONSE
+
+type RPCError struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+	Data    string `json:"data,omitempty"`
+}
+
+func (err RPCError) Error() string {
+	const baseFormat = "RPC error %v - %s"
+	if err.Data != "" {
+		return fmt.Sprintf(baseFormat+": %s", err.Code, err.Message, err.Data)
+	}
+	return fmt.Sprintf(baseFormat, err.Code, err.Message)
+}
+
+type RPCResponse struct {
+	JSONRPC string          `json:"jsonrpc"`
+	ID      string          `json:"id"`
+	Result  json.RawMessage `json:"result,omitempty"`
+	Error   *RPCError       `json:"error,omitempty"`
+}
+
+func NewRPCSuccessResponse(cdc *amino.Codec, id string, res interface{}) RPCResponse {
+	var rawMsg json.RawMessage
+
+	if res != nil {
+		var js []byte
+		js, err := cdc.MarshalJSON(res)
+		if err != nil {
+			return RPCInternalError(id, errors.Wrap(err, "Error marshalling response"))
+		}
+		rawMsg = json.RawMessage(js)
+	}
+
+	return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg}
+}
+
+func NewRPCErrorResponse(id string, code int, msg string, data string) RPCResponse {
+	return RPCResponse{
+		JSONRPC: "2.0",
+		ID:      id,
+		Error:   &RPCError{Code: code, Message: msg, Data: data},
+	}
+}
+
+func (resp RPCResponse) String() string {
+	if resp.Error == nil {
+		return fmt.Sprintf("[%s %v]", resp.ID, resp.Result)
+	}
+	return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
+}
+
+func RPCParseError(id string, err error) RPCResponse {
+	return NewRPCErrorResponse(id, -32700, "Parse error. Invalid JSON", err.Error())
+}
+
+func RPCInvalidRequestError(id string, err error) RPCResponse {
+	return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error())
+}
+
+func RPCMethodNotFoundError(id string) RPCResponse {
+	return NewRPCErrorResponse(id, -32601, "Method not found", "")
+}
+
+func RPCInvalidParamsError(id string, err error) RPCResponse {
+	return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error())
+}
+
+func RPCInternalError(id string, err error) RPCResponse {
+	return NewRPCErrorResponse(id, -32603, "Internal error", err.Error())
+}
+
+func RPCServerError(id string, err error) RPCResponse {
+	return NewRPCErrorResponse(id, -32000, "Server error", err.Error())
+}
+
+//----------------------------------------
+
+// *wsConnection implements this interface.
+type WSRPCConnection interface {
+	GetRemoteAddr() string
+	WriteRPCResponse(resp RPCResponse)
+	TryWriteRPCResponse(resp RPCResponse) bool
+	GetEventSubscriber() EventSubscriber
+	Codec() *amino.Codec
+}
+
+// EventSubscriber mirrors tendermint/tendermint/types.EventBusSubscriber
+type EventSubscriber interface {
+	Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error
+	Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error
+	UnsubscribeAll(ctx context.Context, subscriber string) error
+}
+
+// websocket-only RPCFuncs take this as the first parameter.
+type WSRPCContext struct {
+	Request RPCRequest
+	WSRPCConnection
+}
+
+//----------------------------------------
+// SOCKETS
+//
+// Determine if it's a unix or tcp socket.
+// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port +// TODO: deprecate +func SocketType(listenAddr string) string { + socketType := "unix" + if len(strings.Split(listenAddr, ":")) >= 2 { + socketType = "tcp" + } + return socketType +} diff --git a/rpc/lib/types/types_test.go b/rpc/lib/types/types_test.go new file mode 100644 index 000000000..9dd1b7a18 --- /dev/null +++ b/rpc/lib/types/types_test.go @@ -0,0 +1,51 @@ +package rpctypes + +import ( + "encoding/json" + "testing" + + "fmt" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/tendermint/go-amino" +) + +type SampleResult struct { + Value string +} + +func TestResponses(t *testing.T) { + assert := assert.New(t) + cdc := amino.NewCodec() + + a := NewRPCSuccessResponse(cdc, "1", &SampleResult{"hello"}) + b, _ := json.Marshal(a) + s := `{"jsonrpc":"2.0","id":"1","result":{"Value":"hello"}}` + assert.Equal(string(s), string(b)) + + d := RPCParseError("1", errors.New("Hello world")) + e, _ := json.Marshal(d) + f := `{"jsonrpc":"2.0","id":"1","error":{"code":-32700,"message":"Parse error. Invalid JSON","data":"Hello world"}}` + assert.Equal(string(f), string(e)) + + g := RPCMethodNotFoundError("2") + h, _ := json.Marshal(g) + i := `{"jsonrpc":"2.0","id":"2","error":{"code":-32601,"message":"Method not found"}}` + assert.Equal(string(h), string(i)) +} + +func TestRPCError(t *testing.T) { + assert.Equal(t, "RPC error 12 - Badness: One worse than a code 11", + fmt.Sprintf("%v", &RPCError{ + Code: 12, + Message: "Badness", + Data: "One worse than a code 11", + })) + + assert.Equal(t, "RPC error 12 - Badness", + fmt.Sprintf("%v", &RPCError{ + Code: 12, + Message: "Badness", + })) +} diff --git a/rpc/lib/version.go b/rpc/lib/version.go new file mode 100644 index 000000000..8828f260b --- /dev/null +++ b/rpc/lib/version.go @@ -0,0 +1,7 @@ +package rpc + +const Maj = "0" +const Min = "7" +const Fix = "0" + +const Version = Maj + "." + Min + "." 
+ Fix diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go new file mode 100644 index 000000000..915911818 --- /dev/null +++ b/rpc/test/helpers.go @@ -0,0 +1,132 @@ +package rpctest + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/tendermint/tendermint/libs/log" + + abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + + cfg "github.com/tendermint/tendermint/config" + nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + core_grpc "github.com/tendermint/tendermint/rpc/grpc" + rpcclient "github.com/tendermint/tendermint/rpc/lib/client" +) + +var globalConfig *cfg.Config + +func waitForRPC() { + laddr := GetConfig().RPC.ListenAddress + client := rpcclient.NewJSONRPCClient(laddr) + ctypes.RegisterAmino(client.Codec()) + result := new(ctypes.ResultStatus) + for { + _, err := client.Call("status", map[string]interface{}{}, result) + if err == nil { + return + } else { + fmt.Println("error", err) + time.Sleep(time.Millisecond) + } + } +} + +func waitForGRPC() { + client := GetGRPCClient() + for { + _, err := client.Ping(context.Background(), &core_grpc.RequestPing{}) + if err == nil { + return + } + } +} + +// f**ing long, but unique for each test +func makePathname() string { + // get path + p, err := os.Getwd() + if err != nil { + panic(err) + } + // fmt.Println(p) + sep := string(filepath.Separator) + return strings.Replace(p, sep, "_", -1) +} + +func randPort() int { + return int(cmn.RandUint16()/2 + 10000) +} + +func makeAddrs() (string, string, string) { + start := randPort() + return fmt.Sprintf("tcp://0.0.0.0:%d", start), + fmt.Sprintf("tcp://0.0.0.0:%d", start+1), + fmt.Sprintf("tcp://0.0.0.0:%d", start+2) +} + +// GetConfig returns a config for the test cases as a singleton +func GetConfig() *cfg.Config { + if globalConfig == nil { + pathname := makePathname() + globalConfig = cfg.ResetTestRoot(pathname) + + // and we use random ports to run in parallel + tm, rpc, grpc := makeAddrs() + globalConfig.P2P.ListenAddress = tm + globalConfig.RPC.ListenAddress = rpc + globalConfig.RPC.GRPCListenAddress = grpc + globalConfig.TxIndex.IndexTags = "app.creator" // see kvstore application + } + return globalConfig +} + +func GetGRPCClient() core_grpc.BroadcastAPIClient { + grpcAddr := globalConfig.RPC.GRPCListenAddress + return core_grpc.StartGRPCClient(grpcAddr) +} + +// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized +func StartTendermint(app abci.Application) *nm.Node { + node := NewTendermint(app) + err := node.Start() + if err != nil { + panic(err) + } + + // wait for rpc + waitForRPC() + waitForGRPC() + + fmt.Println("Tendermint running!") + + return node +} + +// NewTendermint creates a new tendermint server and sleeps forever +func NewTendermint(app abci.Application) *nm.Node { + // Create & start node + config := GetConfig() + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewFilter(logger, log.AllowError()) + pvFile := config.PrivValidatorFile() + pv := privval.LoadOrGenFilePV(pvFile) + papp := proxy.NewLocalClientCreator(app) + node, err := nm.NewNode(config, pv, papp, + nm.DefaultGenesisDocProviderFunc(config), + nm.DefaultDBProvider, + nm.DefaultMetricsProvider, + logger) + if err != nil { + panic(err) + } + return node +} diff --git a/scripts/README.md b/scripts/README.md new 
file mode 100644
index 000000000..186241178
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1 @@
+* http://redsymbol.net/articles/unofficial-bash-strict-mode/
diff --git a/scripts/dist.sh b/scripts/dist.sh
new file mode 100755
index 000000000..40aa71e98
--- /dev/null
+++ b/scripts/dist.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+set -e
+
+# WARN: non-hermetic build (people must run this script inside docker to
+# produce deterministic binaries).
+
+# Get the version from the environment, or try to figure it out.
+if [ -z $VERSION ]; then
+  VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go)
+fi
+if [ -z "$VERSION" ]; then
+  echo "Please specify a version."
+  exit 1
+fi
+echo "==> Building version $VERSION..."
+
+# Delete the old dir
+echo "==> Removing old directory..."
+rm -rf build/pkg
+mkdir -p build/pkg
+
+# Get the git commit
+GIT_COMMIT="$(git rev-parse --short=8 HEAD)"
+GIT_IMPORT="github.com/tendermint/tendermint/version"
+
+# Determine the arch/os combos we're building for
+XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
+XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
+XC_EXCLUDE=${XC_EXCLUDE:-" darwin/arm solaris/amd64 solaris/386 solaris/arm freebsd/amd64 windows/arm "}
+
+# Make sure build tools are available.
+make get_tools
+
+# Get VENDORED dependencies
+make get_vendor_deps
+
+# Build!
+# ldflags: -s Omit the symbol table and debug information.
+#          -w Omit the DWARF symbol table.
+echo "==> Building..."
+IFS=' ' read -ra arch_list <<< "$XC_ARCH"
+IFS=' ' read -ra os_list <<< "$XC_OS"
+for arch in "${arch_list[@]}"; do
+  for os in "${os_list[@]}"; do
+    if [[ "$XC_EXCLUDE" != *" $os/$arch "* ]]; then
+      echo "--> $os/$arch"
+      GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/tendermint" ./cmd/tendermint
+    fi
+  done
+done
+
+# Zip all the files.
+echo "==> Packaging..."
+for PLATFORM in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type d); do
+  OSARCH=$(basename "${PLATFORM}")
+  echo "--> ${OSARCH}"
+
+  pushd "$PLATFORM" >/dev/null 2>&1
+  zip "../${OSARCH}.zip" ./*
+  popd >/dev/null 2>&1
+done
+
+# Add "tendermint" and $VERSION prefix to package name.
+rm -rf ./build/dist
+mkdir -p ./build/dist
+for FILENAME in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type f); do
+  FILENAME=$(basename "$FILENAME")
+  cp "./build/pkg/${FILENAME}" "./build/dist/tendermint_${VERSION}_${FILENAME}"
+done
+
+# Make the checksums.
+pushd ./build/dist
+shasum -a256 ./* > "./tendermint_${VERSION}_SHA256SUMS"
+popd
+
+# Done
+echo
+echo "==> Results:"
+ls -hl ./build/dist
+
+exit 0
diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh
new file mode 100644
index 000000000..aba584f2e
--- /dev/null
+++ b/scripts/install/install_tendermint_bsd.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/tcsh
+
+# XXX: this script is intended to be run from
+# a fresh Digital Ocean droplet with FreeBSD
+# Just run tcsh install_tendermint_bsd.sh
+
+# upon its completion, you must either reset
+# your terminal or run `source ~/.tcshrc`
+
+# This assumes you're installing it through tcsh as root.
+# Change the relevant lines from tcsh to csh if you're
+# installing as a different user, along with changing the
+# gopath.
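+
+# NOTE: the tarball fetched below pins Go 1.10; bump the
+# filename to install a newer toolchain.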
+ +# change this to a specific release or branch +set BRANCH=master + +sudo pkg update + +sudo pkg upgrade -y +sudo pkg install -y gmake +sudo pkg install -y git + +# get and unpack golang +curl -O https://storage.googleapis.com/golang/go1.10.freebsd-amd64.tar.gz +tar -xvf go1.10.freebsd-amd64.tar.gz + +# move go binary and add to path +mv go /usr/local +set path=($path /usr/local/go/bin) + + +# create the go directory, set GOPATH, and put it on PATH +mkdir go +echo "setenv GOPATH /root/go" >> ~/.tcshrc +setenv GOPATH /root/go +echo "set path=($path $GOPATH/bin)" >> ~/.tcshrc + +source ~/.tcshrc + +# get the code and move into repo +set REPO=github.com/tendermint/tendermint +go get $REPO +cd $GOPATH/src/$REPO + +# build & install master +git checkout $BRANCH +gmake get_tools +gmake get_vendor_deps +gmake install + +# the binary is located in $GOPATH/bin +# run `source ~/.profile` or reset your terminal +# to persist the changes diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh new file mode 100644 index 000000000..0e1de1177 --- /dev/null +++ b/scripts/install/install_tendermint_ubuntu.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# XXX: this script is intended to be run from +# a fresh Digital Ocean droplet with Ubuntu + +# upon its completion, you must either reset +# your terminal or run `source ~/.profile` + +# as written, this script will install +# tendermint core from master branch +REPO=github.com/tendermint/tendermint + +# change this to a specific release or branch +BRANCH=master + +sudo apt-get update -y +sudo apt-get upgrade -y +sudo apt-get install -y make + +# get and unpack golang +curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz +tar -xvf go1.10.linux-amd64.tar.gz + +# move go binary and add to path +mv go /usr/local +echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile + +# create the goApps directory, set GOPATH, and put it on PATH +mkdir goApps +echo "export GOPATH=/root/goApps" >> ~/.profile +echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile + +source ~/.profile + +# get the code and move into repo +go get $REPO +cd $GOPATH/src/$REPO + +# build & install +git checkout $BRANCH +# XXX: uncomment if branch isn't master +# git fetch origin $BRANCH +make get_tools +make get_vendor_deps +make install + +# the binary is located in $GOPATH/bin +# run `source ~/.profile` or reset your terminal +# to persist the changes diff --git a/scripts/publish.sh b/scripts/publish.sh new file mode 100755 index 000000000..ba9440878 --- /dev/null +++ b/scripts/publish.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -e + +VERSION=$1 +DIST_DIR=./build/dist + +# Get the version from the environment, or try to figure it out. +if [ -z $VERSION ]; then + VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go) +fi +if [ -z "$VERSION" ]; then + echo "Please specify a version." + exit 1 +fi +echo "==> Copying ${DIST_DIR} to S3..." + +# copy to s3 +aws s3 cp --recursive ${DIST_DIR} s3://tendermint/binaries/tendermint/v${VERSION} --acl public-read + +exit 0 diff --git a/scripts/release.sh b/scripts/release.sh new file mode 100755 index 000000000..9a4e508e1 --- /dev/null +++ b/scripts/release.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -e + +# Get the version from the environment, or try to figure it out. +if [ -z $VERSION ]; then + VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go) +fi +if [ -z "$VERSION" ]; then + echo "Please specify a version." 
+  exit 1
+fi
+echo "==> Releasing version $VERSION..."
+
+# Get the parent directory of where this script is.
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
+DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
+
+# Change into that dir because we expect that.
+cd "$DIR"
+
+# Building binaries
+sh -c "'$DIR/scripts/dist.sh'"
+
+# Pushing binaries to S3
+sh -c "'$DIR/scripts/publish.sh'"
+
+# echo "==> Crafting a Github release"
+# today=$(date +"%B-%d-%Y")
+# ghr -b "https://github.com/tendermint/tendermint/blob/master/CHANGELOG.md#${VERSION//.}-${today,}" "v$VERSION" "$DIR/build/dist"
+
+# Build and push Docker image
+
+## Get SHA256SUM of the linux archive
+SHA256SUM=$(shasum -a256 "${DIR}/build/dist/tendermint_${VERSION}_linux_amd64.zip" | awk '{print $1;}')
+
+## Replace TM_VERSION and TM_SHA256SUM with the new values
+sed -i -e "s/TM_VERSION .*/TM_VERSION $VERSION/g" "$DIR/DOCKER/Dockerfile"
+sed -i -e "s/TM_SHA256SUM .*/TM_SHA256SUM $SHA256SUM/g" "$DIR/DOCKER/Dockerfile"
+git commit -m "update Dockerfile" "$DIR/DOCKER/Dockerfile"
+echo "==> TODO: update DOCKER/README.md (latest Dockerfile's hash is $(git rev-parse HEAD)) and copy its content to https://store.docker.com/community/images/tendermint/tendermint"
+
+pushd "$DIR/DOCKER"
+
+## Build Docker image
+TAG=$VERSION sh -c "'./build.sh'"
+
+## Push Docker image
+TAG=$VERSION sh -c "'./push.sh'"
+
+popd
+
+exit 0
diff --git a/scripts/slate.sh b/scripts/slate.sh
new file mode 100644
index 000000000..e18babea7
--- /dev/null
+++ b/scripts/slate.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+if [ "$CIRCLE_BRANCH" == "" ]; then
+  echo "this script is meant to be run on CircleCI, exiting"
+  exit 1
+fi
+
+# check for changes in the `rpc/core` directory
+did_rpc_change=$(git diff --name-status $CIRCLE_BRANCH origin/master | grep rpc/core)
+
+if [ "$did_rpc_change" == "" ]; then
+  echo "no changes detected in rpc/core, exiting"
+  exit 0
+else
+  echo "changes detected in rpc/core, continuing"
+fi
+
+# only run this script on changes to rpc/core committed to master
+if [ "$CIRCLE_BRANCH" != "master" ]; then
+  echo "the branch being built isn't master, exiting"
+  exit 0
+else
+  echo "on master, building the RPC docs"
+fi
+
+# godoc2md used to convert the go documentation from
+# `rpc/core` into a markdown file consumed by Slate
+go get github.com/davecheney/godoc2md
+
+# slate works via forks, and we'll be committing to
+# master branch, which will trigger our fork to run
+# the `./deploy.sh` and publish via the `gh-pages` branch
+slate_repo=github.com/tendermint/slate
+slate_path="$GOPATH"/src/"$slate_repo"
+
+if [ ! -d "$slate_path" ]; then
+  git clone https://"$slate_repo".git $slate_path
+fi
+
+# the main file we need to update if rpc/core changed
+destination="$slate_path"/source/index.html.md
+
+# we remove it then re-create it with the latest changes
+rm $destination
+
+header="---
+title: RPC Reference
+
+language_tabs:
+  - shell
+  - go
+
+toc_footers:
+  - Tendermint
+  - Documentation Powered by Slate
+
+search: true
+---"
+
+# write header to the main slate file
+echo "$header" > "$destination"
+
+# generate a markdown from the godoc comments, using a template
+rpc_docs=$(godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$')
+
+# append core RPC docs
+echo "$rpc_docs" >> "$destination"
+
+# commit the changes
+cd $slate_path
+
+git config --global user.email "github@tendermint.com"
+git config --global user.name "tenderbot"
+
+git commit -a -m "Update tendermint RPC docs via CircleCI"
+git push -q https://${GITHUB_ACCESS_TOKEN}@github.com/tendermint/slate.git master
diff --git a/scripts/txs/random.sh b/scripts/txs/random.sh
new file mode 100644
index 000000000..231fabcfe
--- /dev/null
+++ b/scripts/txs/random.sh
@@ -0,0 +1,19 @@
+#! /bin/bash
+set -u
+
+function toHex() {
+  echo -n $1 | hexdump -ve '1/1 "%.2X"'
+}
+
+N=$1
+PORT=$2
+
+for i in `seq 1 $N`; do
+  # store key value pair
+  KEY=$(head -c 10 /dev/urandom)
+  VALUE="$i"
+  echo $(toHex $KEY=$VALUE)
+  curl 127.0.0.1:$PORT/broadcast_tx_sync?tx=0x$(toHex $KEY=$VALUE)
+done
+
diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go
new file mode 100644
index 000000000..f6ffea431
--- /dev/null
+++ b/scripts/wal2json/main.go
@@ -0,0 +1,59 @@
+/*
+	wal2json converts a binary WAL file to JSON.
+
+	Usage:
+		wal2json <path-to-wal>
+*/
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+
+	cs "github.com/tendermint/tendermint/consensus"
+)
+
+func main() {
+	if len(os.Args) < 2 {
+		fmt.Println("missing one argument: <path-to-wal>")
+		os.Exit(1)
+	}
+
+	f, err := os.Open(os.Args[1])
+	if err != nil {
+		panic(fmt.Errorf("failed to open WAL file: %v", err))
+	}
+	defer f.Close()
+
+	dec := cs.NewWALDecoder(f)
+	for {
+		msg, err := dec.Decode()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			panic(fmt.Errorf("failed to decode msg: %v", err))
+		}
+
+		json, err := json.Marshal(msg)
+		if err != nil {
+			panic(fmt.Errorf("failed to marshal msg: %v", err))
+		}
+
+		_, err = os.Stdout.Write(json)
+		if err == nil {
+			_, err = os.Stdout.Write([]byte("\n"))
+		}
+		if err == nil {
+			if end, ok := msg.Msg.(cs.EndHeightMessage); ok {
+				_, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", end.Height))) // nolint: errcheck, gas
+			}
+		}
+		if err != nil {
+			fmt.Println("Failed to write message", err)
+			os.Exit(1)
+		}
+	}
+}
diff --git a/scripts/wire2amino.go b/scripts/wire2amino.go
new file mode 100644
index 000000000..867c5735a
--- /dev/null
+++ b/scripts/wire2amino.go
@@ -0,0 +1,181 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/tendermint/go-amino"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/privval"
+	"github.com/tendermint/tendermint/types"
+)
+
+type GenesisValidator struct {
+	PubKey Data   `json:"pub_key"`
+	Power  int64  `json:"power"`
+	Name   string `json:"name"`
+}
+
+type Genesis struct {
+	GenesisTime     time.Time              `json:"genesis_time"`
+	ChainID         string                 `json:"chain_id"`
+	ConsensusParams *types.ConsensusParams `json:"consensus_params,omitempty"`
+	Validators      []GenesisValidator     `json:"validators"`
+	AppHash         cmn.HexBytes           `json:"app_hash"`
+	AppStateJSON    json.RawMessage        `json:"app_state,omitempty"`
+	AppOptions      json.RawMessage        `json:"app_options,omitempty"` // DEPRECATED
+}
+
+type NodeKey struct {
+	PrivKey Data `json:"priv_key"`
+}
+
+type PrivVal struct {
+	Address    cmn.HexBytes `json:"address"`
+	LastHeight int64        `json:"last_height"`
+	LastRound  int          `json:"last_round"`
+	LastStep   int8         `json:"last_step"`
+	PubKey     Data         `json:"pub_key"`
+	PrivKey    Data         `json:"priv_key"`
+}
+
+type Data struct {
+	Type string       `json:"type"`
+	Data cmn.HexBytes `json:"data"`
+}
+
+func convertNodeKey(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
+	var nodeKey NodeKey
+	err := json.Unmarshal(jsonBytes, &nodeKey)
+	if err != nil {
+		return nil, err
+	}
+
+	var privKey crypto.PrivKeyEd25519
+	copy(privKey[:], nodeKey.PrivKey.Data)
+
+	nodeKeyNew := p2p.NodeKey{privKey}
+
+	bz, err := cdc.MarshalJSON(nodeKeyNew)
+	if err != nil {
+		return nil, err
+	}
+	return bz, nil
+}
+
+func convertPrivVal(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
+	var privVal PrivVal
+	err := json.Unmarshal(jsonBytes, &privVal)
+	if err != nil {
+		return nil, err
+	}
+
+	var privKey crypto.PrivKeyEd25519
+	copy(privKey[:], privVal.PrivKey.Data)
+
+	var pubKey crypto.PubKeyEd25519
+	copy(pubKey[:], privVal.PubKey.Data)
+
+	privValNew := privval.FilePV{
+		Address:    pubKey.Address(),
+		PubKey:     pubKey,
+		LastHeight: privVal.LastHeight,
+		LastRound:  privVal.LastRound,
+		LastStep:   privVal.LastStep,
+		PrivKey:    privKey,
+	}
+
+	bz, err := cdc.MarshalJSON(privValNew)
+	if err != nil {
+		return nil, err
+	}
+	return bz, nil
+}
+
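+// convertGenesis maps a legacy go-wire genesis document onto types.GenesisDoc.
+// Note that the deprecated app_options field, when present, takes precedence
+// over app_state (see the override below).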
+func convertGenesis(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { + var genesis Genesis + err := json.Unmarshal(jsonBytes, &genesis) + if err != nil { + return nil, err + } + + genesisNew := types.GenesisDoc{ + GenesisTime: genesis.GenesisTime, + ChainID: genesis.ChainID, + ConsensusParams: genesis.ConsensusParams, + // Validators + AppHash: genesis.AppHash, + AppStateJSON: genesis.AppStateJSON, + } + + if genesis.AppOptions != nil { + genesisNew.AppStateJSON = genesis.AppOptions + } + + for _, v := range genesis.Validators { + var pubKey crypto.PubKeyEd25519 + copy(pubKey[:], v.PubKey.Data) + genesisNew.Validators = append( + genesisNew.Validators, + types.GenesisValidator{ + PubKey: pubKey, + Power: v.Power, + Name: v.Name, + }, + ) + + } + + bz, err := cdc.MarshalJSON(genesisNew) + if err != nil { + return nil, err + } + return bz, nil +} + +func main() { + cdc := amino.NewCodec() + crypto.RegisterAmino(cdc) + + args := os.Args[1:] + if len(args) != 1 { + fmt.Println("Please specify a file to convert") + os.Exit(1) + } + + filePath := args[0] + fileName := filepath.Base(filePath) + + fileBytes, err := ioutil.ReadFile(filePath) + if err != nil { + panic(err) + } + + var bz []byte + + switch fileName { + case "node_key.json": + bz, err = convertNodeKey(cdc, fileBytes) + case "priv_validator.json": + bz, err = convertPrivVal(cdc, fileBytes) + case "genesis.json": + bz, err = convertGenesis(cdc, fileBytes) + default: + fmt.Println("Expected file name to be in (node_key.json, priv_validator.json, genesis.json)") + os.Exit(1) + } + + if err != nil { + panic(err) + } + fmt.Println(string(bz)) + +} diff --git a/state/errors.go b/state/errors.go new file mode 100644 index 000000000..d40c7e141 --- /dev/null +++ b/state/errors.go @@ -0,0 +1,79 @@ +package state + +import ( + cmn "github.com/tendermint/tendermint/libs/common" +) + +type ( + ErrInvalidBlock error + ErrProxyAppConn error + + ErrUnknownBlock struct { + Height int64 + } + + ErrBlockHashMismatch struct { + CoreHash []byte + AppHash []byte + Height int64 + } + + ErrAppBlockHeightTooHigh struct { + CoreHeight int64 + AppHeight int64 + } + + ErrLastStateMismatch struct { + Height int64 + Core []byte + App []byte + } + + ErrStateMismatch struct { + Got *State + Expected *State + } + + ErrNoValSetForHeight struct { + Height int64 + } + + ErrNoConsensusParamsForHeight struct { + Height int64 + } + + ErrNoABCIResponsesForHeight struct { + Height int64 + } +) + +func (e ErrUnknownBlock) Error() string { + return cmn.Fmt("Could not find block #%d", e.Height) +} + +func (e ErrBlockHashMismatch) Error() string { + return cmn.Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height) +} + +func (e ErrAppBlockHeightTooHigh) Error() string { + return cmn.Fmt("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) +} +func (e ErrLastStateMismatch) Error() string { + return cmn.Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App) +} + +func (e ErrStateMismatch) Error() string { + return cmn.Fmt("State after replay does not match saved state. 
Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected) +} + +func (e ErrNoValSetForHeight) Error() string { + return cmn.Fmt("Could not find validator set for height #%d", e.Height) +} + +func (e ErrNoConsensusParamsForHeight) Error() string { + return cmn.Fmt("Could not find consensus params for height #%d", e.Height) +} + +func (e ErrNoABCIResponsesForHeight) Error() string { + return cmn.Fmt("Could not find results for height #%d", e.Height) +} diff --git a/state/execution.go b/state/execution.go new file mode 100644 index 000000000..601abec9e --- /dev/null +++ b/state/execution.go @@ -0,0 +1,402 @@ +package state + +import ( + "fmt" + + fail "github.com/ebuchman/fail-test" + abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +//----------------------------------------------------------------------------- +// BlockExecutor handles block execution and state updates. +// It exposes ApplyBlock(), which validates & executes the block, updates state w/ ABCI responses, +// then commits and updates the mempool atomically, then saves state. + +// BlockExecutor provides the context and accessories for properly executing a block. +type BlockExecutor struct { + // save state, validators, consensus params, abci responses here + db dbm.DB + + // execute the app against this + proxyApp proxy.AppConnConsensus + + // events + eventBus types.BlockEventPublisher + + // update these with block results after commit + mempool Mempool + evpool EvidencePool + + logger log.Logger +} + +// NewBlockExecutor returns a new BlockExecutor with a NopEventBus. +// Call SetEventBus to provide one. +func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, + mempool Mempool, evpool EvidencePool) *BlockExecutor { + return &BlockExecutor{ + db: db, + proxyApp: proxyApp, + eventBus: types.NopEventBus{}, + mempool: mempool, + evpool: evpool, + logger: logger, + } +} + +// SetEventBus - sets the event bus for publishing block related events. +// If not called, it defaults to types.NopEventBus. +func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { + blockExec.eventBus = eventBus +} + +// ValidateBlock validates the given block against the given state. +// If the block is invalid, it returns an error. +// Validation does not mutate state, but does require historical information from the stateDB, +// ie. to verify evidence from a validator at an old height. +func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { + return validateBlock(blockExec.db, state, block) +} + +// ApplyBlock validates the block against the state, executes it against the app, +// fires the relevant events, commits the app, and saves the new state and responses. +// It's the only function that needs to be called +// from outside this package to process and commit an entire block. +// It takes a blockID to avoid recomputing the parts hash. 
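+// NOTE: the fail.Fail() calls below are failure-injection points
+// (github.com/ebuchman/fail-test) used in tests to simulate crashes
+// between the stages of block processing.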
+func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, block *types.Block) (State, error) {
+
+	if err := blockExec.ValidateBlock(state, block); err != nil {
+		return state, ErrInvalidBlock(err)
+	}
+
+	abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db)
+	if err != nil {
+		return state, ErrProxyAppConn(err)
+	}
+
+	fail.Fail() // XXX
+
+	// save the results before we commit
+	saveABCIResponses(blockExec.db, block.Height, abciResponses)
+
+	fail.Fail() // XXX
+
+	// update the state with the block and responses
+	state, err = updateState(state, blockID, block.Header, abciResponses)
+	if err != nil {
+		return state, fmt.Errorf("Error updating state with block and responses: %v", err)
+	}
+
+	// lock mempool, commit app state, update mempool
+	appHash, err := blockExec.Commit(block)
+	if err != nil {
+		return state, fmt.Errorf("Commit failed for application: %v", err)
+	}
+
+	// Update evpool with the block and state.
+	blockExec.evpool.Update(block, state)
+
+	fail.Fail() // XXX
+
+	// update the app hash and save the state
+	state.AppHash = appHash
+	SaveState(blockExec.db, state)
+
+	fail.Fail() // XXX
+
+	// events are fired after everything else
+	// NOTE: if we crash between Commit and Save, events won't be fired during replay
+	fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses)
+
+	return state, nil
+}
+
+// Commit locks the mempool, runs the ABCI Commit message, and updates the mempool.
+// It returns the result of calling abci.Commit (the AppHash), and an error.
+// The Mempool must be locked during commit and update because state is typically reset on Commit and old txs must be replayed
+// against committed state before new txs are run in the mempool, lest they be invalid.
+func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) {
+	blockExec.mempool.Lock()
+	defer blockExec.mempool.Unlock()
+
+	// while mempool is Locked, flush to ensure all async requests have completed
+	// in the ABCI app before Commit.
+	err := blockExec.mempool.FlushAppConn()
+	if err != nil {
+		blockExec.logger.Error("Client error during mempool.FlushAppConn", "err", err)
+		return nil, err
+	}
+
+	// Commit block, get hash back
+	res, err := blockExec.proxyApp.CommitSync()
+	if err != nil {
+		blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err)
+		return nil, err
+	}
+	// ResponseCommit has no error code - just data
+
+	blockExec.logger.Info("Committed state",
+		"height", block.Height,
+		"txs", block.NumTxs,
+		"appHash", fmt.Sprintf("%X", res.Data))
+
+	// Update mempool.
+	if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil {
+		return nil, err
+	}
+
+	return res.Data, nil
+}
+
+//---------------------------------------------------------
+// Helper functions for executing blocks and updating state
+
+// Executes block's transactions on proxyAppConn.
+// Returns a list of transaction results and updates to the validator set +func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, + block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (*ABCIResponses, error) { + var validTxs, invalidTxs = 0, 0 + + txIndex := 0 + abciResponses := NewABCIResponses(block) + + // Execute transactions and get hash + proxyCb := func(req *abci.Request, res *abci.Response) { + switch r := res.Value.(type) { + case *abci.Response_DeliverTx: + // TODO: make use of res.Log + // TODO: make use of this info + // Blocks may include invalid txs. + txRes := r.DeliverTx + if txRes.Code == abci.CodeTypeOK { + validTxs++ + } else { + logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log) + invalidTxs++ + } + abciResponses.DeliverTx[txIndex] = txRes + txIndex++ + } + } + proxyAppConn.SetResponseCallback(proxyCb) + + signVals, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) + + // Begin block + _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ + Hash: block.Hash(), + Header: types.TM2PB.Header(block.Header), + Validators: signVals, + ByzantineValidators: byzVals, + }) + if err != nil { + logger.Error("Error in proxyAppConn.BeginBlock", "err", err) + return nil, err + } + + // Run txs of block + for _, tx := range block.Txs { + proxyAppConn.DeliverTxAsync(tx) + if err := proxyAppConn.Error(); err != nil { + return nil, err + } + } + + // End block + abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{block.Height}) + if err != nil { + logger.Error("Error in proxyAppConn.EndBlock", "err", err) + return nil, err + } + + logger.Info("Executed block", "height", block.Height, "validTxs", validTxs, "invalidTxs", invalidTxs) + + valUpdates := abciResponses.EndBlock.ValidatorUpdates + if len(valUpdates) > 0 { + logger.Info("Updates to validators", "updates", abci.ValidatorsString(valUpdates)) + } + + return abciResponses, nil +} + +func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]abci.SigningValidator, []abci.Evidence) { + + // Sanity check that commit length matches validator set size - + // only applies after first block + if block.Height > 1 { + precommitLen := len(block.LastCommit.Precommits) + valSetLen := len(lastValSet.Validators) + if precommitLen != valSetLen { + // sanity check + panic(fmt.Sprintf("precommit length (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", + precommitLen, valSetLen, block.Height, block.LastCommit.Precommits, lastValSet.Validators)) + } + } + + // determine which validators did not sign last block. + signVals := make([]abci.SigningValidator, len(lastValSet.Validators)) + for i, val := range lastValSet.Validators { + var vote *types.Vote + if i < len(block.LastCommit.Precommits) { + vote = block.LastCommit.Precommits[i] + } + val := abci.SigningValidator{ + Validator: types.TM2PB.Validator(val), + SignedLastBlock: vote != nil, + } + signVals[i] = val + } + + byzVals := make([]abci.Evidence, len(block.Evidence.Evidence)) + for i, ev := range block.Evidence.Evidence { + // We need the validator set. We already did this in validateBlock. + // TODO: Should we instead cache the valset in the evidence itself and add + // `SetValidatorSet()` and `ToABCI` methods ? 
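+		// The validator set at the evidence height is what lets us resolve
+		// the offending validator when converting to ABCI evidence.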
+		valset, err := LoadValidators(stateDB, ev.Height())
+		if err != nil {
+			panic(err) // shouldn't happen
+		}
+		byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time)
+	}
+
+	return signVals, byzVals
+}
+
+// updateValidators applies a list of ABCI validator updates to the current set.
+// If 1/3 or more of the total voting power changes in one block, a light client
+// could never prove the transition externally. See ./lite/doc.go for details on
+// how a light client tracks validators.
+func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validator) error {
+	updates, err := types.PB2TM.Validators(abciUpdates)
+	if err != nil {
+		return err
+	}
+
+	// these are tendermint types now
+	for _, valUpdate := range updates {
+		if valUpdate.VotingPower < 0 {
+			return fmt.Errorf("Voting power can't be negative: %v", valUpdate)
+		}
+
+		address := valUpdate.Address
+		_, val := currentSet.GetByAddress(address)
+		if valUpdate.VotingPower == 0 {
+			// remove val
+			_, removed := currentSet.Remove(address)
+			if !removed {
+				return fmt.Errorf("Failed to remove validator %X", address)
+			}
+		} else if val == nil {
+			// add val
+			added := currentSet.Add(valUpdate)
+			if !added {
+				return fmt.Errorf("Failed to add new validator %v", valUpdate)
+			}
+		} else {
+			// update val
+			updated := currentSet.Update(valUpdate)
+			if !updated {
+				return fmt.Errorf("Failed to update validator %X to %v", address, valUpdate)
+			}
+		}
+	}
+	return nil
+}
+
+// updateState returns a new State updated according to the header and responses.
+func updateState(state State, blockID types.BlockID, header *types.Header,
+	abciResponses *ABCIResponses) (State, error) {
+
+	// copy the valset so we can apply changes from EndBlock
+	// and update s.LastValidators and s.Validators
+	prevValSet := state.Validators.Copy()
+	nextValSet := prevValSet.Copy()
+
+	// update the validator set with the latest abciResponses
+	lastHeightValsChanged := state.LastHeightValidatorsChanged
+	if len(abciResponses.EndBlock.ValidatorUpdates) > 0 {
+		err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates)
+		if err != nil {
+			return state, fmt.Errorf("Error changing validator set: %v", err)
+		}
+		// the change is from this height, but only takes effect at the next height
+		lastHeightValsChanged = header.Height + 1
+	}
+
+	// Update validator accums and set state variables
+	nextValSet.IncrementAccum(1)
+
+	// update the params with the latest abciResponses
+	nextParams := state.ConsensusParams
+	lastHeightParamsChanged := state.LastHeightConsensusParamsChanged
+	if abciResponses.EndBlock.ConsensusParamUpdates != nil {
+		// NOTE: must not mutate s.ConsensusParams
+		nextParams = state.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates)
+		err := nextParams.Validate()
+		if err != nil {
+			return state, fmt.Errorf("Error updating consensus params: %v", err)
+		}
+		// the change is from this height, but only takes effect at the next height
+		lastHeightParamsChanged = header.Height + 1
+	}
+
+	// NOTE: the AppHash has not been populated.
+	// It will be filled on state.Save.
+	return State{
+		ChainID:                          state.ChainID,
+		LastBlockHeight:                  header.Height,
+		LastBlockTotalTx:                 state.LastBlockTotalTx + header.NumTxs,
+		LastBlockID:                      blockID,
+		LastBlockTime:                    header.Time,
+		Validators:                       nextValSet,
+		LastValidators:                   state.Validators.Copy(),
+		LastHeightValidatorsChanged:      lastHeightValsChanged,
+		ConsensusParams:                  nextParams,
+		LastHeightConsensusParamsChanged: lastHeightParamsChanged,
+		LastResultsHash:                  abciResponses.ResultsHash(),
+		AppHash:                          nil,
+	}, nil
+}
+
+// Fire NewBlock, NewBlockHeader.
+// Fire TxEvent for every tx.
+// NOTE: if Tendermint crashes before commit, some or all of these events may be published again.
+func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses) {
+	eventBus.PublishEventNewBlock(types.EventDataNewBlock{block})
+	eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header})
+
+	for i, tx := range block.Data.Txs {
+		eventBus.PublishEventTx(types.EventDataTx{types.TxResult{
+			Height: block.Height,
+			Index:  uint32(i),
+			Tx:     tx,
+			Result: *(abciResponses.DeliverTx[i]),
+		}})
+	}
+}
+
+//----------------------------------------------------------------------------------------------------
+// Execute block without state. TODO: eliminate
+
+// ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state.
+// It returns the application root hash (result of abci.Commit).
+func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block,
+	logger log.Logger, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]byte, error) {
+	_, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB)
+	if err != nil {
+		logger.Error("Error executing block on proxy app", "height", block.Height, "err", err)
+		return nil, err
+	}
+	// Commit block, get hash back
+	res, err := appConnConsensus.CommitSync()
+	if err != nil {
+		logger.Error("Client error during proxyAppConn.CommitSync", "err", err)
+		return nil, err
+	}
+	// ResponseCommit has no error or log, just data
+	return res.Data, nil
+}
diff --git a/state/execution_test.go b/state/execution_test.go
new file mode 100644
index 000000000..5e0072c30
--- /dev/null
+++ b/state/execution_test.go
@@ -0,0 +1,315 @@
+package state
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	abci "github.com/tendermint/tendermint/abci/types"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
+
+	"github.com/tendermint/tendermint/proxy"
+	"github.com/tendermint/tendermint/types"
+)
+
+var (
+	chainID      = "execution_chain"
+	testPartSize = 65536
+	nTxsPerBlock = 10
+)
+
+func TestApplyBlock(t *testing.T) {
+	cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
+	proxyApp := proxy.NewAppConns(cc, nil)
+	err := proxyApp.Start()
+	require.Nil(t, err)
+	defer proxyApp.Stop()
+
+	state, stateDB := state(1, 1)
+
+	blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(),
+		MockMempool{}, MockEvidencePool{})
+
+	block := makeBlock(state, 1)
+	blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
+
+	state, err = blockExec.ApplyBlock(state, blockID, block)
+	require.Nil(t, err)
+
+	// TODO check state and mempool
+}
+
+// TestBeginBlockValidators ensures we send absent validators list.
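+// Each abci.SigningValidator in RequestBeginBlock carries SignedLastBlock;
+// it must be false exactly for the validators whose precommit below is nil.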
+func TestBeginBlockValidators(t *testing.T) { + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc, nil) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() + + state, stateDB := state(2, 2) + + prevHash := state.LastBlockID.Hash + prevParts := types.PartSetHeader{} + prevBlockID := types.BlockID{prevHash, prevParts} + + now := time.Now().UTC() + vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} + vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} + + testCases := []struct { + desc string + lastCommitPrecommits []*types.Vote + expectedAbsentValidators []int + }{ + {"none absent", []*types.Vote{vote0, vote1}, []int{}}, + {"one absent", []*types.Vote{vote0, nil}, []int{1}}, + {"multiple absent", []*types.Vote{nil, nil}, []int{0, 1}}, + } + + for _, tc := range testCases { + lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits} + + // block for height 2 + block, _ := state.MakeBlock(2, makeTxs(2), lastCommit) + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) + require.Nil(t, err, tc.desc) + + // -> app receives a list of validators with a bool indicating if they signed + ctr := 0 + for i, v := range app.Validators { + if ctr < len(tc.expectedAbsentValidators) && + tc.expectedAbsentValidators[ctr] == i { + + assert.False(t, v.SignedLastBlock) + ctr++ + } else { + assert.True(t, v.SignedLastBlock) + } + } + } +} + +// TestBeginBlockByzantineValidators ensures we send byzantine validators list. +func TestBeginBlockByzantineValidators(t *testing.T) { + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc, nil) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() + + state, stateDB := state(2, 12) + + prevHash := state.LastBlockID.Hash + prevParts := types.PartSetHeader{} + prevBlockID := types.BlockID{prevHash, prevParts} + + height1, idx1, val1 := int64(8), 0, state.Validators.Validators[0].Address + height2, idx2, val2 := int64(3), 1, state.Validators.Validators[1].Address + ev1 := types.NewMockGoodEvidence(height1, idx1, val1) + ev2 := types.NewMockGoodEvidence(height2, idx2, val2) + + now := time.Now() + valSet := state.Validators + testCases := []struct { + desc string + evidence []types.Evidence + expectedByzantineValidators []abci.Evidence + }{ + {"none byzantine", []types.Evidence{}, []abci.Evidence{}}, + {"one byzantine", []types.Evidence{ev1}, []abci.Evidence{types.TM2PB.Evidence(ev1, valSet, now)}}, + {"multiple byzantine", []types.Evidence{ev1, ev2}, []abci.Evidence{ + types.TM2PB.Evidence(ev1, valSet, now), + types.TM2PB.Evidence(ev2, valSet, now)}}, + } + + vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} + vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} + votes := []*types.Vote{vote0, vote1} + lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: votes} + for _, tc := range testCases { + + block, _ := state.MakeBlock(10, makeTxs(2), lastCommit) + block.Time = now + block.Evidence.Evidence = tc.evidence + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) + require.Nil(t, err, tc.desc) + + // -> app must receive an index of the byzantine validator + assert.Equal(t, tc.expectedByzantineValidators, app.ByzantineValidators, tc.desc) + } +} + +func TestUpdateValidators(t *testing.T) { + pubkey1 := crypto.GenPrivKeyEd25519().PubKey() + 
val1 := types.NewValidator(pubkey1, 10) + pubkey2 := crypto.GenPrivKeyEd25519().PubKey() + val2 := types.NewValidator(pubkey2, 20) + + testCases := []struct { + name string + + currentSet *types.ValidatorSet + abciUpdates []abci.Validator + + resultingSet *types.ValidatorSet + shouldErr bool + }{ + { + "adding a validator is OK", + + types.NewValidatorSet([]*types.Validator{val1}), + []abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), 20}}, + + types.NewValidatorSet([]*types.Validator{val1, val2}), + false, + }, + { + "updating a validator is OK", + + types.NewValidatorSet([]*types.Validator{val1}), + []abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey1), 20}}, + + types.NewValidatorSet([]*types.Validator{types.NewValidator(pubkey1, 20)}), + false, + }, + { + "removing a validator is OK", + + types.NewValidatorSet([]*types.Validator{val1, val2}), + []abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), 0}}, + + types.NewValidatorSet([]*types.Validator{val1}), + false, + }, + + { + "removing a non-existing validator results in error", + + types.NewValidatorSet([]*types.Validator{val1}), + []abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), 0}}, + + types.NewValidatorSet([]*types.Validator{val1}), + true, + }, + + { + "adding a validator with negative power results in error", + + types.NewValidatorSet([]*types.Validator{val1}), + []abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), -100}}, + + types.NewValidatorSet([]*types.Validator{val1}), + true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := updateValidators(tc.currentSet, tc.abciUpdates) + if tc.shouldErr { + assert.Error(t, err) + } else { + require.Equal(t, tc.resultingSet.Size(), tc.currentSet.Size()) + + assert.Equal(t, tc.resultingSet.TotalVotingPower(), tc.currentSet.TotalVotingPower()) + + assert.Equal(t, tc.resultingSet.Validators[0].Address, tc.currentSet.Validators[0].Address) + if tc.resultingSet.Size() > 1 { + assert.Equal(t, tc.resultingSet.Validators[1].Address, tc.currentSet.Validators[1].Address) + } + } + }) + } +} + +//---------------------------------------------------------------------------- + +// make some bogus txs +func makeTxs(height int64) (txs []types.Tx) { + for i := 0; i < nTxsPerBlock; i++ { + txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) + } + return txs +} + +func state(nVals, height int) (State, dbm.DB) { + vals := make([]types.GenesisValidator, nVals) + for i := 0; i < nVals; i++ { + secret := []byte(fmt.Sprintf("test%d", i)) + pk := crypto.GenPrivKeyEd25519FromSecret(secret) + vals[i] = types.GenesisValidator{ + pk.PubKey(), 1000, fmt.Sprintf("test%d", i), + } + } + s, _ := MakeGenesisState(&types.GenesisDoc{ + ChainID: chainID, + Validators: vals, + AppHash: nil, + }) + + // save validators to db for 2 heights + stateDB := dbm.NewMemDB() + SaveState(stateDB, s) + + for i := 1; i < height; i++ { + s.LastBlockHeight += 1 + SaveState(stateDB, s) + } + return s, stateDB +} + +func makeBlock(state State, height int64) *types.Block { + block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit)) + return block +} + +//---------------------------------------------------------------------------- + +var _ abci.Application = (*testApp)(nil) + +type testApp struct { + abci.BaseApplication + + Validators []abci.SigningValidator + ByzantineValidators []abci.Evidence +} + +func NewKVStoreApplication() *testApp { + return &testApp{} +} + +func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { + 
return abci.ResponseInfo{}
+}
+
+func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
+	app.Validators = req.Validators
+	app.ByzantineValidators = req.ByzantineValidators
+	return abci.ResponseBeginBlock{}
+}
+
+func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
+	return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}}
+}
+
+func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx {
+	return abci.ResponseCheckTx{}
+}
+
+func (app *testApp) Commit() abci.ResponseCommit {
+	return abci.ResponseCommit{}
+}
+
+func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
+	return
+}
diff --git a/state/services.go b/state/services.go
new file mode 100644
index 000000000..bf0b1a6f4
--- /dev/null
+++ b/state/services.go
@@ -0,0 +1,86 @@
+package state
+
+import (
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/types"
+)
+
+//------------------------------------------------------
+// blockchain services types
+// NOTE: Interfaces used by RPC must be thread safe!
+//------------------------------------------------------
+
+//------------------------------------------------------
+// mempool
+
+// Mempool defines the mempool interface as used by the ConsensusState.
+// Updates to the mempool need to be synchronized with committing a block
+// so apps can reset their transient state on Commit.
+type Mempool interface {
+	Lock()
+	Unlock()
+
+	Size() int
+	CheckTx(types.Tx, func(*abci.Response)) error
+	Reap(int) types.Txs
+	Update(height int64, txs types.Txs) error
+	Flush()
+	FlushAppConn() error
+
+	TxsAvailable() <-chan int64
+	EnableTxsAvailable()
+}
+
+// MockMempool is an empty implementation of a Mempool, useful for testing.
+type MockMempool struct {
+}
+
+func (m MockMempool) Lock()                                        {}
+func (m MockMempool) Unlock()                                      {}
+func (m MockMempool) Size() int                                    { return 0 }
+func (m MockMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) error { return nil }
+func (m MockMempool) Reap(n int) types.Txs                         { return types.Txs{} }
+func (m MockMempool) Update(height int64, txs types.Txs) error     { return nil }
+func (m MockMempool) Flush()                                       {}
+func (m MockMempool) FlushAppConn() error                          { return nil }
+func (m MockMempool) TxsAvailable() <-chan int64                   { return make(chan int64) }
+func (m MockMempool) EnableTxsAvailable()                          {}
+
+//------------------------------------------------------
+// blockstore
+
+// BlockStoreRPC is the block store interface used by the RPC.
+type BlockStoreRPC interface {
+	Height() int64
+
+	LoadBlockMeta(height int64) *types.BlockMeta
+	LoadBlock(height int64) *types.Block
+	LoadBlockPart(height int64, index int) *types.Part
+
+	LoadBlockCommit(height int64) *types.Commit
+	LoadSeenCommit(height int64) *types.Commit
+}
+
+// BlockStore defines the BlockStore interface used by the ConsensusState.
+type BlockStore interface {
+	BlockStoreRPC
+	SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
+}
+
+//-----------------------------------------------------------------------------------------------------
+// evidence pool
+
+// EvidencePool defines the EvidencePool interface used by the ConsensusState.
+type EvidencePool interface {
+	PendingEvidence() []types.Evidence
+	AddEvidence(types.Evidence) error
+	Update(*types.Block, State)
+}
+
+// MockEvidencePool is an empty implementation of an EvidencePool, useful for testing.
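+// For example, tests can wire both mocks straight into a BlockExecutor:
+//
+//	blockExec := NewBlockExecutor(stateDB, log.TestingLogger(),
+//		proxyApp.Consensus(), MockMempool{}, MockEvidencePool{})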
+type MockEvidencePool struct { +} + +func (m MockEvidencePool) PendingEvidence() []types.Evidence { return nil } +func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil } +func (m MockEvidencePool) Update(*types.Block, State) {} diff --git a/state/state.go b/state/state.go new file mode 100644 index 000000000..3bc08dae3 --- /dev/null +++ b/state/state.go @@ -0,0 +1,187 @@ +package state + +import ( + "bytes" + "fmt" + "io/ioutil" + "time" + + "github.com/tendermint/tendermint/types" +) + +// database keys +var ( + stateKey = []byte("stateKey") +) + +//----------------------------------------------------------------------------- + +// State is a short description of the latest committed block of the Tendermint consensus. +// It keeps all information necessary to validate new blocks, +// including the last validator set and the consensus params. +// All fields are exposed so the struct can be easily serialized, +// but none of them should be mutated directly. +// Instead, use state.Copy() or state.NextState(...). +// NOTE: not goroutine-safe. +type State struct { + // Immutable + ChainID string + + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + LastBlockHeight int64 + LastBlockTotalTx int64 + LastBlockID types.BlockID + LastBlockTime time.Time + + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + Validators *types.ValidatorSet + LastValidators *types.ValidatorSet + LastHeightValidatorsChanged int64 + + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. + ConsensusParams types.ConsensusParams + LastHeightConsensusParamsChanged int64 + + // Merkle root of the results from executing prev block + LastResultsHash []byte + + // The latest AppHash we've received from calling abci.Commit() + AppHash []byte +} + +// Copy makes a copy of the State for mutating. +func (state State) Copy() State { + return State{ + ChainID: state.ChainID, + + LastBlockHeight: state.LastBlockHeight, + LastBlockTotalTx: state.LastBlockTotalTx, + LastBlockID: state.LastBlockID, + LastBlockTime: state.LastBlockTime, + + Validators: state.Validators.Copy(), + LastValidators: state.LastValidators.Copy(), + LastHeightValidatorsChanged: state.LastHeightValidatorsChanged, + + ConsensusParams: state.ConsensusParams, + LastHeightConsensusParamsChanged: state.LastHeightConsensusParamsChanged, + + AppHash: state.AppHash, + + LastResultsHash: state.LastResultsHash, + } +} + +// Equals returns true if the States are identical. +func (state State) Equals(state2 State) bool { + sbz, s2bz := state.Bytes(), state2.Bytes() + return bytes.Equal(sbz, s2bz) +} + +// Bytes serializes the State using go-amino. +func (state State) Bytes() []byte { + return cdc.MustMarshalBinaryBare(state) +} + +// IsEmpty returns true if the State is equal to the empty State. +func (state State) IsEmpty() bool { + return state.Validators == nil // XXX can't compare to Empty +} + +// GetValidators returns the last and current validator sets. 
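+// The last set is the one that validated block.LastCommit; the current set
+// validates the next block.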
+func (state State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { + return state.LastValidators, state.Validators +} + +//------------------------------------------------------------------------ +// Create a block from the latest state + +// MakeBlock builds a block with the given txs and commit from the current state. +func (state State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { + // build base block + block := types.MakeBlock(height, txs, commit) + + // fill header with state data + block.ChainID = state.ChainID + block.TotalTxs = state.LastBlockTotalTx + block.NumTxs + block.LastBlockID = state.LastBlockID + block.ValidatorsHash = state.Validators.Hash() + block.AppHash = state.AppHash + block.ConsensusHash = state.ConsensusParams.Hash() + block.LastResultsHash = state.LastResultsHash + + return block, block.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes) +} + +//------------------------------------------------------------------------ +// Genesis + +// MakeGenesisStateFromFile reads and unmarshals state from the given +// file. +// +// Used during replay and in tests. +func MakeGenesisStateFromFile(genDocFile string) (State, error) { + genDoc, err := MakeGenesisDocFromFile(genDocFile) + if err != nil { + return State{}, err + } + return MakeGenesisState(genDoc) +} + +// MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. +func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { + genDocJSON, err := ioutil.ReadFile(genDocFile) + if err != nil { + return nil, fmt.Errorf("Couldn't read GenesisDoc file: %v", err) + } + genDoc, err := types.GenesisDocFromJSON(genDocJSON) + if err != nil { + return nil, fmt.Errorf("Error reading GenesisDoc: %v", err) + } + return genDoc, nil +} + +// MakeGenesisState creates state from types.GenesisDoc. 
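+// A minimal sketch (hypothetical single-validator doc; ValidateAndComplete
+// below fills in any missing defaults):
+//
+//	s, err := MakeGenesisState(&types.GenesisDoc{
+//		ChainID:    "test-chain",
+//		Validators: []types.GenesisValidator{{pubKey, 10, "v0"}},
+//	})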
+func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { + err := genDoc.ValidateAndComplete() + if err != nil { + return State{}, fmt.Errorf("Error in genesis file: %v", err) + } + + // Make validators slice + validators := make([]*types.Validator, len(genDoc.Validators)) + for i, val := range genDoc.Validators { + pubKey := val.PubKey + address := pubKey.Address() + + // Make validator + validators[i] = &types.Validator{ + Address: address, + PubKey: pubKey, + VotingPower: val.Power, + } + } + + return State{ + + ChainID: genDoc.ChainID, + + LastBlockHeight: 0, + LastBlockID: types.BlockID{}, + LastBlockTime: genDoc.GenesisTime, + + Validators: types.NewValidatorSet(validators), + LastValidators: types.NewValidatorSet(nil), + LastHeightValidatorsChanged: 1, + + ConsensusParams: *genDoc.ConsensusParams, + LastHeightConsensusParamsChanged: 1, + + AppHash: genDoc.AppHash, + }, nil +} diff --git a/state/state_test.go b/state/state_test.go new file mode 100644 index 000000000..bf0c910fa --- /dev/null +++ b/state/state_test.go @@ -0,0 +1,489 @@ +package state + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/types" +) + +// setupTestCase does setup common to all test cases +func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { + config := cfg.ResetTestRoot("state_") + dbType := dbm.DBBackendType(config.DBBackend) + stateDB := dbm.NewDB("state", dbType, config.DBDir()) + state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") + + tearDown := func(t *testing.T) {} + + return tearDown, stateDB, state +} + +// TestStateCopy tests the correct copying behaviour of State. +func TestStateCopy(t *testing.T) { + tearDown, _, state := setupTestCase(t) + defer tearDown(t) + // nolint: vetshadow + assert := assert.New(t) + + stateCopy := state.Copy() + + assert.True(state.Equals(stateCopy), + cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", + stateCopy, state)) + + stateCopy.LastBlockHeight++ + assert.False(state.Equals(stateCopy), cmn.Fmt(`expected states to be different. got same + %v`, state)) +} + +// TestStateSaveLoad tests saving and loading State from a db. +func TestStateSaveLoad(t *testing.T) { + tearDown, stateDB, state := setupTestCase(t) + defer tearDown(t) + // nolint: vetshadow + assert := assert.New(t) + + state.LastBlockHeight++ + SaveState(stateDB, state) + + loadedState := LoadState(stateDB) + assert.True(state.Equals(loadedState), + cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", + loadedState, state)) +} + +// TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. 
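+// The round trip is saveABCIResponses followed by LoadABCIResponses at the
+// same height; the loaded value must be Amino-equal to what was saved.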
+func TestABCIResponsesSaveLoad1(t *testing.T) { + tearDown, stateDB, state := setupTestCase(t) + defer tearDown(t) + // nolint: vetshadow + assert := assert.New(t) + + state.LastBlockHeight++ + + // build mock responses + block := makeBlock(state, 2) + abciResponses := NewABCIResponses(block) + abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil} + abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil} + abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{ + types.TM2PB.ValidatorFromPubKeyAndPower(crypto.GenPrivKeyEd25519().PubKey(), 10), + }} + + saveABCIResponses(stateDB, block.Height, abciResponses) + loadedABCIResponses, err := LoadABCIResponses(stateDB, block.Height) + assert.Nil(err) + assert.Equal(abciResponses, loadedABCIResponses, + cmn.Fmt("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", + loadedABCIResponses, abciResponses)) +} + +// TestResultsSaveLoad tests saving and loading abci results. +func TestABCIResponsesSaveLoad2(t *testing.T) { + tearDown, stateDB, _ := setupTestCase(t) + defer tearDown(t) + // nolint: vetshadow + assert := assert.New(t) + + cases := [...]struct { + // height is implied index+2 + // as block 1 is created from genesis + added []*abci.ResponseDeliverTx + expected types.ABCIResults + }{ + 0: { + nil, + nil, + }, + 1: { + []*abci.ResponseDeliverTx{ + {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, + }, + types.ABCIResults{ + {32, []byte("Hello")}, + }}, + 2: { + []*abci.ResponseDeliverTx{ + {Code: 383}, + {Data: []byte("Gotcha!"), + Tags: []cmn.KVPair{ + cmn.KVPair{[]byte("a"), []byte("1")}, + cmn.KVPair{[]byte("build"), []byte("stuff")}, + }}, + }, + types.ABCIResults{ + {383, nil}, + {0, []byte("Gotcha!")}, + }}, + 3: { + nil, + nil, + }, + } + + // query all before, should return error + for i := range cases { + h := int64(i + 1) + res, err := LoadABCIResponses(stateDB, h) + assert.Error(err, "%d: %#v", i, res) + } + + // add all cases + for i, tc := range cases { + h := int64(i + 1) // last block height, one below what we save + responses := &ABCIResponses{ + DeliverTx: tc.added, + EndBlock: &abci.ResponseEndBlock{}, + } + saveABCIResponses(stateDB, h, responses) + } + + // query all before, should return expected value + for i, tc := range cases { + h := int64(i + 1) + res, err := LoadABCIResponses(stateDB, h) + assert.NoError(err, "%d", i) + assert.Equal(tc.expected.Hash(), res.ResultsHash(), "%d", i) + } +} + +// TestValidatorSimpleSaveLoad tests saving and loading validators. 
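+// Note that saveValidatorsInfo keys by the height *after* the last block:
+// a set saved while LastBlockHeight is H becomes loadable at H+1.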
+func TestValidatorSimpleSaveLoad(t *testing.T) {
+	tearDown, stateDB, state := setupTestCase(t)
+	defer tearDown(t)
+	// nolint: vetshadow
+	assert := assert.New(t)
+
+	// can't load anything for height 0
+	v, err := LoadValidators(stateDB, 0)
+	assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0")
+
+	// should be able to load for height 1
+	v, err = LoadValidators(stateDB, 1)
+	assert.Nil(err, "expected no err at height 1")
+	assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match")
+
+	// increment height, save; should be able to load for next height
+	state.LastBlockHeight++
+	nextHeight := state.LastBlockHeight + 1
+	saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators)
+	v, err = LoadValidators(stateDB, nextHeight)
+	assert.Nil(err, "expected no err")
+	assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match")
+
+	// increment height, save; should be able to load for next height
+	state.LastBlockHeight += 10
+	nextHeight = state.LastBlockHeight + 1
+	saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators)
+	v, err = LoadValidators(stateDB, nextHeight)
+	assert.Nil(err, "expected no err")
+	assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match")
+
+	// should NOT be able to load for the height after the next one
+	_, err = LoadValidators(stateDB, state.LastBlockHeight+2)
+	assert.IsType(ErrNoValSetForHeight{}, err, "expected err at unknown height")
+}
+
+// TestOneValidatorChangesSaveLoad tests saving and loading a validator set with changes.
+func TestOneValidatorChangesSaveLoad(t *testing.T) {
+	tearDown, stateDB, state := setupTestCase(t)
+	defer tearDown(t)
+
+	// change vals at these heights
+	changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}
+	N := len(changeHeights)
+
+	// build the validator history by running updateState
+	// with the right validator set for each height
+	highestHeight := changeHeights[N-1] + 5
+	changeIndex := 0
+	_, val := state.Validators.GetByIndex(0)
+	power := val.VotingPower
+	var err error
+	for i := int64(1); i < highestHeight; i++ {
+		// when we get to a change height,
+		// increment the power
+		if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] {
+			changeIndex++
+			power++
+		}
+		header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power)
+		state, err = updateState(state, blockID, header, responses)
+		assert.Nil(t, err)
+		nextHeight := state.LastBlockHeight + 1
+		saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators)
+	}
+
+	// on each change height, increment the power by one.
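+	// testCases[i] is the power we expect LoadValidators to report at height i+1.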
+	testCases := make([]int64, highestHeight)
+	changeIndex = 0
+	power = val.VotingPower
+	for i := int64(1); i < highestHeight+1; i++ {
+		// when we get to the height after a change height,
+		// use the next power (note our counter starts at 0 this time)
+		if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 {
+			changeIndex++
+			power++
+		}
+		testCases[i-1] = power
+	}
+
+	for i, power := range testCases {
+		v, err := LoadValidators(stateDB, int64(i+1))
+		assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i))
+		assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size())
+		_, val := v.GetByIndex(0)
+
+		assert.Equal(t, val.VotingPower, power, fmt.Sprintf("unexpected power at height %d", i))
+	}
+}
+
+// TestManyValidatorChangesSaveLoad tests saving and loading a validator set with
+// changes.
+func TestManyValidatorChangesSaveLoad(t *testing.T) {
+	const valSetSize = 7
+	tearDown, stateDB, state := setupTestCase(t)
+	state.Validators = genValSet(valSetSize)
+	SaveState(stateDB, state)
+	defer tearDown(t)
+
+	const height = 1
+	pubkey := crypto.GenPrivKeyEd25519().PubKey()
+	// swap the first validator with a new one ^^^ (validator set size stays the same)
+	header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey)
+	var err error
+	state, err = updateState(state, blockID, header, responses)
+	require.Nil(t, err)
+	nextHeight := state.LastBlockHeight + 1
+	saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators)
+
+	v, err := LoadValidators(stateDB, height+1)
+	assert.Nil(t, err)
+	assert.Equal(t, valSetSize, v.Size())
+
+	index, val := v.GetByAddress(pubkey.Address())
+	assert.NotNil(t, val)
+	if index < 0 {
+		t.Fatal("expected to find newly added validator")
+	}
+}
+
+func genValSet(size int) *types.ValidatorSet {
+	vals := make([]*types.Validator, size)
+	for i := 0; i < size; i++ {
+		vals[i] = types.NewValidator(crypto.GenPrivKeyEd25519().PubKey(), 10)
+	}
+	return types.NewValidatorSet(vals)
+}
+
+// TestConsensusParamsChangesSaveLoad tests saving and loading consensus params
+// with changes.
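+// It mirrors TestOneValidatorChangesSaveLoad above, with BlockSize.MaxBytes
+// standing in for voting power.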
+func TestConsensusParamsChangesSaveLoad(t *testing.T) {
+	tearDown, stateDB, state := setupTestCase(t)
+	defer tearDown(t)
+
+	// change params at these heights
+	changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}
+	N := len(changeHeights)
+
+	// create the list of params to cycle through
+	// (the genesis params plus N tweaked defaults)
+	params := make([]types.ConsensusParams, N+1)
+	params[0] = state.ConsensusParams
+	for i := 1; i < N+1; i++ {
+		params[i] = *types.DefaultConsensusParams()
+		params[i].BlockSize.MaxBytes += i
+	}
+
+	// build the params history by running updateState
+	// with the right params set for each height
+	highestHeight := changeHeights[N-1] + 5
+	changeIndex := 0
+	cp := params[changeIndex]
+	var err error
+	for i := int64(1); i < highestHeight; i++ {
+		// when we get to a change height,
+		// use the next params
+		if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] {
+			changeIndex++
+			cp = params[changeIndex]
+		}
+		header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp)
+		state, err = updateState(state, blockID, header, responses)
+
+		require.Nil(t, err)
+		nextHeight := state.LastBlockHeight + 1
+		saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams)
+	}
+
+	// make all the test cases by using the same params until after the change
+	testCases := make([]paramsChangeTestCase, highestHeight)
+	changeIndex = 0
+	cp = params[changeIndex]
+	for i := int64(1); i < highestHeight+1; i++ {
+		// when we get to the height after a change height,
+		// use the next params (note our counter starts at 0 this time)
+		if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 {
+			changeIndex++
+			cp = params[changeIndex]
+		}
+		testCases[i-1] = paramsChangeTestCase{i, cp}
+	}
+
+	for _, testCase := range testCases {
+		p, err := LoadConsensusParams(stateDB, testCase.height)
+		assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height))
+		assert.Equal(t, testCase.params, p, fmt.Sprintf("unexpected consensus params at height %d", testCase.height))
+	}
+}
+
+func makeParams(blockBytes, blockTx, blockGas, txBytes,
+	txGas, partSize int) types.ConsensusParams {
+
+	return types.ConsensusParams{
+		BlockSize: types.BlockSize{
+			MaxBytes: blockBytes,
+			MaxTxs:   blockTx,
+			MaxGas:   int64(blockGas),
+		},
+		TxSize: types.TxSize{
+			MaxBytes: txBytes,
+			MaxGas:   int64(txGas),
+		},
+		BlockGossip: types.BlockGossip{
+			BlockPartSizeBytes: partSize,
+		},
+	}
+}
+
+func pk() []byte {
+	return crypto.GenPrivKeyEd25519().PubKey().Bytes()
+}
+
+func TestApplyUpdates(t *testing.T) {
+	initParams := makeParams(1, 2, 3, 4, 5, 6)
+
+	cases := [...]struct {
+		init     types.ConsensusParams
+		updates  abci.ConsensusParams
+		expected types.ConsensusParams
+	}{
+		0: {initParams, abci.ConsensusParams{}, initParams},
+		1: {initParams, abci.ConsensusParams{}, initParams},
+		2: {initParams,
+			abci.ConsensusParams{
+				TxSize: &abci.TxSize{
+					MaxBytes: 123,
+				},
+			},
+			makeParams(1, 2, 3, 123, 5, 6)},
+		3: {initParams,
+			abci.ConsensusParams{
+				BlockSize: &abci.BlockSize{
+					MaxTxs: 44,
+					MaxGas: 55,
+				},
+			},
+			makeParams(1, 44, 55, 4, 5, 6)},
+		4: {initParams,
+			abci.ConsensusParams{
+				BlockSize: &abci.BlockSize{
+					MaxTxs: 789,
+				},
+				TxSize: &abci.TxSize{
+					MaxGas: 888,
+				},
+				BlockGossip: &abci.BlockGossip{
+					BlockPartSizeBytes: 2002,
+				},
+			},
+			makeParams(1, 789, 3, 4, 888, 2002)},
+	}
+
+	for i, tc := range cases {
+		res := tc.init.Update(&(tc.updates))
+		assert.Equal(t, tc.expected, res, "case %d", i)
+	}
+}
+
+func 
makeHeaderPartsResponsesValPubKeyChange(state State, height int64,
+	pubkey crypto.PubKey) (*types.Header, types.BlockID, *ABCIResponses) {
+
+	block := makeBlock(state, height)
+	abciResponses := &ABCIResponses{
+		EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
+	}
+
+	// if the pubkey is new, remove the old and add the new
+	_, val := state.Validators.GetByIndex(0)
+	if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
+		abciResponses.EndBlock = &abci.ResponseEndBlock{
+			ValidatorUpdates: []abci.Validator{
+				types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, 0),
+				types.TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10),
+			},
+		}
+	}
+
+	return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses
+}
+
+func makeHeaderPartsResponsesValPowerChange(state State, height int64,
+	power int64) (*types.Header, types.BlockID, *ABCIResponses) {
+
+	block := makeBlock(state, height)
+	abciResponses := &ABCIResponses{
+		EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
+	}
+
+	// if the power has changed, update the validator with the new power
+	_, val := state.Validators.GetByIndex(0)
+	if val.VotingPower != power {
+		abciResponses.EndBlock = &abci.ResponseEndBlock{
+			ValidatorUpdates: []abci.Validator{
+				types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, power),
+			},
+		}
+	}
+
+	return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses
+}
+
+func makeHeaderPartsResponsesParams(state State, height int64,
+	params types.ConsensusParams) (*types.Header, types.BlockID, *ABCIResponses) {
+
+	block := makeBlock(state, height)
+	abciResponses := &ABCIResponses{
+		EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(&params)},
+	}
+	return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses
+}
+
+type paramsChangeTestCase struct {
+	height int64
+	params types.ConsensusParams
+}
+
+func makeHeaderPartsResults(state State, height int64,
+	results []*abci.ResponseDeliverTx) (*types.Header, types.BlockID, *ABCIResponses) {
+
+	block := makeBlock(state, height)
+	abciResponses := &ABCIResponses{
+		DeliverTx: results,
+		EndBlock:  &abci.ResponseEndBlock{},
+	}
+	return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses
+}
diff --git a/state/store.go b/state/store.go
new file mode 100644
index 000000000..9e94e36fa
--- /dev/null
+++ b/state/store.go
@@ -0,0 +1,293 @@
+package state
+
+import (
+	"fmt"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/types"
+)
+
+//------------------------------------------------------------------------
+
+func calcValidatorsKey(height int64) []byte {
+	return []byte(cmn.Fmt("validatorsKey:%v", height))
+}
+
+func calcConsensusParamsKey(height int64) []byte {
+	return []byte(cmn.Fmt("consensusParamsKey:%v", height))
+}
+
+func calcABCIResponsesKey(height int64) []byte {
+	return []byte(cmn.Fmt("abciResponsesKey:%v", height))
+}
+
+// LoadStateFromDBOrGenesisFile loads the most recent state from the database,
+// or creates a new one from the given genesisFilePath and persists the result
+// to the database.
+func LoadStateFromDBOrGenesisFile(stateDB dbm.DB, genesisFilePath string) (State, error) { + state := LoadState(stateDB) + if state.IsEmpty() { + var err error + state, err = MakeGenesisStateFromFile(genesisFilePath) + if err != nil { + return state, err + } + SaveState(stateDB, state) + } + + return state, nil +} + +// LoadStateFromDBOrGenesisDoc loads the most recent state from the database, +// or creates a new one from the given genesisDoc and persists the result +// to the database. +func LoadStateFromDBOrGenesisDoc(stateDB dbm.DB, genesisDoc *types.GenesisDoc) (State, error) { + state := LoadState(stateDB) + if state.IsEmpty() { + var err error + state, err = MakeGenesisState(genesisDoc) + if err != nil { + return state, err + } + SaveState(stateDB, state) + } + + return state, nil +} + +// LoadState loads the State from the database. +func LoadState(db dbm.DB) State { + return loadState(db, stateKey) +} + +func loadState(db dbm.DB, key []byte) (state State) { + buf := db.Get(key) + if len(buf) == 0 { + return state + } + + err := cdc.UnmarshalBinaryBare(buf, &state) + if err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: + %v\n`, err)) + } + // TODO: ensure that buf is completely read. + + return state +} + +// SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. +func SaveState(db dbm.DB, state State) { + saveState(db, state, stateKey) +} + +func saveState(db dbm.DB, state State, key []byte) { + nextHeight := state.LastBlockHeight + 1 + saveValidatorsInfo(db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) + db.SetSync(stateKey, state.Bytes()) +} + +//------------------------------------------------------------------------ + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. +// It is persisted to disk for each height before calling Commit. +type ABCIResponses struct { + DeliverTx []*abci.ResponseDeliverTx + EndBlock *abci.ResponseEndBlock +} + +// NewABCIResponses returns a new ABCIResponses +func NewABCIResponses(block *types.Block) *ABCIResponses { + resDeliverTxs := make([]*abci.ResponseDeliverTx, block.NumTxs) + if block.NumTxs == 0 { + // This makes Amino encoding/decoding consistent. + resDeliverTxs = nil + } + return &ABCIResponses{ + DeliverTx: resDeliverTxs, + } +} + +// Bytes serializes the ABCIResponse using go-amino. +func (arz *ABCIResponses) Bytes() []byte { + return cdc.MustMarshalBinaryBare(arz) +} + +func (arz *ABCIResponses) ResultsHash() []byte { + results := types.NewResults(arz.DeliverTx) + return results.Hash() +} + +// LoadABCIResponses loads the ABCIResponses for the given height from the database. +// This is useful for recovering from crashes where we called app.Commit and before we called +// s.Save(). It can also be used to produce Merkle proofs of the result of txs. 
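+// For example, a crash-recovery path might do (hypothetical caller):
+//
+//	res, err := LoadABCIResponses(stateDB, height)
+//	// replay from res.DeliverTx / res.EndBlock instead of re-executing the block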
+func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { + buf := db.Get(calcABCIResponsesKey(height)) + if len(buf) == 0 { + return nil, ErrNoABCIResponsesForHeight{height} + } + + abciResponses := new(ABCIResponses) + err := cdc.UnmarshalBinaryBare(buf, abciResponses) + if err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has + changed: %v\n`, err)) + } + // TODO: ensure that buf is completely read. + + return abciResponses, nil +} + +// SaveABCIResponses persists the ABCIResponses to the database. +// This is useful in case we crash after app.Commit and before s.Save(). +// Responses are indexed by height so they can also be loaded later to produce Merkle proofs. +func saveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { + db.SetSync(calcABCIResponsesKey(height), abciResponses.Bytes()) +} + +//----------------------------------------------------------------------------- + +// ValidatorsInfo represents the latest validator set, or the last height it changed +type ValidatorsInfo struct { + ValidatorSet *types.ValidatorSet + LastHeightChanged int64 +} + +// Bytes serializes the ValidatorsInfo using go-amino. +func (valInfo *ValidatorsInfo) Bytes() []byte { + return cdc.MustMarshalBinaryBare(valInfo) +} + +// LoadValidators loads the ValidatorSet for a given height. +// Returns ErrNoValSetForHeight if the validator set can't be found for this height. +func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { + valInfo := loadValidatorsInfo(db, height) + if valInfo == nil { + return nil, ErrNoValSetForHeight{height} + } + + if valInfo.ValidatorSet == nil { + valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged) + if valInfo2 == nil { + panic( + fmt.Sprintf( + "Couldn't find validators at height %d as last changed from height %d", + valInfo.LastHeightChanged, + height, + ), + ) + } + valInfo = valInfo2 + } + + return valInfo.ValidatorSet, nil +} + +func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { + buf := db.Get(calcValidatorsKey(height)) + if len(buf) == 0 { + return nil + } + + v := new(ValidatorsInfo) + err := cdc.UnmarshalBinaryBare(buf, v) + if err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed: + %v\n`, err)) + } + // TODO: ensure that buf is completely read. + + return v +} + +// saveValidatorsInfo persists the validator set for the next block to disk. +// It should be called from s.Save(), right before the state itself is persisted. +// If the validator set did not change after processing the latest block, +// only the last height for which the validators changed is persisted. +func saveValidatorsInfo(db dbm.DB, nextHeight, changeHeight int64, valSet *types.ValidatorSet) { + valInfo := &ValidatorsInfo{ + LastHeightChanged: changeHeight, + } + if changeHeight == nextHeight { + valInfo.ValidatorSet = valSet + } + db.SetSync(calcValidatorsKey(nextHeight), valInfo.Bytes()) +} + +//----------------------------------------------------------------------------- + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +type ConsensusParamsInfo struct { + ConsensusParams types.ConsensusParams + LastHeightChanged int64 +} + +// Bytes serializes the ConsensusParamsInfo using go-amino. 
+func (params ConsensusParamsInfo) Bytes() []byte { + return cdc.MustMarshalBinaryBare(params) +} + +// LoadConsensusParams loads the ConsensusParams for a given height. +func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error) { + empty := types.ConsensusParams{} + + paramsInfo := loadConsensusParamsInfo(db, height) + if paramsInfo == nil { + return empty, ErrNoConsensusParamsForHeight{height} + } + + if paramsInfo.ConsensusParams == empty { + paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) + if paramsInfo2 == nil { + panic( + fmt.Sprintf( + "Couldn't find consensus params at height %d as last changed from height %d", + paramsInfo.LastHeightChanged, + height, + ), + ) + } + paramsInfo = paramsInfo2 + } + + return paramsInfo.ConsensusParams, nil +} + +func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo { + buf := db.Get(calcConsensusParamsKey(height)) + if len(buf) == 0 { + return nil + } + + paramsInfo := new(ConsensusParamsInfo) + err := cdc.UnmarshalBinaryBare(buf, paramsInfo) + if err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been corrupted or its spec has changed: + %v\n`, err)) + } + // TODO: ensure that buf is completely read. + + return paramsInfo +} + +// saveConsensusParamsInfo persists the consensus params for the next block to disk. +// It should be called from s.Save(), right before the state itself is persisted. +// If the consensus params did not change after processing the latest block, +// only the last height for which they changed is persisted. +func saveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) { + paramsInfo := &ConsensusParamsInfo{ + LastHeightChanged: changeHeight, + } + if changeHeight == nextHeight { + paramsInfo.ConsensusParams = params + } + db.SetSync(calcConsensusParamsKey(nextHeight), paramsInfo.Bytes()) +} diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go new file mode 100644 index 000000000..ab509f965 --- /dev/null +++ b/state/txindex/indexer.go @@ -0,0 +1,58 @@ +package txindex + +import ( + "errors" + + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/types" +) + +// TxIndexer interface defines methods to index and search transactions. +type TxIndexer interface { + + // AddBatch analyzes, indexes and stores a batch of transactions. + AddBatch(b *Batch) error + + // Index analyzes, indexes and stores a single transaction. + Index(result *types.TxResult) error + + // Get returns the transaction specified by hash or nil if the transaction is not indexed + // or stored. + Get(hash []byte) (*types.TxResult, error) + + // Search allows you to query for transactions. + Search(q *query.Query) ([]*types.TxResult, error) +} + +//---------------------------------------------------- +// Txs are written as a batch + +// Batch groups together multiple Index operations to be performed at the same time. +// NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. +type Batch struct { + Ops []*types.TxResult +} + +// NewBatch creates a new Batch. +func NewBatch(n int64) *Batch { + return &Batch{ + Ops: make([]*types.TxResult, n), + } +} + +// Add or update an entry for the given result.Index. +func (b *Batch) Add(result *types.TxResult) error { + b.Ops[result.Index] = result + return nil +} + +// Size returns the total number of operations inside the batch. 
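+// A batch built the way IndexerService builds one satisfies
+// Size() == header.NumTxs:
+//
+//	batch := NewBatch(header.NumTxs)
+//	batch.Add(&txResult) // one per tx; slots are keyed by txResult.Index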
+func (b *Batch) Size() int { + return len(b.Ops) +} + +//---------------------------------------------------- +// Errors + +// ErrorEmptyHash indicates empty hash +var ErrorEmptyHash = errors.New("Transaction hash cannot be empty") diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go new file mode 100644 index 000000000..088252f5e --- /dev/null +++ b/state/txindex/indexer_service.go @@ -0,0 +1,73 @@ +package txindex + +import ( + "context" + + cmn "github.com/tendermint/tendermint/libs/common" + + "github.com/tendermint/tendermint/types" +) + +const ( + subscriber = "IndexerService" +) + +// IndexerService connects event bus and transaction indexer together in order +// to index transactions coming from event bus. +type IndexerService struct { + cmn.BaseService + + idr TxIndexer + eventBus *types.EventBus +} + +// NewIndexerService returns a new service instance. +func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService { + is := &IndexerService{idr: idr, eventBus: eventBus} + is.BaseService = *cmn.NewBaseService(nil, "IndexerService", is) + return is +} + +// OnStart implements cmn.Service by subscribing for all transactions +// and indexing them by tags. +func (is *IndexerService) OnStart() error { + blockHeadersCh := make(chan interface{}) + if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryNewBlockHeader, blockHeadersCh); err != nil { + return err + } + + txsCh := make(chan interface{}) + if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryTx, txsCh); err != nil { + return err + } + + go func() { + for { + e, ok := <-blockHeadersCh + if !ok { + return + } + header := e.(types.EventDataNewBlockHeader).Header + batch := NewBatch(header.NumTxs) + for i := int64(0); i < header.NumTxs; i++ { + e, ok := <-txsCh + if !ok { + is.Logger.Error("Failed to index all transactions due to closed transactions channel", "height", header.Height, "numTxs", header.NumTxs, "numProcessed", i) + return + } + txResult := e.(types.EventDataTx).TxResult + batch.Add(&txResult) + } + is.idr.AddBatch(batch) + is.Logger.Info("Indexed block", "height", header.Height) + } + }() + return nil +} + +// OnStop implements cmn.Service by unsubscribing from all transactions. +func (is *IndexerService) OnStop() { + if is.eventBus.IsRunning() { + _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) + } +} diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go new file mode 100644 index 000000000..707325929 --- /dev/null +++ b/state/txindex/kv/kv.go @@ -0,0 +1,437 @@ +package kv + +import ( + "bytes" + "encoding/hex" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/types" +) + +const ( + tagKeySeparator = "/" +) + +var _ txindex.TxIndexer = (*TxIndex)(nil) + +// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). +type TxIndex struct { + store dbm.DB + tagsToIndex []string + indexAllTags bool +} + +// NewTxIndex creates new KV indexer. 
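+// For example, restricting indexing to a whitelist of tags:
+//
+//	indexer := NewTxIndex(db.NewMemDB(), IndexTags([]string{"account.number"}))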
+func NewTxIndex(store dbm.DB, options ...func(*TxIndex)) *TxIndex {
+	txi := &TxIndex{store: store, tagsToIndex: make([]string, 0), indexAllTags: false}
+	for _, o := range options {
+		o(txi)
+	}
+	return txi
+}
+
+// IndexTags is an option for setting which tags to index.
+func IndexTags(tags []string) func(*TxIndex) {
+	return func(txi *TxIndex) {
+		txi.tagsToIndex = tags
+	}
+}
+
+// IndexAllTags is an option for indexing all tags.
+func IndexAllTags() func(*TxIndex) {
+	return func(txi *TxIndex) {
+		txi.indexAllTags = true
+	}
+}
+
+// Get gets a transaction from the TxIndex storage and returns it, or nil if
+// the transaction is not found.
+func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) {
+	if len(hash) == 0 {
+		return nil, txindex.ErrorEmptyHash
+	}
+
+	rawBytes := txi.store.Get(hash)
+	if rawBytes == nil {
+		return nil, nil
+	}
+
+	txResult := new(types.TxResult)
+	err := cdc.UnmarshalBinaryBare(rawBytes, &txResult)
+	if err != nil {
+		return nil, fmt.Errorf("Error reading TxResult: %v", err)
+	}
+
+	return txResult, nil
+}
+
+// AddBatch indexes a batch of transactions using the given list of tags.
+func (txi *TxIndex) AddBatch(b *txindex.Batch) error {
+	storeBatch := txi.store.NewBatch()
+
+	for _, result := range b.Ops {
+		hash := result.Tx.Hash()
+
+		// index tx by tags
+		for _, tag := range result.Result.Tags {
+			if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) {
+				storeBatch.Set(keyForTag(tag, result), hash)
+			}
+		}
+
+		// index tx by hash
+		rawBytes, err := cdc.MarshalBinaryBare(result)
+		if err != nil {
+			return err
+		}
+		storeBatch.Set(hash, rawBytes)
+	}
+
+	storeBatch.Write()
+	return nil
+}
+
+// Index indexes a single transaction using the given list of tags.
+func (txi *TxIndex) Index(result *types.TxResult) error {
+	b := txi.store.NewBatch()
+
+	hash := result.Tx.Hash()
+
+	// index tx by tags
+	for _, tag := range result.Result.Tags {
+		if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) {
+			b.Set(keyForTag(tag, result), hash)
+		}
+	}
+
+	// index tx by hash
+	rawBytes, err := cdc.MarshalBinaryBare(result)
+	if err != nil {
+		return err
+	}
+	b.Set(hash, rawBytes)
+
+	b.Write()
+	return nil
+}
+
+// Search performs a search using the given query. It breaks the query into
+// conditions (like "tx.height > 5"). For each condition, it queries the DB
+// index. Two special cases here: (1) if "tx.hash" is present, the result for
+// that hash is returned immediately; (2) for range queries, it is better for
+// the client to provide both lower and upper bounds, so we are not performing
+// a full scan. Results from querying indexes are then intersected and
+// returned to the caller.
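+// For example (a query of the kind exercised in kv_test.go):
+//
+//	results, err := txi.Search(query.MustParse("account.number >= 1 AND account.owner = 'Ivan'"))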
+func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
+	var hashes [][]byte
+	var hashesInitialized bool
+
+	// get a list of conditions (like "tx.height > 5")
+	conditions := q.Conditions()
+
+	// if there is a hash condition, return the result immediately
+	hash, err, ok := lookForHash(conditions)
+	if err != nil {
+		return nil, errors.Wrap(err, "error during searching for a hash in the query")
+	} else if ok {
+		res, err := txi.Get(hash)
+		if res == nil {
+			return []*types.TxResult{}, nil
+		}
+		return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result")
+	}
+
+	// conditions to skip because they're handled before "everything else"
+	skipIndexes := make([]int, 0)
+
+	// if there is a height condition ("tx.height=3"), extract it for faster lookups
+	height, heightIndex := lookForHeight(conditions)
+	if heightIndex >= 0 {
+		skipIndexes = append(skipIndexes, heightIndex)
+	}
+
+	// extract ranges
+	// if both upper and lower bounds exist, it's better to get them both so as
+	// not to iterate over kvs that are not within range.
+	ranges, rangeIndexes := lookForRanges(conditions)
+	if len(ranges) > 0 {
+		skipIndexes = append(skipIndexes, rangeIndexes...)
+
+		for _, r := range ranges {
+			if !hashesInitialized {
+				hashes = txi.matchRange(r, []byte(r.key))
+				hashesInitialized = true
+			} else {
+				hashes = intersect(hashes, txi.matchRange(r, []byte(r.key)))
+			}
+		}
+	}
+
+	// for all other conditions
+	for i, c := range conditions {
+		if cmn.IntInSlice(i, skipIndexes) {
+			continue
+		}
+
+		if !hashesInitialized {
+			hashes = txi.match(c, startKey(c, height))
+			hashesInitialized = true
+		} else {
+			hashes = intersect(hashes, txi.match(c, startKey(c, height)))
+		}
+	}
+
+	results := make([]*types.TxResult, len(hashes))
+	i := 0
+	for _, h := range hashes {
+		results[i], err = txi.Get(h)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get Tx{%X}", h)
+		}
+		i++
+	}
+
+	// sort by height by default
+	sort.Slice(results, func(i, j int) bool {
+		return results[i].Height < results[j].Height
+	})
+
+	return results, nil
+}
+
+func lookForHash(conditions []query.Condition) (hash []byte, err error, ok bool) {
+	for _, c := range conditions {
+		if c.Tag == types.TxHashKey {
+			decoded, err := hex.DecodeString(c.Operand.(string))
+			return decoded, err, true
+		}
+	}
+	return
+}
+
+func lookForHeight(conditions []query.Condition) (height int64, index int) {
+	for i, c := range conditions {
+		if c.Tag == types.TxHeightKey {
+			return c.Operand.(int64), i
+		}
+	}
+	return 0, -1
+}
+
+// special map to hold range conditions
+// Example: account.number => queryRange{lowerBound: 1, upperBound: 5}
+type queryRanges map[string]queryRange
+
+type queryRange struct {
+	key               string
+	lowerBound        interface{} // int || time.Time
+	includeLowerBound bool
+	upperBound        interface{} // int || time.Time
+	includeUpperBound bool
+}
+
+func (r queryRange) lowerBoundValue() interface{} {
+	if r.lowerBound == nil {
+		return nil
+	}
+
+	if r.includeLowerBound {
+		return r.lowerBound
+	} else {
+		switch t := r.lowerBound.(type) {
+		case int64:
+			return t + 1
+		case time.Time:
+			return t.Unix() + 1
+		default:
+			panic("not implemented")
+		}
+	}
+}
+
+func (r queryRange) AnyBound() interface{} {
+	if r.lowerBound != nil {
+		return r.lowerBound
+	} else {
+		return r.upperBound
+	}
+}
+
+func (r queryRange) upperBoundValue() interface{} {
+	if r.upperBound == nil {
+		return nil
+	}
+
+	if r.includeUpperBound {
+		return r.upperBound
+	} else {
+		switch t := r.upperBound.(type) {
+		case int64:
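+			// an exclusive integer upper bound collapses to <= t-1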
+ return t - 1 + case time.Time: + return t.Unix() - 1 + default: + panic("not implemented") + } + } +} + +func lookForRanges(conditions []query.Condition) (ranges queryRanges, indexes []int) { + ranges = make(queryRanges) + for i, c := range conditions { + if isRangeOperation(c.Op) { + r, ok := ranges[c.Tag] + if !ok { + r = queryRange{key: c.Tag} + } + switch c.Op { + case query.OpGreater: + r.lowerBound = c.Operand + case query.OpGreaterEqual: + r.includeLowerBound = true + r.lowerBound = c.Operand + case query.OpLess: + r.upperBound = c.Operand + case query.OpLessEqual: + r.includeUpperBound = true + r.upperBound = c.Operand + } + ranges[c.Tag] = r + indexes = append(indexes, i) + } + } + return ranges, indexes +} + +func isRangeOperation(op query.Operator) bool { + switch op { + case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: + return true + default: + return false + } +} + +func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte) { + if c.Op == query.OpEqual { + it := dbm.IteratePrefix(txi.store, startKey) + defer it.Close() + for ; it.Valid(); it.Next() { + hashes = append(hashes, it.Value()) + } + } else if c.Op == query.OpContains { + // XXX: doing full scan because startKey does not apply here + // For example, if startKey = "account.owner=an" and search query = "accoutn.owner CONSISTS an" + // we can't iterate with prefix "account.owner=an" because we might miss keys like "account.owner=Ulan" + it := txi.store.Iterator(nil, nil) + defer it.Close() + for ; it.Valid(); it.Next() { + if !isTagKey(it.Key()) { + continue + } + if strings.Contains(extractValueFromKey(it.Key()), c.Operand.(string)) { + hashes = append(hashes, it.Value()) + } + } + } else { + panic("other operators should be handled already") + } + return +} + +func (txi *TxIndex) matchRange(r queryRange, prefix []byte) (hashes [][]byte) { + // create a map to prevent duplicates + hashesMap := make(map[string][]byte) + + lowerBound := r.lowerBoundValue() + upperBound := r.upperBoundValue() + + it := dbm.IteratePrefix(txi.store, prefix) + defer it.Close() +LOOP: + for ; it.Valid(); it.Next() { + if !isTagKey(it.Key()) { + continue + } + switch r.AnyBound().(type) { + case int64: + v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) + if err != nil { + continue LOOP + } + include := true + if lowerBound != nil && v < lowerBound.(int64) { + include = false + } + if upperBound != nil && v > upperBound.(int64) { + include = false + } + if include { + hashesMap[fmt.Sprintf("%X", it.Value())] = it.Value() + } + // XXX: passing time in a ABCI Tags is not yet implemented + // case time.Time: + // v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) + // if v == r.upperBound { + // break + // } + } + } + hashes = make([][]byte, len(hashesMap)) + i := 0 + for _, h := range hashesMap { + hashes[i] = h + i++ + } + return +} + +/////////////////////////////////////////////////////////////////////////////// +// Keys + +func startKey(c query.Condition, height int64) []byte { + var key string + if height > 0 { + key = fmt.Sprintf("%s/%v/%d", c.Tag, c.Operand, height) + } else { + key = fmt.Sprintf("%s/%v", c.Tag, c.Operand) + } + return []byte(key) +} + +func isTagKey(key []byte) bool { + return strings.Count(string(key), tagKeySeparator) == 3 +} + +func extractValueFromKey(key []byte) string { + parts := strings.SplitN(string(key), tagKeySeparator, 3) + return parts[1] +} + +func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte { + return 
[]byte(fmt.Sprintf("%s/%s/%d/%d", tag.Key, tag.Value, result.Height, result.Index)) +} + +/////////////////////////////////////////////////////////////////////////////// +// Utils + +func intersect(as, bs [][]byte) [][]byte { + i := make([][]byte, 0, cmn.MinInt(len(as), len(bs))) + for _, a := range as { + for _, b := range bs { + if bytes.Equal(a, b) { + i = append(i, a) + } + } + } + return i +} diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go new file mode 100644 index 000000000..1272f4a73 --- /dev/null +++ b/state/txindex/kv/kv_test.go @@ -0,0 +1,239 @@ +package kv + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + db "github.com/tendermint/tendermint/libs/db" + + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/types" +) + +func TestTxIndex(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + + tx := types.Tx("HELLO WORLD") + txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} + hash := tx.Hash() + + batch := txindex.NewBatch(1) + if err := batch.Add(txResult); err != nil { + t.Error(err) + } + err := indexer.AddBatch(batch) + require.NoError(t, err) + + loadedTxResult, err := indexer.Get(hash) + require.NoError(t, err) + assert.Equal(t, txResult, loadedTxResult) + + tx2 := types.Tx("BYE BYE WORLD") + txResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} + hash2 := tx2.Hash() + + err = indexer.Index(txResult2) + require.NoError(t, err) + + loadedTxResult2, err := indexer.Get(hash2) + require.NoError(t, err) + assert.Equal(t, txResult2, loadedTxResult2) +} + +func TestTxSearch(t *testing.T) { + allowedTags := []string{"account.number", "account.owner", "account.date"} + indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) + + txResult := txResultWithTags([]cmn.KVPair{ + {Key: []byte("account.number"), Value: []byte("1")}, + {Key: []byte("account.owner"), Value: []byte("Ivan")}, + {Key: []byte("not_allowed"), Value: []byte("Vlad")}, + }) + hash := txResult.Tx.Hash() + + err := indexer.Index(txResult) + require.NoError(t, err) + + testCases := []struct { + q string + resultsLength int + }{ + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash), 1}, + // search by exact match (one tag) + {"account.number = 1", 1}, + // search by exact match (two tags) + {"account.number = 1 AND account.owner = 'Ivan'", 1}, + // search by exact match (two tags) + {"account.number = 1 AND account.owner = 'Vlad'", 0}, + // search by range + {"account.number >= 1 AND account.number <= 5", 1}, + // search by range (lower bound) + {"account.number >= 1", 1}, + // search by range (upper bound) + {"account.number <= 5", 1}, + // search using not allowed tag + {"not_allowed = 'boom'", 0}, + // search for not existing tx result + {"account.number >= 2 AND account.number <= 5", 0}, + // search using not existing tag + {"account.date >= TIME 2013-05-03T14:45:00Z", 0}, + // search using CONTAINS + {"account.owner CONTAINS 'an'", 1}, + // search using CONTAINS + {"account.owner CONTAINS 'Vlad'", 0}, + } + + for _, tc := range testCases { + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(query.MustParse(tc.q)) + assert.NoError(t, err) + + 
assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 { + assert.Equal(t, []*types.TxResult{txResult}, results) + } + }) + } +} + +func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { + allowedTags := []string{"account.number"} + indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) + + txResult := txResultWithTags([]cmn.KVPair{ + {Key: []byte("account.number"), Value: []byte("1")}, + {Key: []byte("account.number"), Value: []byte("2")}, + }) + + err := indexer.Index(txResult) + require.NoError(t, err) + + results, err := indexer.Search(query.MustParse("account.number >= 1")) + assert.NoError(t, err) + + assert.Len(t, results, 1) + assert.Equal(t, []*types.TxResult{txResult}, results) +} + +func TestTxSearchMultipleTxs(t *testing.T) { + allowedTags := []string{"account.number"} + indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) + + // indexed first, but bigger height (to test the order of transactions) + txResult := txResultWithTags([]cmn.KVPair{ + {Key: []byte("account.number"), Value: []byte("1")}, + }) + txResult.Tx = types.Tx("Bob's account") + txResult.Height = 2 + err := indexer.Index(txResult) + require.NoError(t, err) + + // indexed second, but smaller height (to test the order of transactions) + txResult2 := txResultWithTags([]cmn.KVPair{ + {Key: []byte("account.number"), Value: []byte("2")}, + }) + txResult2.Tx = types.Tx("Alice's account") + txResult2.Height = 1 + err = indexer.Index(txResult2) + require.NoError(t, err) + + results, err := indexer.Search(query.MustParse("account.number >= 1")) + assert.NoError(t, err) + + require.Len(t, results, 2) + assert.Equal(t, []*types.TxResult{txResult2, txResult}, results) +} + +func TestIndexAllTags(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) + + txResult := txResultWithTags([]cmn.KVPair{ + cmn.KVPair{[]byte("account.owner"), []byte("Ivan")}, + cmn.KVPair{[]byte("account.number"), []byte("1")}, + }) + + err := indexer.Index(txResult) + require.NoError(t, err) + + results, err := indexer.Search(query.MustParse("account.number >= 1")) + assert.NoError(t, err) + assert.Len(t, results, 1) + assert.Equal(t, []*types.TxResult{txResult}, results) + + results, err = indexer.Search(query.MustParse("account.owner = 'Ivan'")) + assert.NoError(t, err) + assert.Len(t, results, 1) + assert.Equal(t, []*types.TxResult{txResult}, results) +} + +func txResultWithTags(tags []cmn.KVPair) *types.TxResult { + tx := types.Tx("HELLO WORLD") + return &types.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Tags: tags, + Fee: cmn.KI64Pair{Key: nil, Value: 0}, + }, + } +} + +func benchmarkTxIndex(txsCount int64, b *testing.B) { + tx := types.Tx("HELLO WORLD") + txResult := &types.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Tags: []cmn.KVPair{}, + Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}, + }, + } + + dir, err := ioutil.TempDir("", "tx_index_db") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) // nolint: errcheck + + store := db.NewDB("tx_index", "leveldb", dir) + indexer := NewTxIndex(store) + + batch := txindex.NewBatch(txsCount) + for i := int64(0); i < txsCount; i++ { + if err := batch.Add(txResult); err != nil { + b.Fatal(err) + } + txResult.Index++ + } + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + err = indexer.AddBatch(batch) + } + if err != nil { + b.Fatal(err) + } +} + +func 
BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) } +func BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) } +func BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) } +func BenchmarkTxIndex2000(b *testing.B) { benchmarkTxIndex(2000, b) } +func BenchmarkTxIndex10000(b *testing.B) { benchmarkTxIndex(10000, b) } diff --git a/state/txindex/kv/wire.go b/state/txindex/kv/wire.go new file mode 100644 index 000000000..ccca75254 --- /dev/null +++ b/state/txindex/kv/wire.go @@ -0,0 +1,10 @@ +package kv + +import ( + "github.com/tendermint/go-amino" +) + +var cdc = amino.NewCodec() + +func init() { +} diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go new file mode 100644 index 000000000..f85de2e6f --- /dev/null +++ b/state/txindex/null/null.go @@ -0,0 +1,33 @@ +package null + +import ( + "errors" + + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/types" +) + +var _ txindex.TxIndexer = (*TxIndex)(nil) + +// TxIndex acts as a /dev/null. +type TxIndex struct{} + +// Get on a TxIndex is disabled and panics when invoked. +func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { + return nil, errors.New(`Indexing is disabled (set 'tx_index = "kv"' in config)`) +} + +// AddBatch is a noop and always returns nil. +func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { + return nil +} + +// Index is a noop and always returns nil. +func (txi *TxIndex) Index(result *types.TxResult) error { + return nil +} + +func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { + return []*types.TxResult{}, nil +} diff --git a/state/validation.go b/state/validation.go new file mode 100644 index 000000000..c36339203 --- /dev/null +++ b/state/validation.go @@ -0,0 +1,125 @@ +package state + +import ( + "bytes" + "errors" + "fmt" + + "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tendermint/libs/db" +) + +//----------------------------------------------------- +// Validate block + +func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { + // validate internal consistency + if err := block.ValidateBasic(); err != nil { + return err + } + + // validate basic info + if block.ChainID != state.ChainID { + return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", state.ChainID, block.ChainID) + } + if block.Height != state.LastBlockHeight+1 { + return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", state.LastBlockHeight+1, block.Height) + } + /* TODO: Determine bounds for Time + See blockchain/reactor "stopSyncingDurationMinutes" + + if !block.Time.After(lastBlockTime) { + return errors.New("Invalid Block.Header.Time") + } + */ + + // validate prev block info + if !block.LastBlockID.Equals(state.LastBlockID) { + return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", state.LastBlockID, block.LastBlockID) + } + newTxs := int64(len(block.Data.Txs)) + if block.TotalTxs != state.LastBlockTotalTx+newTxs { + return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", state.LastBlockTotalTx+newTxs, block.TotalTxs) + } + + // validate app info + if !bytes.Equal(block.AppHash, state.AppHash) { + return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", state.AppHash, block.AppHash) + } + if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) { + return fmt.Errorf("Wrong Block.Header.ConsensusHash. 
Expected %X, got %v", state.ConsensusParams.Hash(), block.ConsensusHash) + } + if !bytes.Equal(block.LastResultsHash, state.LastResultsHash) { + return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", state.LastResultsHash, block.LastResultsHash) + } + if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) { + return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", state.Validators.Hash(), block.ValidatorsHash) + } + + // Validate block LastCommit. + if block.Height == 1 { + if len(block.LastCommit.Precommits) != 0 { + return errors.New("Block at height 1 (first block) should have no LastCommit precommits") + } + } else { + if len(block.LastCommit.Precommits) != state.LastValidators.Size() { + return fmt.Errorf("Invalid block commit size. Expected %v, got %v", + state.LastValidators.Size(), len(block.LastCommit.Precommits)) + } + err := state.LastValidators.VerifyCommit( + state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit) + if err != nil { + return err + } + } + + // TODO: Each check requires loading an old validator set. + // We should cap the amount of evidence per block + // to prevent potential proposer DoS. + for _, ev := range block.Evidence.Evidence { + if err := VerifyEvidence(stateDB, state, ev); err != nil { + return types.NewEvidenceInvalidErr(ev, err) + } + } + + return nil +} + +// VerifyEvidence verifies the evidence fully by checking: +// - it is sufficiently recent (MaxAge) +// - it is from a key who was a validator at the given height +// - it is internally consistent +// - it was properly signed by the alleged equivocator +func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error { + height := state.LastBlockHeight + + evidenceAge := height - evidence.Height() + maxAge := state.ConsensusParams.EvidenceParams.MaxAge + if evidenceAge > maxAge { + return fmt.Errorf("Evidence from height %d is too old. Min height is %d", + evidence.Height(), height-maxAge) + } + + valset, err := LoadValidators(stateDB, evidence.Height()) + if err != nil { + // TODO: if err is just that we cant find it cuz we pruned, ignore. + // TODO: if its actually bad evidence, punish peer + return err + } + + // The address must have been an active validator at the height. 
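+	// (e.g. evidence from height 100 signed by a key that only joined the
+	// validator set at height 101 is rejected)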
+	// NOTE: we will ignore evidence from H if the key was not a validator
+	// at H, even if it is a validator at some nearby H'
+	ev := evidence
+	height, addr := ev.Height(), ev.Address()
+	_, val := valset.GetByAddress(addr)
+	if val == nil {
+		return fmt.Errorf("Address %X was not a validator at height %d", addr, height)
+	}
+
+	if err := evidence.Verify(state.ChainID, val.PubKey); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/state/validation_test.go b/state/validation_test.go
new file mode 100644
index 000000000..362a40737
--- /dev/null
+++ b/state/validation_test.go
@@ -0,0 +1,68 @@
+package state
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+func TestValidateBlock(t *testing.T) {
+	state, _ := state(1, 1)
+
+	blockExec := NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nil, nil, nil)
+
+	// proper block must pass
+	block := makeBlock(state, 1)
+	err := blockExec.ValidateBlock(state, block)
+	require.NoError(t, err)
+
+	// wrong chain fails
+	block = makeBlock(state, 1)
+	block.ChainID = "not-the-real-one"
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+
+	// wrong height fails
+	block = makeBlock(state, 1)
+	block.Height += 10
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+
+	// wrong total tx fails
+	block = makeBlock(state, 1)
+	block.TotalTxs += 10
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+
+	// wrong blockid fails
+	block = makeBlock(state, 1)
+	block.LastBlockID.PartsHeader.Total += 10
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+
+	// wrong app hash fails
+	block = makeBlock(state, 1)
+	block.AppHash = []byte("wrong app hash")
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+
+	// wrong consensus hash fails
+	block = makeBlock(state, 1)
+	block.ConsensusHash = []byte("wrong consensus hash")
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+
+	// wrong results hash fails
+	block = makeBlock(state, 1)
+	block.LastResultsHash = []byte("wrong results hash")
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+
+	// wrong validators hash fails
+	block = makeBlock(state, 1)
+	block.ValidatorsHash = []byte("wrong validators hash")
+	err = blockExec.ValidateBlock(state, block)
+	require.Error(t, err)
+}
diff --git a/state/wire.go b/state/wire.go
new file mode 100644
index 000000000..af743c7b8
--- /dev/null
+++ b/state/wire.go
@@ -0,0 +1,12 @@
+package state
+
+import (
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	crypto.RegisterAmino(cdc)
+}
diff --git a/test/README.md b/test/README.md
new file mode 100644
index 000000000..fc436948a
--- /dev/null
+++ b/test/README.md
@@ -0,0 +1,21 @@
+# Tendermint Tests
+
+The unit tests (ie. those run by `go test`) can be run with `make test`.
+The integration tests can be run with `make test_integrations`.
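+
+For example, to run just the unit tests of a single package, the usual
+`go test` flags apply (path shown for illustration):
+
+```
+go test -race ./state/txindex/kv/...
+```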
+ +Running the integrations test will build a docker container with local version of tendermint +and run the following tests in docker containers: + +- go tests, with --race + - includes test coverage +- app tests + - kvstore app over socket + - counter app over socket + - counter app over grpc +- persistence tests + - crash tendermint at each of many predefined points, restart, and ensure it syncs properly with the app +- p2p tests + - start a local kvstore app testnet on a docker network (requires docker version 1.10+) + - send a tx on each node and ensure the state root is updated on all of them + - crash and restart nodes one at a time and ensure they can sync back up (via fastsync) + - crash and restart all nodes at once and ensure they can sync back up diff --git a/test/app/clean.sh b/test/app/clean.sh new file mode 100755 index 000000000..22814f015 --- /dev/null +++ b/test/app/clean.sh @@ -0,0 +1,3 @@ +killall tendermint +killall abci-cli +rm -rf ~/.tendermint_app diff --git a/test/app/counter_test.sh b/test/app/counter_test.sh new file mode 100755 index 000000000..868f8d037 --- /dev/null +++ b/test/app/counter_test.sh @@ -0,0 +1,141 @@ +#! /bin/bash + +if [[ "$GRPC_BROADCAST_TX" == "" ]]; then + GRPC_BROADCAST_TX="" +fi + +set -u + +##################### +# counter over socket +##################### +TESTNAME=$1 + +# Send some txs + +function getCode() { + set +u + R=$1 + set -u + if [[ "$R" == "" ]]; then + echo -1 + fi + + if [[ $(echo $R | jq 'has("code")') == "true" ]]; then + # this wont actually work if theres an error ... + echo "$R" | jq ".code" + else + # protobuf auto adds `omitempty` to everything so code OK and empty data/log + # will not even show when marshalled into json + # apparently we can use github.com/golang/protobuf/jsonpb to do the marshalling ... + echo 0 + fi +} + +# build grpc client if needed +if [[ "$GRPC_BROADCAST_TX" != "" ]]; then + if [ -f grpc_client ]; then + rm grpc_client + fi + echo "... building grpc_client" + go build -o grpc_client grpc_client.go +fi + +function sendTx() { + TX=$1 + set +u + SHOULD_ERR=$2 + if [ "$SHOULD_ERR" == "" ]; then + SHOULD_ERR=false + fi + set -u + if [[ "$GRPC_BROADCAST_TX" == "" ]]; then + RESPONSE=$(curl -s localhost:26657/broadcast_tx_commit?tx=0x"$TX") + IS_ERR=$(echo "$RESPONSE" | jq 'has("error")') + ERROR=$(echo "$RESPONSE" | jq '.error') + ERROR=$(echo "$ERROR" | tr -d '"') # remove surrounding quotes + + RESPONSE=$(echo "$RESPONSE" | jq '.result') + else + RESPONSE=$(./grpc_client "$TX") + IS_ERR=false + ERROR="" + fi + + echo "RESPONSE" + echo "$RESPONSE" + + echo "$RESPONSE" | jq . &> /dev/null + IS_JSON=$? + if [[ "$IS_JSON" != "0" ]]; then + IS_ERR=true + ERROR="$RESPONSE" + fi + APPEND_TX_RESPONSE=$(echo "$RESPONSE" | jq '.deliver_tx') + APPEND_TX_CODE=$(getCode "$APPEND_TX_RESPONSE") + CHECK_TX_RESPONSE=$(echo "$RESPONSE" | jq '.check_tx') + CHECK_TX_CODE=$(getCode "$CHECK_TX_RESPONSE") + + echo "-------" + echo "TX $TX" + echo "RESPONSE $RESPONSE" + echo "ERROR $ERROR" + echo "IS_ERR $IS_ERR" + echo "----" + + if $SHOULD_ERR; then + if [[ "$IS_ERR" != "true" ]]; then + echo "Expected error sending tx ($TX)" + exit 1 + fi + else + if [[ "$IS_ERR" == "true" ]]; then + echo "Unexpected error sending tx ($TX)" + exit 1 + fi + + fi +} + +echo "... sending tx. expect no error" + +# 0 should pass once and get in block, with no error +TX=00 +sendTx $TX +if [[ $APPEND_TX_CODE != 0 ]]; then + echo "Got non-zero exit code for $TX. $RESPONSE" + exit 1 +fi + + +echo "... sending tx. 
expect error" + +# second time should get rejected by the mempool (return error and non-zero code) +sendTx $TX true + + +echo "... sending tx. expect no error" + +# now, TX=01 should pass, with no error +TX=01 +sendTx $TX +if [[ $APPEND_TX_CODE != 0 ]]; then + echo "Got non-zero exit code for $TX. $RESPONSE" + exit 1 +fi + +echo "... sending tx. expect no error, but invalid" + +# now, TX=03 should get in a block (passes CheckTx, no error), but is invalid +TX=03 +sendTx $TX +if [[ "$CHECK_TX_CODE" != 0 ]]; then + echo "Got non-zero exit code for checktx on $TX. $RESPONSE" + exit 1 +fi +if [[ $APPEND_TX_CODE == 0 ]]; then + echo "Got zero exit code for $TX. Should have been bad nonce. $RESPONSE" + exit 1 +fi + +echo "Passed Test: $TESTNAME" diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go new file mode 100644 index 000000000..c55713c7f --- /dev/null +++ b/test/app/grpc_client.go @@ -0,0 +1,42 @@ +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "os" + + "context" + + "github.com/tendermint/tendermint/rpc/grpc" +) + +var grpcAddr = "tcp://localhost:36656" + +func main() { + args := os.Args + if len(args) == 1 { + fmt.Println("Must enter a transaction to send (hex)") + os.Exit(1) + } + tx := args[1] + txBytes, err := hex.DecodeString(tx) + if err != nil { + fmt.Println("Invalid hex", err) + os.Exit(1) + } + + clientGRPC := core_grpc.StartGRPCClient(grpcAddr) + res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{txBytes}) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + bz, err := json.Marshal(res) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Println(string(bz)) +} diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh new file mode 100755 index 000000000..67f6b583c --- /dev/null +++ b/test/app/kvstore_test.sh @@ -0,0 +1,84 @@ +#! /bin/bash +set -ex + +function toHex() { + echo -n $1 | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}' + +} + +##################### +# kvstore with curl +##################### +TESTNAME=$1 + +# store key value pair +KEY="abcd" +VALUE="dcba" +echo $(toHex $KEY=$VALUE) +curl -s 127.0.0.1:26657/broadcast_tx_commit?tx=$(toHex $KEY=$VALUE) +echo $? +echo "" + + +########################### +# test using the abci-cli +########################### + +echo "... testing query with abci-cli" + +# we should be able to look up the key +RESPONSE=`abci-cli query \"$KEY\"` + +set +e +A=`echo $RESPONSE | grep "$VALUE"` +if [[ $? != 0 ]]; then + echo "Failed to find $VALUE for $KEY. Response:" + echo "$RESPONSE" + exit 1 +fi +set -e + +# we should not be able to look up the value +RESPONSE=`abci-cli query \"$VALUE\"` +set +e +A=`echo $RESPONSE | grep $VALUE` +if [[ $? == 0 ]]; then + echo "Found '$VALUE' for $VALUE when we should not have. Response:" + echo "$RESPONSE" + exit 1 +fi +set -e + +############################# +# test using the /abci_query +############################# + +echo "... testing query with /abci_query 2" + +# we should be able to look up the key +RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $KEY)&prove=false"` +RESPONSE=`echo $RESPONSE | jq .result.response.log` + +set +e +A=`echo $RESPONSE | grep 'exists'` +if [[ $? != 0 ]]; then + echo "Failed to find 'exists' for $KEY. 
Response:" + echo "$RESPONSE" + exit 1 +fi +set -e + +# we should not be able to look up the value +RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $VALUE)&prove=false"` +RESPONSE=`echo $RESPONSE | jq .result.response.log` +set +e +A=`echo $RESPONSE | grep 'exists'` +if [[ $? == 0 ]]; then + echo "Found 'exists' for $VALUE when we should not have. Response:" + echo "$RESPONSE" + exit 1 +fi +set -e + + +echo "Passed Test: $TESTNAME" diff --git a/test/app/test.sh b/test/app/test.sh new file mode 100755 index 000000000..0f77da04e --- /dev/null +++ b/test/app/test.sh @@ -0,0 +1,129 @@ +#! /bin/bash +set -ex + +#- kvstore over socket, curl +#- counter over socket, curl +#- counter over grpc, curl +#- counter over grpc, grpc + +# TODO: install everything + +export PATH="$GOBIN:$PATH" +export TMHOME=$HOME/.tendermint_app + +function kvstore_over_socket(){ + rm -rf $TMHOME + tendermint init + echo "Starting kvstore_over_socket" + abci-cli kvstore > /dev/null & + pid_kvstore=$! + tendermint node > tendermint.log & + pid_tendermint=$! + sleep 5 + + echo "running test" + bash kvstore_test.sh "KVStore over Socket" + + kill -9 $pid_kvstore $pid_tendermint +} + +# start tendermint first +function kvstore_over_socket_reorder(){ + rm -rf $TMHOME + tendermint init + echo "Starting kvstore_over_socket_reorder (ie. start tendermint first)" + tendermint node > tendermint.log & + pid_tendermint=$! + sleep 2 + abci-cli kvstore > /dev/null & + pid_kvstore=$! + sleep 5 + + echo "running test" + bash kvstore_test.sh "KVStore over Socket" + + kill -9 $pid_kvstore $pid_tendermint +} + + +function counter_over_socket() { + rm -rf $TMHOME + tendermint init + echo "Starting counter_over_socket" + abci-cli counter --serial > /dev/null & + pid_counter=$! + tendermint node > tendermint.log & + pid_tendermint=$! + sleep 5 + + echo "running test" + bash counter_test.sh "Counter over Socket" + + kill -9 $pid_counter $pid_tendermint +} + +function counter_over_grpc() { + rm -rf $TMHOME + tendermint init + echo "Starting counter_over_grpc" + abci-cli counter --serial --abci grpc > /dev/null & + pid_counter=$! + tendermint node --abci grpc > tendermint.log & + pid_tendermint=$! + sleep 5 + + echo "running test" + bash counter_test.sh "Counter over GRPC" + + kill -9 $pid_counter $pid_tendermint +} + +function counter_over_grpc_grpc() { + rm -rf $TMHOME + tendermint init + echo "Starting counter_over_grpc_grpc (ie. with grpc broadcast_tx)" + abci-cli counter --serial --abci grpc > /dev/null & + pid_counter=$! + sleep 1 + GRPC_PORT=36656 + tendermint node --abci grpc --rpc.grpc_laddr tcp://localhost:$GRPC_PORT > tendermint.log & + pid_tendermint=$! 
+    sleep 5
+
+    echo "running test"
+    GRPC_BROADCAST_TX=true bash counter_test.sh "Counter over GRPC via GRPC BroadcastTx"
+
+    kill -9 $pid_counter $pid_tendermint
+}
+
+cd $GOPATH/src/github.com/tendermint/tendermint/test/app
+
+case "$1" in
+    "kvstore_over_socket")
+        kvstore_over_socket
+        ;;
+    "kvstore_over_socket_reorder")
+        kvstore_over_socket_reorder
+        ;;
+    "counter_over_socket")
+        counter_over_socket
+        ;;
+    "counter_over_grpc")
+        counter_over_grpc
+        ;;
+    "counter_over_grpc_grpc")
+        counter_over_grpc_grpc
+        ;;
+    *)
+        echo "Running all"
+        kvstore_over_socket
+        echo ""
+        kvstore_over_socket_reorder
+        echo ""
+        counter_over_socket
+        echo ""
+        counter_over_grpc
+        echo ""
+        counter_over_grpc_grpc
+esac
+
diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile
new file mode 100644
index 000000000..8b69d27fe
--- /dev/null
+++ b/test/docker/Dockerfile
@@ -0,0 +1,35 @@
+FROM golang:1.10
+
+# Add testing deps for curl
+RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list
+
+# Grab deps (jq, hexdump, xxd, killall)
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    jq bsdmainutils vim-common psmisc netcat curl
+
+# Setup tendermint repo
+ENV REPO $GOPATH/src/github.com/tendermint/tendermint
+ENV GOBIN $GOPATH/bin
+WORKDIR $REPO
+
+# Install the vendored dependencies before copying code
+# docker caching prevents reinstall on code change!
+ADD Gopkg.toml Gopkg.toml
+ADD Gopkg.lock Gopkg.lock
+ADD Makefile Makefile
+RUN make get_tools
+RUN make get_vendor_deps
+
+# Now copy in the code
+# NOTE: this will overwrite whatever is in vendor/
+COPY . $REPO
+
+RUN go install ./cmd/tendermint
+RUN go install ./abci/cmd/abci-cli
+
+# expose the volume for debugging
+VOLUME $REPO
+
+EXPOSE 26656
+EXPOSE 26657
diff --git a/test/docker/build.sh b/test/docker/build.sh
new file mode 100644
index 000000000..39df08720
--- /dev/null
+++ b/test/docker/build.sh
@@ -0,0 +1,3 @@
+#! /bin/bash
+
+docker build -t tester -f ./test/docker/Dockerfile .
diff --git a/test/p2p/README.md b/test/p2p/README.md
new file mode 100644
index 000000000..4ee3690af
--- /dev/null
+++ b/test/p2p/README.md
@@ -0,0 +1,54 @@
+# Tendermint P2P Tests
+
+These scripts facilitate setting up and testing a local testnet using docker containers.
+
+Set up your own local testnet as follows.
+
+For consistency, we assume all commands are run from the Tendermint repository root (ie. $GOPATH/src/github.com/tendermint/tendermint).
+
+First, build the docker image:
+
+```
+docker build -t tendermint_tester -f ./test/docker/Dockerfile .
+```
+
+Now create the docker network:
+
+```
+docker network create --driver bridge --subnet 172.57.0.0/16 my_testnet
+```
+
+This gives us a new network with IP addresses in the range `172.57.0.0 - 172.57.255.255`.
+Peers on the network can have any IP address in this range.
+For our four-node network, let's pick `172.57.0.101 - 172.57.0.104`.
+Since we use Tendermint's default listening port of 26656, our list of seed nodes will look like:
+
+```
+172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656
+```
+
+Now we can start up the peers. We already have config files set up in `test/p2p/data/`.
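+
+Each `mach<i>` directory there is a ready-made `TMHOME` for one node: it holds
+the shared `genesis.json` plus that node's own `node_key.json` and
+`priv_validator.json`. For instance:
+
+```
+ls test/p2p/data/mach1/core/config/
+# genesis.json  node_key.json  priv_validator.json
+```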
+Let's use a for-loop to start our peers: + +``` +for i in $(seq 1 4); do + docker run -d \ + --net=my_testnet\ + --ip="172.57.0.$((100 + $i))" \ + --name local_testnet_$i \ + --entrypoint tendermint \ + -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \ + tendermint_tester node --p2p.persistent_peers 172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656 --proxy_app=kvstore +done +``` + +If you now run `docker ps`, you'll see your containers! + +We can confirm they are making blocks by checking the `/status` message using `curl` and `jq` to pretty print the output json: + +``` +curl 172.57.0.101:26657/status | jq . +``` + + + diff --git a/test/p2p/atomic_broadcast/test.sh b/test/p2p/atomic_broadcast/test.sh new file mode 100644 index 000000000..f066707d3 --- /dev/null +++ b/test/p2p/atomic_broadcast/test.sh @@ -0,0 +1,75 @@ +#! /bin/bash +set -u + +N=$1 + +################################################################### +# assumes peers are already synced up +# test sending txs +# for each peer: +# send a tx, wait for commit +# assert app hash on every peer reflects the post tx state +################################################################### + +echo "" +# run the test on each of them +for i in $(seq 1 "$N"); do + addr=$(test/p2p/ip.sh "$i"):26657 + + # current state + HASH1=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash) + + # - send a tx + TX=aadeadbeefbeefbeef0$i + echo "Broadcast Tx $TX" + curl -s "$addr/broadcast_tx_commit?tx=0x$TX" + echo "" + + # we need to wait another block to get the new app_hash + h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | jq fromjson) + h2=$h1 + while [ "$h2" == "$h1" ]; do + sleep 1 + h2=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | jq fromjson) + done + + # wait for all other peers to get to this height + minHeight=$h2 + for j in $(seq 1 "$N"); do + if [[ "$i" != "$j" ]]; then + addrJ=$(test/p2p/ip.sh "$j"):26657 + + h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height | jq fromjson) + while [ "$h" -lt "$minHeight" ]; do + sleep 1 + h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height | jq fromjson) + done + fi + done + + # check that hash was updated + HASH2=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash) + if [[ "$HASH1" == "$HASH2" ]]; then + echo "Expected state hash to update from $HASH1. Got $HASH2" + exit 1 + fi + + # check we get the same new hash on all other nodes + for j in $(seq 1 "$N"); do + if [[ "$i" != "$j" ]]; then + addrJ=$(test/p2p/ip.sh "$j"):26657 + HASH3=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_app_hash) + + if [[ "$HASH2" != "$HASH3" ]]; then + echo "App hash for node $j doesn't match. Got $HASH3, expected $HASH2" + exit 1 + fi + fi + done + + echo "All nodes are up to date" +done + +echo "" +echo "PASS" +echo "" diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh new file mode 100755 index 000000000..caf665122 --- /dev/null +++ b/test/p2p/basic/test.sh @@ -0,0 +1,74 @@ +#! 
/bin/bash +set -u + +N=$1 + +################################################################### +# wait for all peers to come online +# for each peer: +# wait to have N-1 peers +# wait to be at height > 1 +################################################################### + +# wait 60s per step per peer +MAX_SLEEP=60 + +# wait for everyone to come online +echo "Waiting for nodes to come online" +for i in `seq 1 $N`; do + addr=$(test/p2p/ip.sh $i):26657 + curl -s $addr/status > /dev/null + ERR=$? + COUNT=0 + while [ "$ERR" != 0 ]; do + sleep 1 + curl -s $addr/status > /dev/null + ERR=$? + COUNT=$((COUNT+1)) + if [ "$COUNT" -gt "$MAX_SLEEP" ]; then + echo "Waited too long for node $i to come online" + exit 1 + fi + done + echo "... node $i is up" +done + +echo "" +# wait for each of them to sync up +for i in `seq 1 $N`; do + addr=$(test/p2p/ip.sh $i):26657 + N_1=$(($N - 1)) + + # - assert everyone has N-1 other peers + N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` + COUNT=0 + while [ "$N_PEERS" != $N_1 ]; do + echo "Waiting for node $i to connect to all peers ..." + sleep 1 + N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` + COUNT=$((COUNT+1)) + if [ "$COUNT" -gt "$MAX_SLEEP" ]; then + echo "Waited too long for node $i to connect to all peers" + exit 1 + fi + done + + # - assert block height is greater than 1 + BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` + COUNT=0 + while [ "$BLOCK_HEIGHT" -le 1 ]; do + echo "Waiting for node $i to commit a block ..." + sleep 1 + BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` + COUNT=$((COUNT+1)) + if [ "$COUNT" -gt "$MAX_SLEEP" ]; then + echo "Waited too long for node $i to commit a block" + exit 1 + fi + done + echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT" +done + +echo "" +echo "PASS" +echo "" diff --git a/test/p2p/circleci.sh b/test/p2p/circleci.sh new file mode 100644 index 000000000..19200afbe --- /dev/null +++ b/test/p2p/circleci.sh @@ -0,0 +1,35 @@ +#! /bin/bash +set -eux + +# Get the directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +LOGS_DIR="$DIR/../logs" +echo +echo "* [$(date +"%T")] cleaning up $LOGS_DIR" +rm -rf "$LOGS_DIR" +mkdir -p "$LOGS_DIR" + +set +e +echo +echo "* [$(date +"%T")] removing run_test container" +docker rm -vf run_test +set -e + +echo +echo "* [$(date +"%T")] starting rsyslog container" +docker rm -f rsyslog || true +docker run -d -v "$LOGS_DIR:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog + +set +u +if [[ "$SKIP_BUILD" == "" ]]; then + echo + echo "* [$(date +"%T")] building docker image" + bash "$DIR/../docker/build.sh" +fi + +echo +echo "* [$(date +"%T")] running p2p tests on a local docker network" +bash "$DIR/../p2p/test.sh" tester diff --git a/test/p2p/client.sh b/test/p2p/client.sh new file mode 100644 index 000000000..fa11ce870 --- /dev/null +++ b/test/p2p/client.sh @@ -0,0 +1,19 @@ +#! 
/bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +ID=$3 +CMD=$4 + +NAME=test_container_$ID + +echo "starting test client container with CMD=$CMD" +# run the test container on the local network +docker run -t --rm \ + -v "$GOPATH/src/github.com/tendermint/tendermint/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p" \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "-1") \ + --name "$NAME" \ + --entrypoint bash \ + "$DOCKER_IMAGE" $CMD diff --git a/test/p2p/data/mach1/core/config/genesis.json b/test/p2p/data/mach1/core/config/genesis.json new file mode 100644 index 000000000..515c10714 --- /dev/null +++ b/test/p2p/data/mach1/core/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "2016-06-24T20:01:19.322Z", + "chain_id": "chain-9ujDWI", + "validators": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" + }, + "power": "1", + "name": "mach1" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" + }, + "power": "1", + "name": "mach2" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" + }, + "power": "1", + "name": "mach3" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" + }, + "power": "1", + "name": "mach4" + } + ], + "app_hash": "" +} diff --git a/test/p2p/data/mach1/core/config/node_key.json b/test/p2p/data/mach1/core/config/node_key.json new file mode 100644 index 000000000..4fa960850 --- /dev/null +++ b/test/p2p/data/mach1/core/config/node_key.json @@ -0,0 +1,6 @@ +{ + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "BpYtFp8xSrudBa5aBLRuSPD72PGDAUm0dJORDL3Kd5YJbluUzRefVFrjwoHZv1yeDj2P9xkEi2L3hJCUz/qFkQ==" + } +} diff --git a/test/p2p/data/mach1/core/config/priv_validator.json b/test/p2p/data/mach1/core/config/priv_validator.json new file mode 100644 index 000000000..ea2a01f5c --- /dev/null +++ b/test/p2p/data/mach1/core/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "AE47BBD4B3ACD80BFE17F6E0C66C5B8492A81AE4", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" + }, + "last_height": "0", + "last_round": "0", + "last_step": 0, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "VHqgfHqM4WxcsqQMbCbRWwoylgQQqfHqblC2NvGrOJq+iTPf8WAMAm40cY8XhaTN6rkMNWmLOU44tpR66R3hFg==" + } +} diff --git a/test/p2p/data/mach2/core/config/genesis.json b/test/p2p/data/mach2/core/config/genesis.json new file mode 100644 index 000000000..515c10714 --- /dev/null +++ b/test/p2p/data/mach2/core/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "2016-06-24T20:01:19.322Z", + "chain_id": "chain-9ujDWI", + "validators": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" + }, + "power": "1", + "name": "mach1" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" + }, + "power": "1", + "name": "mach2" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" + }, + "power": "1", + "name": "mach3" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" + }, + "power": "1", + "name": "mach4" + } + ], + "app_hash": "" +} diff --git 
a/test/p2p/data/mach2/core/config/node_key.json b/test/p2p/data/mach2/core/config/node_key.json new file mode 100644 index 000000000..6eb151106 --- /dev/null +++ b/test/p2p/data/mach2/core/config/node_key.json @@ -0,0 +1,6 @@ +{ + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "uM6LDVE4wQIIUmq9rc6RxzX8zEGG4G4Jcuw15klzQopF68YfJM4bkbPSavurEcJ4nvBMusKBg2GcARFrZqnFKA==" + } +} diff --git a/test/p2p/data/mach2/core/config/priv_validator.json b/test/p2p/data/mach2/core/config/priv_validator.json new file mode 100644 index 000000000..6e0cd7f8f --- /dev/null +++ b/test/p2p/data/mach2/core/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "5D61EE46CCE91F579086522D7FD8CEC3F854E946", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" + }, + "last_height": "0", + "last_round": "0", + "last_step": 0, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "0EeInmBQL8MSnQq38zSxg47Z7R7Nmcu5a3GtWr9agUNtxTRGUyMSZYfSoqk7WdaJtxcHOx3paKJabvE9WVMYrQ==" + } +} diff --git a/test/p2p/data/mach3/core/config/genesis.json b/test/p2p/data/mach3/core/config/genesis.json new file mode 100644 index 000000000..515c10714 --- /dev/null +++ b/test/p2p/data/mach3/core/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "2016-06-24T20:01:19.322Z", + "chain_id": "chain-9ujDWI", + "validators": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" + }, + "power": "1", + "name": "mach1" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" + }, + "power": "1", + "name": "mach2" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" + }, + "power": "1", + "name": "mach3" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" + }, + "power": "1", + "name": "mach4" + } + ], + "app_hash": "" +} diff --git a/test/p2p/data/mach3/core/config/node_key.json b/test/p2p/data/mach3/core/config/node_key.json new file mode 100644 index 000000000..0885bcf9c --- /dev/null +++ b/test/p2p/data/mach3/core/config/node_key.json @@ -0,0 +1,6 @@ +{ + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "kT3orG0YkipT9rAZbvAjtGk/7Pu1ZeCE8LSUF2jz2uiSs1rdlUVi/gccRlvCRLKvrtSicOyEkmk0FHPOGS3mgg==" + } +} diff --git a/test/p2p/data/mach3/core/config/priv_validator.json b/test/p2p/data/mach3/core/config/priv_validator.json new file mode 100644 index 000000000..ec68ca7bb --- /dev/null +++ b/test/p2p/data/mach3/core/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "705F9DA2CC7D7AF5F4519455ED99622E40E439A1", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" + }, + "last_height": "0", + "last_round": "0", + "last_step": 0, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "waTkfzSfxfVW9Kmie6d2uUQkwxK6ps9u5EuGc0jXw/KuZ6xpfRNaoLRgHqV+qrP+v0uqTyKcRaWYwphbEvzRoQ==" + } +} diff --git a/test/p2p/data/mach4/core/config/genesis.json b/test/p2p/data/mach4/core/config/genesis.json new file mode 100644 index 000000000..515c10714 --- /dev/null +++ b/test/p2p/data/mach4/core/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "2016-06-24T20:01:19.322Z", + "chain_id": "chain-9ujDWI", + "validators": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" + }, 
+ "power": "1", + "name": "mach1" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" + }, + "power": "1", + "name": "mach2" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" + }, + "power": "1", + "name": "mach3" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" + }, + "power": "1", + "name": "mach4" + } + ], + "app_hash": "" +} diff --git a/test/p2p/data/mach4/core/config/node_key.json b/test/p2p/data/mach4/core/config/node_key.json new file mode 100644 index 000000000..d6a5d79c2 --- /dev/null +++ b/test/p2p/data/mach4/core/config/node_key.json @@ -0,0 +1,6 @@ +{ + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "QIIm8/QEEawiJi3Zozv+J9b+1CufCEkGs3lxGMlRy4L4FVIXCoXJTwYIrotZtwoMqLYEqQV1hbKKJmFA3GFelw==" + } +} diff --git a/test/p2p/data/mach4/core/config/priv_validator.json b/test/p2p/data/mach4/core/config/priv_validator.json new file mode 100644 index 000000000..468550ea8 --- /dev/null +++ b/test/p2p/data/mach4/core/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "D1054266EC9EEA511ED9A76DEFD520BBE1B5E850", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" + }, + "last_height": "0", + "last_round": "0", + "last_step": 0, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "xMw+0o8CDC29qYvNvwjDztNwRw508l6TjV0pXo49KwyevI9YztS0bc1auKulkd0lPNfLUDcnP9oyvAtkYcTv2Q==" + } +} diff --git a/test/p2p/fast_sync/check_peer.sh b/test/p2p/fast_sync/check_peer.sh new file mode 100644 index 000000000..d5d3fc2b5 --- /dev/null +++ b/test/p2p/fast_sync/check_peer.sh @@ -0,0 +1,43 @@ +#! /bin/bash +set -eu +set -o pipefail + +ID=$1 + +########################################### +# +# Wait for peer to catchup to other peers +# +########################################### + +addr=$(test/p2p/ip.sh $ID):26657 +peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1 +peer_addr=$(test/p2p/ip.sh $peerID):26657 + +# get another peer's height +h1=`curl -s $peer_addr/status | jq .result.sync_info.latest_block_height | jq fromjson` + +# get another peer's state +root1=`curl -s $peer_addr/status | jq .result.sync_info.latest_app_hash` + +echo "Other peer is on height $h1 with state $root1" +echo "Waiting for peer $ID to catch up" + +# wait for it to sync to past its previous height +set +e +set +o pipefail +h2="0" +while [[ "$h2" -lt "$(($h1+3))" ]]; do + sleep 1 + h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` + echo "... $h2" +done + +# check the app hash +root2=`curl -s $addr/status | jq .result.sync_info.latest_app_hash` + +if [[ "$root1" != "$root2" ]]; then + echo "App hash after fast sync does not match. Got $root2; expected $root1" + exit 1 +fi +echo "... fast sync successful" diff --git a/test/p2p/fast_sync/test.sh b/test/p2p/fast_sync/test.sh new file mode 100644 index 000000000..8820d199c --- /dev/null +++ b/test/p2p/fast_sync/test.sh @@ -0,0 +1,16 @@ +#! 
/bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +N=$3 +PROXY_APP=$4 + +cd $GOPATH/src/github.com/tendermint/tendermint + +# run it on each of them +for i in `seq 1 $N`; do + bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N $PROXY_APP +done + + diff --git a/test/p2p/fast_sync/test_peer.sh b/test/p2p/fast_sync/test_peer.sh new file mode 100644 index 000000000..08ea9deb1 --- /dev/null +++ b/test/p2p/fast_sync/test_peer.sh @@ -0,0 +1,38 @@ +#! /bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +ID=$3 +N=$4 +PROXY_APP=$5 + +############################################################### +# this runs on each peer: +# kill peer +# bring it back online via fast sync +# wait for it to sync and check the app hash +############################################################### + + +echo "Testing fastsync on node $ID" + +# kill peer +set +e # circle sigh :( + docker rm -vf local_testnet_$ID + set -e + + # restart peer - should have an empty blockchain + PERSISTENT_PEERS="$(test/p2p/ip_plus_id.sh 1 $DOCKER_IMAGE):26656" + for j in `seq 2 $N`; do + PERSISTENT_PEERS="$PERSISTENT_PEERS,$(test/p2p/ip_plus_id.sh $j $DOCKER_IMAGE):26656" + done + bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP "--p2p.persistent_peers $PERSISTENT_PEERS --p2p.pex --rpc.unsafe" + + # wait for peer to sync and check the app hash + bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$ID "test/p2p/fast_sync/check_peer.sh $ID" + + echo "" + echo "PASS" + echo "" + diff --git a/test/p2p/ip.sh b/test/p2p/ip.sh new file mode 100755 index 000000000..77753f541 --- /dev/null +++ b/test/p2p/ip.sh @@ -0,0 +1,5 @@ +#! /bin/bash +set -eu + +ID=$1 +echo "172.57.0.$((100+$ID))" diff --git a/test/p2p/ip_plus_id.sh b/test/p2p/ip_plus_id.sh new file mode 100755 index 000000000..0d2248fe0 --- /dev/null +++ b/test/p2p/ip_plus_id.sh @@ -0,0 +1,7 @@ +#! /bin/bash +set -eu + +ID=$1 +DOCKER_IMAGE=$2 +NODEID="$(docker run --rm -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core $DOCKER_IMAGE tendermint show_node_id)" +echo "$NODEID@172.57.0.$((100+$ID))" diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh new file mode 100644 index 000000000..87a768110 --- /dev/null +++ b/test/p2p/kill_all/check_peers.sh @@ -0,0 +1,49 @@ +#! /bin/bash +set -eu + +NUM_OF_PEERS=$1 + +# how many attempts for each peer to catch up by height +MAX_ATTEMPTS_TO_CATCH_UP=120 + +echo "Waiting for nodes to come online" +set +e +for i in $(seq 1 "$NUM_OF_PEERS"); do + addr=$(test/p2p/ip.sh "$i"):26657 + curl -s "$addr/status" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 1 + curl -s "$addr/status" > /dev/null + ERR=$? + done + echo "... node $i is up" +done +set -e + +# get the first peer's height +addr=$(test/p2p/ip.sh 1):26657 +h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) +echo "1st peer is on height $h1" + +echo "Waiting until other peers reporting a height higher than the 1st one" +for i in $(seq 2 "$NUM_OF_PEERS"); do + attempt=1 + hi=0 + + while [[ $hi -le $h1 ]] ; do + addr=$(test/p2p/ip.sh "$i"):26657 + hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) + + echo "... 
peer $i is on height $hi" + + ((attempt++)) + if [ "$attempt" -ge $MAX_ATTEMPTS_TO_CATCH_UP ] ; then + echo "$attempt unsuccessful attempts were made to catch up" + curl -s "$addr/dump_consensus_state" | jq .result + exit 1 + fi + + sleep 1 + done +done diff --git a/test/p2p/kill_all/test.sh b/test/p2p/kill_all/test.sh new file mode 100644 index 000000000..318a1fe47 --- /dev/null +++ b/test/p2p/kill_all/test.sh @@ -0,0 +1,32 @@ +#! /bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +NUM_OF_PEERS=$3 +NUM_OF_CRASHES=$4 + +cd "$GOPATH/src/github.com/tendermint/tendermint" + +############################################################### +# NUM_OF_CRASHES times: +# restart all peers +# wait for them to sync and check that they are making progress +############################################################### + +for i in $(seq 1 "$NUM_OF_CRASHES"); do + echo "" + echo "Restarting all peers! Take $i ..." + + # restart all peers + for j in $(seq 1 "$NUM_OF_PEERS"); do + docker stop "local_testnet_$j" + docker start "local_testnet_$j" + done + + bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" kill_all_$i "test/p2p/kill_all/check_peers.sh $NUM_OF_PEERS" +done + +echo "" +echo "PASS" +echo "" diff --git a/test/p2p/local_testnet_start.sh b/test/p2p/local_testnet_start.sh new file mode 100644 index 000000000..25b3c6d3e --- /dev/null +++ b/test/p2p/local_testnet_start.sh @@ -0,0 +1,24 @@ +#! /bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +N=$3 +APP_PROXY=$4 + +set +u +PERSISTENT_PEERS=$5 +if [[ "$PERSISTENT_PEERS" != "" ]]; then + echo "PersistentPeers: $PERSISTENT_PEERS" + PERSISTENT_PEERS="--p2p.persistent_peers $PERSISTENT_PEERS" +fi +set -u + +cd "$GOPATH/src/github.com/tendermint/tendermint" + +# create docker network +docker network create --driver bridge --subnet 172.57.0.0/16 "$NETWORK_NAME" + +for i in $(seq 1 "$N"); do + bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$i" "$APP_PROXY" "$PERSISTENT_PEERS --p2p.pex --rpc.unsafe" +done diff --git a/test/p2p/local_testnet_stop.sh b/test/p2p/local_testnet_stop.sh new file mode 100644 index 000000000..1dace4694 --- /dev/null +++ b/test/p2p/local_testnet_stop.sh @@ -0,0 +1,12 @@ +#! /bin/bash +set -u + +NETWORK_NAME=$1 +N=$2 + +for i in $(seq 1 "$N"); do + docker stop "local_testnet_$i" + docker rm -vf "local_testnet_$i" +done + +docker network rm "$NETWORK_NAME" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh new file mode 100644 index 000000000..15d44ff33 --- /dev/null +++ b/test/p2p/peer.sh @@ -0,0 +1,27 @@ +#! /bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +ID=$3 +APP_PROXY=$4 + +set +u +NODE_FLAGS=$5 +set -u + +echo "starting tendermint peer ID=$ID" +# start tendermint container on the network +# NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be +# treated as one flag. +docker run -d \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" diff --git a/test/p2p/persistent_peers.sh b/test/p2p/persistent_peers.sh new file mode 100644 index 000000000..6d3e1ed66 --- /dev/null +++ b/test/p2p/persistent_peers.sh @@ -0,0 +1,13 @@ +#! 
/bin/bash +set -eu + +N=$1 +DOCKER_IMAGE=$2 + +cd "$GOPATH/src/github.com/tendermint/tendermint" + +persistent_peers="$(test/p2p/ip_plus_id.sh 1 $DOCKER_IMAGE):26656" +for i in $(seq 2 $N); do + persistent_peers="$persistent_peers,$(test/p2p/ip_plus_id.sh $i $DOCKER_IMAGE):26656" +done +echo "$persistent_peers" diff --git a/test/p2p/pex/check_peer.sh b/test/p2p/pex/check_peer.sh new file mode 100644 index 000000000..7ae42e9b6 --- /dev/null +++ b/test/p2p/pex/check_peer.sh @@ -0,0 +1,17 @@ +#! /bin/bash +set -u + +ID=$1 +N=$2 + +addr=$(test/p2p/ip.sh "$ID"):26657 + +echo "2. wait until peer $ID connects to other nodes using pex reactor" +peers_count="0" +while [[ "$peers_count" -lt "$((N-1))" ]]; do + sleep 1 + peers_count=$(curl -s "$addr/net_info" | jq ".result.peers | length") + echo "... peers count = $peers_count, expected = $((N-1))" +done + +echo "... successful" diff --git a/test/p2p/pex/dial_peers.sh b/test/p2p/pex/dial_peers.sh new file mode 100644 index 000000000..43bde48b5 --- /dev/null +++ b/test/p2p/pex/dial_peers.sh @@ -0,0 +1,23 @@ +#! /bin/bash +set -u + +N=$1 +PEERS=$2 + +cd "$GOPATH/src/github.com/tendermint/tendermint" + +echo "Waiting for nodes to come online" +for i in $(seq 1 "$N"); do + addr=$(test/p2p/ip.sh "$i"):26657 + curl -s "$addr/status" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 1 + curl -s "$addr/status" > /dev/null + ERR=$? + done + echo "... node $i is up" +done + +IP=$(test/p2p/ip.sh 1) +curl "$IP:26657/dial_peers?persistent=true&peers=\\[$PEERS\\]" diff --git a/test/p2p/pex/test.sh b/test/p2p/pex/test.sh new file mode 100644 index 000000000..ffecd6510 --- /dev/null +++ b/test/p2p/pex/test.sh @@ -0,0 +1,15 @@ +#! /bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +N=$3 +PROXY_APP=$4 + +cd "$GOPATH/src/github.com/tendermint/tendermint" + +echo "Test reconnecting from the address book" +bash test/p2p/pex/test_addrbook.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" + +echo "Test connecting via /dial_peers" +bash test/p2p/pex/test_dial_peers.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" diff --git a/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh new file mode 100644 index 000000000..d54bcf428 --- /dev/null +++ b/test/p2p/pex/test_addrbook.sh @@ -0,0 +1,57 @@ +#! /bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +N=$3 +PROXY_APP=$4 + +ID=1 + +echo "----------------------------------------------------------------------" +echo "Testing pex creates the addrbook and uses it if persistent_peers are not provided" +echo "(assuming peers are started with pex enabled)" + +CLIENT_NAME="pex_addrbook_$ID" + +echo "1. 
restart peer $ID" +docker stop "local_testnet_$ID" +# preserve addrbook.json +docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" "/tmp/addrbook.json" +set +e #CIRCLE +docker rm -vf "local_testnet_$ID" +set -e + +# NOTE that we do not provide persistent_peers +bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" +docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" +echo "with the following addrbook:" +cat /tmp/addrbook.json +# exec doesn't work on circle +# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" +echo "" + +# if the client runs forever, it means addrbook wasn't saved or was empty +bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" + +echo "----------------------------------------------------------------------" +echo "Testing other peers connect to us if we have neither persistent_peers nor the addrbook" +echo "(assuming peers are started with pex enabled)" + +CLIENT_NAME="pex_no_addrbook_$ID" + +echo "1. restart peer $ID" +docker stop "local_testnet_$ID" +set +e #CIRCLE +docker rm -vf "local_testnet_$ID" +set -e + +# NOTE that we do not provide persistent_peers +bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" + +# if the client runs forever, it means other peers have removed us from their books (which should not happen) +bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" + +echo "" +echo "PASS" +echo "" diff --git a/test/p2p/pex/test_dial_peers.sh b/test/p2p/pex/test_dial_peers.sh new file mode 100644 index 000000000..cb6e7e182 --- /dev/null +++ b/test/p2p/pex/test_dial_peers.sh @@ -0,0 +1,39 @@ +#! /bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=$2 +N=$3 +PROXY_APP=$4 + +ID=1 + +cd $GOPATH/src/github.com/tendermint/tendermint + +echo "----------------------------------------------------------------------" +echo "Testing full network connection using one /dial_peers call" +echo "(assuming peers are started with pex enabled)" + +# stop the existing testnet and remove local network +set +e +bash test/p2p/local_testnet_stop.sh $NETWORK_NAME $N +set -e + +# start the testnet on a local network +# NOTE we re-use the same network for all tests +bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP "" + +PERSISTENT_PEERS="\"$(test/p2p/ip_plus_id.sh 1 $DOCKER_IMAGE):26656\"" +for i in $(seq 2 $N); do + PERSISTENT_PEERS="$PERSISTENT_PEERS,\"$(test/p2p/ip_plus_id.sh $i $DOCKER_IMAGE):26656\"" +done +echo "$PERSISTENT_PEERS" + +# dial peers from one node +CLIENT_NAME="dial_peers" +bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/pex/dial_peers.sh $N $PERSISTENT_PEERS" + +# test basic connectivity and consensus +# start client container and check the num peers and height for all nodes +CLIENT_NAME="dial_peers_basic" +bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/basic/test.sh $N" diff --git a/test/p2p/test.sh b/test/p2p/test.sh new file mode 100644 index 000000000..abcf2ca07 --- /dev/null +++ b/test/p2p/test.sh @@ -0,0 +1,38 @@ +#! 
/bin/bash +set -eu + +DOCKER_IMAGE=$1 +NETWORK_NAME=local_testnet +N=4 +PROXY_APP=persistent_kvstore + +cd "$GOPATH/src/github.com/tendermint/tendermint" + +# stop the existing testnet and remove local network +set +e +bash test/p2p/local_testnet_stop.sh "$NETWORK_NAME" "$N" +set -e + +PERSISTENT_PEERS=$(bash test/p2p/persistent_peers.sh $N $DOCKER_IMAGE) + +# start the testnet on a local network +# NOTE we re-use the same network for all tests +bash test/p2p/local_testnet_start.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" "$PERSISTENT_PEERS" + +# test basic connectivity and consensus +# start client container and check the num peers and height for all nodes +bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" basic "test/p2p/basic/test.sh $N" + +# test atomic broadcast: +# start client container and test sending a tx to each node +bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" ab "test/p2p/atomic_broadcast/test.sh $N" + +# test fast sync (from current state of network): +# for each node, kill it and readd via fast sync +bash test/p2p/fast_sync/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" + +# test killing all peers 3 times +bash test/p2p/kill_all/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" 3 + +# test pex +bash test/p2p/pex/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" diff --git a/test/persist/test_failure_indices.sh b/test/persist/test_failure_indices.sh new file mode 100644 index 000000000..4d523d943 --- /dev/null +++ b/test/persist/test_failure_indices.sh @@ -0,0 +1,124 @@ +#! /bin/bash + +export PATH="$GOBIN:$PATH" +export TMHOME=$HOME/.tendermint_persist + +rm -rf "$TMHOME" +tendermint init + +# use a unix socket so we can remove it +RPC_ADDR="$(pwd)/rpc.sock" + +TM_CMD="tendermint node --log_level=debug --rpc.laddr=unix://$RPC_ADDR" # &> tendermint_${name}.log" +DUMMY_CMD="abci-cli kvstore --persist $TMHOME/kvstore" # &> kvstore_${name}.log" + + +function start_procs(){ + name=$1 + indexToFail=$2 + echo "Starting persistent kvstore and tendermint" + if [[ "$CIRCLECI" == true ]]; then + $DUMMY_CMD & + else + $DUMMY_CMD &> "kvstore_${name}.log" & + fi + PID_DUMMY=$! + + # before starting tendermint, remove the rpc socket + rm -f $RPC_ADDR + if [[ "$indexToFail" == "" ]]; then + # run in background, dont fail + if [[ "$CIRCLECI" == true ]]; then + $TM_CMD & + else + $TM_CMD &> "tendermint_${name}.log" & + fi + PID_TENDERMINT=$! + else + # run in foreground, fail + if [[ "$CIRCLECI" == true ]]; then + FAIL_TEST_INDEX=$indexToFail $TM_CMD + else + FAIL_TEST_INDEX=$indexToFail $TM_CMD &> "tendermint_${name}.log" + fi + PID_TENDERMINT=$! + fi +} + +function kill_procs(){ + kill -9 "$PID_DUMMY" "$PID_TENDERMINT" + wait "$PID_DUMMY" + wait "$PID_TENDERMINT" +} + +# wait for port to be available +function wait_for_port() { + port=$1 + # this will succeed while port is bound + nc -z 127.0.0.1 $port + ERR=$? + i=0 + while [ "$ERR" == 0 ]; do + echo "... port $port is still bound. waiting ..." + sleep 1 + nc -z 127.0.0.1 $port + ERR=$? + i=$((i + 1)) + if [[ $i == 10 ]]; then + echo "Timed out waiting for port to be released" + exit 1 + fi + done + echo "... port $port is free!" +} + + +failsStart=0 +fails=$(grep -r "fail.Fail" --include \*.go . 
| wc -l) +failsEnd=$((fails-1)) + +for failIndex in $(seq $failsStart $failsEnd); do + echo "" + echo "* Test FailIndex $failIndex" + # test failure at failIndex + + bash $(dirname $0)/txs.sh "localhost:26657" & + start_procs 1 "$failIndex" + + # tendermint should already have exited when it hits the fail index + # but kill -9 for good measure + kill_procs + + start_procs 2 + + # wait for node to handshake and make a new block + # NOTE: --unix-socket is only available in curl v7.40+ + curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null + ERR=$? + i=0 + while [ "$ERR" != 0 ]; do + sleep 1 + curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null + ERR=$? + i=$((i + 1)) + if [[ $i == 20 ]]; then + echo "Timed out waiting for tendermint to start" + exit 1 + fi + done + + # wait for a new block + h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height) + h2=$h1 + while [ "$h2" == "$h1" ]; do + sleep 1 + h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height) + done + + kill_procs + + echo "* Passed Test for FailIndex $failIndex" + echo "" +done + +echo "Passed Test: Persistence" diff --git a/test/persist/test_simple.sh b/test/persist/test_simple.sh new file mode 100644 index 000000000..706e04c26 --- /dev/null +++ b/test/persist/test_simple.sh @@ -0,0 +1,70 @@ +#! /bin/bash + + +export TMHOME=$HOME/.tendermint_persist + +rm -rf $TMHOME +tendermint init + +function start_procs(){ + name=$1 + echo "Starting persistent kvstore and tendermint" + abci-cli kvstore --persist $TMHOME/kvstore &> "kvstore_${name}.log" & + PID_DUMMY=$! + tendermint node &> tendermint_${name}.log & + PID_TENDERMINT=$! + sleep 5 +} + +function kill_procs(){ + kill -9 $PID_DUMMY $PID_TENDERMINT +} + + +function send_txs(){ + # send a bunch of txs over a few blocks + echo "Sending txs" + for i in `seq 1 5`; do + for j in `seq 1 100`; do + tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'` + curl -s 127.0.0.1:26657/broadcast_tx_async?tx=0x$tx &> /dev/null + done + sleep 1 + done +} + + +start_procs 1 +send_txs +kill_procs + +start_procs 2 + +# wait for node to handshake and make a new block +addr="localhost:26657" +curl -s $addr/status > /dev/null +ERR=$? +i=0 +while [ "$ERR" != 0 ]; do + sleep 1 + curl -s $addr/status > /dev/null + ERR=$? + i=$(($i + 1)) + if [[ $i == 10 ]]; then + echo "Timed out waiting for tendermint to start" + exit 1 + fi +done + +# wait for a new block +h1=`curl -s $addr/status | jq .result.sync_info.latest_block_height` +h2=$h1 +while [ "$h2" == "$h1" ]; do + sleep 1 + h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height` +done + +kill_procs +sleep 2 + +echo "Passed Test: Persistence" diff --git a/test/persist/txs.sh b/test/persist/txs.sh new file mode 100644 index 000000000..120aa8a56 --- /dev/null +++ b/test/persist/txs.sh @@ -0,0 +1,23 @@ +#! /bin/bash +set -u + +# wait till node is up, send txs +ADDR=$1 #="127.0.0.1:26657" +curl -s $ADDR/status > /dev/null +ERR=$? +while [ "$ERR" != 0 ]; do + sleep 1 + curl -s $ADDR/status > /dev/null + ERR=$? 
+done + +# send a bunch of txs over a few blocks +echo "Node is up, sending txs" +for i in $(seq 1 5); do + for _ in $(seq 1 100); do + tx=$(head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"') + curl -s "$ADDR/broadcast_tx_async?tx=0x$tx" &> /dev/null + done + echo "sent 100" + sleep 1 +done diff --git a/test/test_cover.sh b/test/test_cover.sh new file mode 100644 index 000000000..5f2dea3ee --- /dev/null +++ b/test/test_cover.sh @@ -0,0 +1,14 @@ +#! /bin/bash + +PKGS=$(go list github.com/tendermint/tendermint/... | grep -v /vendor/) + +set -e + +echo "mode: atomic" > coverage.txt +for pkg in ${PKGS[@]}; do + go test -timeout 5m -race -coverprofile=profile.out -covermode=atomic "$pkg" + if [ -f profile.out ]; then + tail -n +2 profile.out >> coverage.txt; + rm profile.out + fi +done diff --git a/types/block.go b/types/block.go new file mode 100644 index 000000000..bc018ee89 --- /dev/null +++ b/types/block.go @@ -0,0 +1,577 @@ +package types + +import ( + "bytes" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Block defines the atomic unit of a Tendermint blockchain. +// TODO: add Version byte +type Block struct { + mtx sync.Mutex + *Header `json:"header"` + *Data `json:"data"` + Evidence EvidenceData `json:"evidence"` + LastCommit *Commit `json:"last_commit"` +} + +// MakeBlock returns a new block with an empty header, except what can be computed from itself. +// It populates the same set of fields validated by ValidateBasic +func MakeBlock(height int64, txs []Tx, commit *Commit) *Block { + block := &Block{ + Header: &Header{ + Height: height, + Time: time.Now(), + NumTxs: int64(len(txs)), + }, + LastCommit: commit, + Data: &Data{ + Txs: txs, + }, + } + block.fillHeader() + return block +} + +// AddEvidence appends the given evidence to the block +func (b *Block) AddEvidence(evidence []Evidence) { + b.Evidence.Evidence = append(b.Evidence.Evidence, evidence...) +} + +// ValidateBasic performs basic validation that doesn't involve state data. +// It checks the internal consistency of the block. +func (b *Block) ValidateBasic() error { + if b == nil { + return errors.New("Nil blocks are invalid") + } + b.mtx.Lock() + defer b.mtx.Unlock() + + newTxs := int64(len(b.Data.Txs)) + if b.NumTxs != newTxs { + return fmt.Errorf("Wrong Block.Header.NumTxs. Expected %v, got %v", newTxs, b.NumTxs) + } + if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { + return fmt.Errorf("Wrong Block.Header.LastCommitHash. Expected %v, got %v", b.LastCommitHash, b.LastCommit.Hash()) + } + if b.Header.Height != 1 { + if err := b.LastCommit.ValidateBasic(); err != nil { + return err + } + } + if !bytes.Equal(b.DataHash, b.Data.Hash()) { + return fmt.Errorf("Wrong Block.Header.DataHash. Expected %v, got %v", b.DataHash, b.Data.Hash()) + } + if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { + return errors.New(cmn.Fmt("Wrong Block.Header.EvidenceHash. Expected %v, got %v", b.EvidenceHash, b.Evidence.Hash())) + } + return nil +} + +// fillHeader fills in any remaining header fields that are a function of the block data +func (b *Block) fillHeader() { + if b.LastCommitHash == nil { + b.LastCommitHash = b.LastCommit.Hash() + } + if b.DataHash == nil { + b.DataHash = b.Data.Hash() + } + if b.EvidenceHash == nil { + b.EvidenceHash = b.Evidence.Hash() + } +} + +// Hash computes and returns the block hash. 
+// If the block is incomplete, block hash is nil for safety.
+func (b *Block) Hash() cmn.HexBytes {
+	if b == nil {
+		return nil
+	}
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+
+	if b.Header == nil || b.Data == nil || b.LastCommit == nil {
+		return nil
+	}
+	b.fillHeader()
+	return b.Header.Hash()
+}
+
+// MakePartSet returns a PartSet containing parts of a serialized block.
+// This is the form in which the block is gossiped to peers.
+func (b *Block) MakePartSet(partSize int) *PartSet {
+	if b == nil {
+		return nil
+	}
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+
+	// We prefix the byte length, so that unmarshaling
+	// can easily happen via a reader.
+	bz, err := cdc.MarshalBinary(b)
+	if err != nil {
+		panic(err)
+	}
+	return NewPartSetFromData(bz, partSize)
+}
+
+// HashesTo is a convenience function that checks if a block hashes to the given argument.
+// Returns false if the block is nil or the hash is empty.
+func (b *Block) HashesTo(hash []byte) bool {
+	if len(hash) == 0 {
+		return false
+	}
+	if b == nil {
+		return false
+	}
+	return bytes.Equal(b.Hash(), hash)
+}
+
+// Size returns the size of the block in bytes.
+func (b *Block) Size() int {
+	bz, err := cdc.MarshalBinaryBare(b)
+	if err != nil {
+		return 0
+	}
+	return len(bz)
+}
+
+// String returns a string representation of the block
+func (b *Block) String() string {
+	return b.StringIndented("")
+}
+
+// StringIndented returns a string representation of the block
+func (b *Block) StringIndented(indent string) string {
+	if b == nil {
+		return "nil-Block"
+	}
+	return fmt.Sprintf(`Block{
+%s  %v
+%s  %v
+%s  %v
+%s  %v
+%s}#%v`,
+		indent, b.Header.StringIndented(indent+"  "),
+		indent, b.Data.StringIndented(indent+"  "),
+		indent, b.Evidence.StringIndented(indent+"  "),
+		indent, b.LastCommit.StringIndented(indent+"  "),
+		indent, b.Hash())
+}
+
+// StringShort returns a shortened string representation of the block
+func (b *Block) StringShort() string {
+	if b == nil {
+		return "nil-Block"
+	}
+	return fmt.Sprintf("Block#%v", b.Hash())
+}
+
+//-----------------------------------------------------------------------------
+
+// Header defines the structure of a Tendermint block header
+// TODO: limit header size
+// NOTE: changes to the Header should be duplicated in the abci Header
+type Header struct {
+	// basic block info
+	ChainID string    `json:"chain_id"`
+	Height  int64     `json:"height"`
+	Time    time.Time `json:"time"`
+	NumTxs  int64     `json:"num_txs"`
+
+	// prev block info
+	LastBlockID BlockID `json:"last_block_id"`
+	TotalTxs    int64   `json:"total_txs"`
+
+	// hashes of block data
+	LastCommitHash cmn.HexBytes `json:"last_commit_hash"` // commit from validators from the last block
+	DataHash       cmn.HexBytes `json:"data_hash"`        // transactions
+
+	// hashes from the app output from the prev block
+	ValidatorsHash  cmn.HexBytes `json:"validators_hash"`   // validators for the current block
+	ConsensusHash   cmn.HexBytes `json:"consensus_hash"`    // consensus params for current block
+	AppHash         cmn.HexBytes `json:"app_hash"`          // state after txs from the previous block
+	LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block
+
+	// consensus info
+	EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block
+}
+
+// Hash returns the hash of the header.
+// Returns nil if ValidatorsHash is missing,
+// since a Header is not valid unless there is
+// a ValidatorsHash (corresponding to the validator set).
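+//
+// Illustrative behavior (see TestNilHeaderHashDoesntCrash in block_test.go):
+//
+//	(&Header{}).Hash() // nil, since ValidatorsHash is unset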
+func (h *Header) Hash() cmn.HexBytes { + if h == nil || len(h.ValidatorsHash) == 0 { + return nil + } + return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ + "ChainID": aminoHasher(h.ChainID), + "Height": aminoHasher(h.Height), + "Time": aminoHasher(h.Time), + "NumTxs": aminoHasher(h.NumTxs), + "TotalTxs": aminoHasher(h.TotalTxs), + "LastBlockID": aminoHasher(h.LastBlockID), + "LastCommit": aminoHasher(h.LastCommitHash), + "Data": aminoHasher(h.DataHash), + "Validators": aminoHasher(h.ValidatorsHash), + "App": aminoHasher(h.AppHash), + "Consensus": aminoHasher(h.ConsensusHash), + "Results": aminoHasher(h.LastResultsHash), + "Evidence": aminoHasher(h.EvidenceHash), + }) +} + +// StringIndented returns a string representation of the header +func (h *Header) StringIndented(indent string) string { + if h == nil { + return "nil-Header" + } + return fmt.Sprintf(`Header{ +%s ChainID: %v +%s Height: %v +%s Time: %v +%s NumTxs: %v +%s TotalTxs: %v +%s LastBlockID: %v +%s LastCommit: %v +%s Data: %v +%s Validators: %v +%s App: %v +%s Consensus: %v +%s Results: %v +%s Evidence: %v +%s}#%v`, + indent, h.ChainID, + indent, h.Height, + indent, h.Time, + indent, h.NumTxs, + indent, h.TotalTxs, + indent, h.LastBlockID, + indent, h.LastCommitHash, + indent, h.DataHash, + indent, h.ValidatorsHash, + indent, h.AppHash, + indent, h.ConsensusHash, + indent, h.LastResultsHash, + indent, h.EvidenceHash, + indent, h.Hash()) +} + +//------------------------------------- + +// Commit contains the evidence that a block was committed by a set of validators. +// NOTE: Commit is empty for height 1, but never nil. +type Commit struct { + // NOTE: The Precommits are in order of address to preserve the bonded ValidatorSet order. + // Any peer with a block can gossip precommits by index with a peer without recalculating the + // active ValidatorSet. + BlockID BlockID `json:"block_id"` + Precommits []*Vote `json:"precommits"` + + // Volatile + firstPrecommit *Vote + hash cmn.HexBytes + bitArray *cmn.BitArray +} + +// FirstPrecommit returns the first non-nil precommit in the commit. +// If all precommits are nil, it returns an empty precommit with height 0. 
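+//
+// Sketch of the possible outcomes:
+//
+//	(&Commit{}).FirstPrecommit() // nil: no precommits at all
+//	// all precommits nil -> &Vote{Type: VoteTypePrecommit} (empty precommit)
+//	// otherwise          -> first non-nil precommit, cached in firstPrecommit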
+func (commit *Commit) FirstPrecommit() *Vote { + if len(commit.Precommits) == 0 { + return nil + } + if commit.firstPrecommit != nil { + return commit.firstPrecommit + } + for _, precommit := range commit.Precommits { + if precommit != nil { + commit.firstPrecommit = precommit + return precommit + } + } + return &Vote{ + Type: VoteTypePrecommit, + } +} + +// Height returns the height of the commit +func (commit *Commit) Height() int64 { + if len(commit.Precommits) == 0 { + return 0 + } + return commit.FirstPrecommit().Height +} + +// Round returns the round of the commit +func (commit *Commit) Round() int { + if len(commit.Precommits) == 0 { + return 0 + } + return commit.FirstPrecommit().Round +} + +// Type returns the vote type of the commit, which is always VoteTypePrecommit +func (commit *Commit) Type() byte { + return VoteTypePrecommit +} + +// Size returns the number of votes in the commit +func (commit *Commit) Size() int { + if commit == nil { + return 0 + } + return len(commit.Precommits) +} + +// BitArray returns a BitArray of which validators voted in this commit +func (commit *Commit) BitArray() *cmn.BitArray { + if commit.bitArray == nil { + commit.bitArray = cmn.NewBitArray(len(commit.Precommits)) + for i, precommit := range commit.Precommits { + // TODO: need to check the BlockID otherwise we could be counting conflicts, + // not just the one with +2/3 ! + commit.bitArray.SetIndex(i, precommit != nil) + } + } + return commit.bitArray +} + +// GetByIndex returns the vote corresponding to a given validator index +func (commit *Commit) GetByIndex(index int) *Vote { + return commit.Precommits[index] +} + +// IsCommit returns true if there is at least one vote +func (commit *Commit) IsCommit() bool { + return len(commit.Precommits) != 0 +} + +// ValidateBasic performs basic validation that doesn't involve state data. +func (commit *Commit) ValidateBasic() error { + if commit.BlockID.IsZero() { + return errors.New("Commit cannot be for nil block") + } + if len(commit.Precommits) == 0 { + return errors.New("No precommits in commit") + } + height, round := commit.Height(), commit.Round() + + // validate the precommits + for _, precommit := range commit.Precommits { + // It's OK for precommits to be missing. + if precommit == nil { + continue + } + // Ensure that all votes are precommits + if precommit.Type != VoteTypePrecommit { + return fmt.Errorf("Invalid commit vote. Expected precommit, got %v", + precommit.Type) + } + // Ensure that all heights are the same + if precommit.Height != height { + return fmt.Errorf("Invalid commit precommit height. Expected %v, got %v", + height, precommit.Height) + } + // Ensure that all rounds are the same + if precommit.Round != round { + return fmt.Errorf("Invalid commit precommit round. 
Expected %v, got %v", + round, precommit.Round) + } + } + return nil +} + +// Hash returns the hash of the commit +func (commit *Commit) Hash() cmn.HexBytes { + if commit.hash == nil { + bs := make([]merkle.Hasher, len(commit.Precommits)) + for i, precommit := range commit.Precommits { + bs[i] = aminoHasher(precommit) + } + commit.hash = merkle.SimpleHashFromHashers(bs) + } + return commit.hash +} + +// StringIndented returns a string representation of the commit +func (commit *Commit) StringIndented(indent string) string { + if commit == nil { + return "nil-Commit" + } + precommitStrings := make([]string, len(commit.Precommits)) + for i, precommit := range commit.Precommits { + precommitStrings[i] = precommit.String() + } + return fmt.Sprintf(`Commit{ +%s BlockID: %v +%s Precommits: %v +%s}#%v`, + indent, commit.BlockID, + indent, strings.Join(precommitStrings, "\n"+indent+" "), + indent, commit.hash) +} + +//----------------------------------------------------------------------------- + +// SignedHeader is a header along with the commits that prove it +type SignedHeader struct { + Header *Header `json:"header"` + Commit *Commit `json:"commit"` +} + +//----------------------------------------------------------------------------- + +// Data contains the set of transactions included in the block +type Data struct { + + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + Txs Txs `json:"txs"` + + // Volatile + hash cmn.HexBytes +} + +// Hash returns the hash of the data +func (data *Data) Hash() cmn.HexBytes { + if data == nil { + return (Txs{}).Hash() + } + if data.hash == nil { + data.hash = data.Txs.Hash() // NOTE: leaves of merkle tree are TxIDs + } + return data.hash +} + +// StringIndented returns a string representation of the transactions +func (data *Data) StringIndented(indent string) string { + if data == nil { + return "nil-Data" + } + txStrings := make([]string, cmn.MinInt(len(data.Txs), 21)) + for i, tx := range data.Txs { + if i == 20 { + txStrings[i] = fmt.Sprintf("... (%v total)", len(data.Txs)) + break + } + txStrings[i] = fmt.Sprintf("%X (%d bytes)", tx.Hash(), len(tx)) + } + return fmt.Sprintf(`Data{ +%s %v +%s}#%v`, + indent, strings.Join(txStrings, "\n"+indent+" "), + indent, data.hash) +} + +//----------------------------------------------------------------------------- + +// EvidenceData contains any evidence of malicious wrong-doing by validators +type EvidenceData struct { + Evidence EvidenceList `json:"evidence"` + + // Volatile + hash cmn.HexBytes +} + +// Hash returns the hash of the data. +func (data *EvidenceData) Hash() cmn.HexBytes { + if data.hash == nil { + data.hash = data.Evidence.Hash() + } + return data.hash +} + +// StringIndented returns a string representation of the evidence. +func (data *EvidenceData) StringIndented(indent string) string { + if data == nil { + return "nil-Evidence" + } + evStrings := make([]string, cmn.MinInt(len(data.Evidence), 21)) + for i, ev := range data.Evidence { + if i == 20 { + evStrings[i] = fmt.Sprintf("... 
(%v total)", len(data.Evidence)) + break + } + evStrings[i] = fmt.Sprintf("Evidence:%v", ev) + } + return fmt.Sprintf(`EvidenceData{ +%s %v +%s}#%v`, + indent, strings.Join(evStrings, "\n"+indent+" "), + indent, data.hash) + return "" +} + +//-------------------------------------------------------------------------------- + +// BlockID defines the unique ID of a block as its Hash and its PartSetHeader +type BlockID struct { + Hash cmn.HexBytes `json:"hash"` + PartsHeader PartSetHeader `json:"parts"` +} + +// IsZero returns true if this is the BlockID for a nil-block +func (blockID BlockID) IsZero() bool { + return len(blockID.Hash) == 0 && blockID.PartsHeader.IsZero() +} + +// Equals returns true if the BlockID matches the given BlockID +func (blockID BlockID) Equals(other BlockID) bool { + return bytes.Equal(blockID.Hash, other.Hash) && + blockID.PartsHeader.Equals(other.PartsHeader) +} + +// Key returns a machine-readable string representation of the BlockID +func (blockID BlockID) Key() string { + bz, err := cdc.MarshalBinaryBare(blockID.PartsHeader) + if err != nil { + panic(err) + } + return string(blockID.Hash) + string(bz) +} + +// String returns a human readable string representation of the BlockID +func (blockID BlockID) String() string { + return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader) +} + +//------------------------------------------------------- + +type hasher struct { + item interface{} +} + +func (h hasher) Hash() []byte { + hasher := tmhash.New() + if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) { + bz, err := cdc.MarshalBinaryBare(h.item) + if err != nil { + panic(err) + } + _, err = hasher.Write(bz) + if err != nil { + panic(err) + } + } + return hasher.Sum(nil) + +} + +func aminoHash(item interface{}) []byte { + h := hasher{item} + return h.Hash() +} + +func aminoHasher(item interface{}) merkle.Hasher { + return hasher{item} +} diff --git a/types/block_meta.go b/types/block_meta.go new file mode 100644 index 000000000..6dd502e4f --- /dev/null +++ b/types/block_meta.go @@ -0,0 +1,15 @@ +package types + +// BlockMeta contains meta information about a block - namely, it's ID and Header. +type BlockMeta struct { + BlockID BlockID `json:"block_id"` // the block hash and partsethash + Header *Header `json:"header"` // The block's Header +} + +// NewBlockMeta returns a new BlockMeta from the block and its blockParts. 
+func NewBlockMeta(block *Block, blockParts *PartSet) *BlockMeta { + return &BlockMeta{ + BlockID: BlockID{block.Hash(), blockParts.Header()}, + Header: block.Header, + } +} diff --git a/types/block_test.go b/types/block_test.go new file mode 100644 index 000000000..0948e7b21 --- /dev/null +++ b/types/block_test.go @@ -0,0 +1,88 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestValidateBlock(t *testing.T) { + txs := []Tx{Tx("foo"), Tx("bar")} + lastID := makeBlockIDRandom() + h := int64(3) + + voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) + require.NoError(t, err) + + block := MakeBlock(h, txs, commit) + require.NotNil(t, block) + + // proper block must pass + err = block.ValidateBasic() + require.NoError(t, err) + + // tamper with NumTxs + block = MakeBlock(h, txs, commit) + block.NumTxs++ + err = block.ValidateBasic() + require.Error(t, err) + + // remove 1/2 the commits + block = MakeBlock(h, txs, commit) + block.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] + block.LastCommit.hash = nil // clear hash or change wont be noticed + err = block.ValidateBasic() + require.Error(t, err) + + // tamper with LastCommitHash + block = MakeBlock(h, txs, commit) + block.LastCommitHash = []byte("something else") + err = block.ValidateBasic() + require.Error(t, err) + + // tamper with data + block = MakeBlock(h, txs, commit) + block.Data.Txs[0] = Tx("something else") + block.Data.hash = nil // clear hash or change wont be noticed + err = block.ValidateBasic() + require.Error(t, err) + + // tamper with DataHash + block = MakeBlock(h, txs, commit) + block.DataHash = cmn.RandBytes(len(block.DataHash)) + err = block.ValidateBasic() + require.Error(t, err) +} + +func makeBlockIDRandom() BlockID { + blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} + return BlockID{blockHash, blockPartsHeader} +} + +func makeBlockID(hash string, partSetSize int, partSetHash string) BlockID { + return BlockID{ + Hash: []byte(hash), + PartsHeader: PartSetHeader{ + Total: partSetSize, + Hash: []byte(partSetHash), + }, + } + +} + +var nilBytes []byte + +func TestNilHeaderHashDoesntCrash(t *testing.T) { + assert.Equal(t, []byte((*Header)(nil).Hash()), nilBytes) + assert.Equal(t, []byte((new(Header)).Hash()), nilBytes) +} + +func TestNilDataHashDoesntCrash(t *testing.T) { + assert.Equal(t, []byte((*Data)(nil).Hash()), nilBytes) + assert.Equal(t, []byte(new(Data).Hash()), nilBytes) +} diff --git a/types/canonical_json.go b/types/canonical_json.go new file mode 100644 index 000000000..189a8a7a2 --- /dev/null +++ b/types/canonical_json.go @@ -0,0 +1,114 @@ +package types + +import ( + "time" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Canonical json is amino's json for structs with fields in alphabetical order + +// TimeFormat is used for generating the sigs +const TimeFormat = "2006-01-02T15:04:05.000Z" + +type CanonicalJSONBlockID struct { + Hash cmn.HexBytes `json:"hash,omitempty"` + PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"` +} + +type CanonicalJSONPartSetHeader struct { + Hash cmn.HexBytes `json:"hash,omitempty"` + Total int `json:"total,omitempty"` +} + +type CanonicalJSONProposal struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + 
BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"` + Height int64 `json:"height"` + POLBlockID CanonicalJSONBlockID `json:"pol_block_id"` + POLRound int `json:"pol_round"` + Round int `json:"round"` + Timestamp string `json:"timestamp"` +} + +type CanonicalJSONVote struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + BlockID CanonicalJSONBlockID `json:"block_id"` + Height int64 `json:"height"` + Round int `json:"round"` + Timestamp string `json:"timestamp"` + VoteType byte `json:"type"` +} + +type CanonicalJSONHeartbeat struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + Height int64 `json:"height"` + Round int `json:"round"` + Sequence int `json:"sequence"` + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` +} + +//----------------------------------- +// Canonicalize the structs + +func CanonicalBlockID(blockID BlockID) CanonicalJSONBlockID { + return CanonicalJSONBlockID{ + Hash: blockID.Hash, + PartsHeader: CanonicalPartSetHeader(blockID.PartsHeader), + } +} + +func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader { + return CanonicalJSONPartSetHeader{ + psh.Hash, + psh.Total, + } +} + +func CanonicalProposal(chainID string, proposal *Proposal) CanonicalJSONProposal { + return CanonicalJSONProposal{ + ChainID: chainID, + Type: "proposal", + BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader), + Height: proposal.Height, + Timestamp: CanonicalTime(proposal.Timestamp), + POLBlockID: CanonicalBlockID(proposal.POLBlockID), + POLRound: proposal.POLRound, + Round: proposal.Round, + } +} + +func CanonicalVote(chainID string, vote *Vote) CanonicalJSONVote { + return CanonicalJSONVote{ + ChainID: chainID, + Type: "vote", + BlockID: CanonicalBlockID(vote.BlockID), + Height: vote.Height, + Round: vote.Round, + Timestamp: CanonicalTime(vote.Timestamp), + VoteType: vote.Type, + } +} + +func CanonicalHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalJSONHeartbeat { + return CanonicalJSONHeartbeat{ + ChainID: chainID, + Type: "heartbeat", + Height: heartbeat.Height, + Round: heartbeat.Round, + Sequence: heartbeat.Sequence, + ValidatorAddress: heartbeat.ValidatorAddress, + ValidatorIndex: heartbeat.ValidatorIndex, + } +} + +func CanonicalTime(t time.Time) string { + // Note that sending time over amino resets it to + // local time, we need to force UTC here, so the + // signatures match + return t.UTC().Format(TimeFormat) +} diff --git a/types/event_buffer.go b/types/event_buffer.go new file mode 100644 index 000000000..18b41014e --- /dev/null +++ b/types/event_buffer.go @@ -0,0 +1,50 @@ +package types + +// Interface assertions +var _ TxEventPublisher = (*TxEventBuffer)(nil) + +// TxEventBuffer is a buffer of events, which uses a slice to temporarily store +// events. +type TxEventBuffer struct { + next TxEventPublisher + capacity int + events []EventDataTx +} + +// NewTxEventBuffer accepts a TxEventPublisher and returns a new buffer with the given +// capacity. +func NewTxEventBuffer(next TxEventPublisher, capacity int) *TxEventBuffer { + return &TxEventBuffer{ + next: next, + capacity: capacity, + events: make([]EventDataTx, 0, capacity), + } +} + +// Len returns the number of events cached. +func (b TxEventBuffer) Len() int { + return len(b.events) +} + +// PublishEventTx buffers an event to be fired upon finality. 
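+//
+// A minimal sketch of the buffer-then-flush flow (next is any TxEventPublisher;
+// the capacity is illustrative):
+//
+//	buf := NewTxEventBuffer(next, 100)
+//	_ = buf.PublishEventTx(EventDataTx{}) // cached only
+//	_ = buf.Flush()                       // forwards all cached events to next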
+func (b *TxEventBuffer) PublishEventTx(e EventDataTx) error { + b.events = append(b.events, e) + return nil +} + +// Flush publishes events by running next.PublishWithTags on all cached events. +// Blocks. Clears cached events. +func (b *TxEventBuffer) Flush() error { + for _, e := range b.events { + err := b.next.PublishEventTx(e) + if err != nil { + return err + } + } + + // Clear out the elements and set the length to 0 + // but maintain the underlying slice's capacity. + // See Issue https://github.com/tendermint/tendermint/issues/1189 + b.events = b.events[:0] + return nil +} diff --git a/types/event_buffer_test.go b/types/event_buffer_test.go new file mode 100644 index 000000000..74ae9da29 --- /dev/null +++ b/types/event_buffer_test.go @@ -0,0 +1,21 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type eventBusMock struct{} + +func (eventBusMock) PublishEventTx(e EventDataTx) error { + return nil +} + +func TestEventBuffer(t *testing.T) { + b := NewTxEventBuffer(eventBusMock{}, 1) + b.PublishEventTx(EventDataTx{}) + assert.Equal(t, 1, b.Len()) + b.Flush() + assert.Equal(t, 0, b.Len()) +} diff --git a/types/event_bus.go b/types/event_bus.go new file mode 100644 index 000000000..54fc60c7b --- /dev/null +++ b/types/event_bus.go @@ -0,0 +1,167 @@ +package types + +import ( + "context" + "fmt" + + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" +) + +const defaultCapacity = 0 + +type EventBusSubscriber interface { + Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error + Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error + UnsubscribeAll(ctx context.Context, subscriber string) error +} + +// EventBus is a common bus for all events going through the system. All calls +// are proxied to underlying pubsub server. All events must be published using +// EventBus to ensure correct data types. +type EventBus struct { + cmn.BaseService + pubsub *tmpubsub.Server +} + +// NewEventBus returns a new event bus. +func NewEventBus() *EventBus { + return NewEventBusWithBufferCapacity(defaultCapacity) +} + +// NewEventBusWithBufferCapacity returns a new event bus with the given buffer capacity. 
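+//
+// A minimal sketch (the capacity of 10 is illustrative; Start comes from the
+// embedded cmn.BaseService):
+//
+//	bus := NewEventBusWithBufferCapacity(10)
+//	if err := bus.Start(); err != nil { /* handle */ }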
+func NewEventBusWithBufferCapacity(cap int) *EventBus { + // capacity could be exposed later if needed + pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(cap)) + b := &EventBus{pubsub: pubsub} + b.BaseService = *cmn.NewBaseService(nil, "EventBus", b) + return b +} + +func (b *EventBus) SetLogger(l log.Logger) { + b.BaseService.SetLogger(l) + b.pubsub.SetLogger(l.With("module", "pubsub")) +} + +func (b *EventBus) OnStart() error { + return b.pubsub.OnStart() +} + +func (b *EventBus) OnStop() { + b.pubsub.OnStop() +} + +func (b *EventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { + return b.pubsub.Subscribe(ctx, subscriber, query, out) +} + +func (b *EventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { + return b.pubsub.Unsubscribe(ctx, subscriber, query) +} + +func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return b.pubsub.UnsubscribeAll(ctx, subscriber) +} + +func (b *EventBus) Publish(eventType string, eventData TMEventData) error { + // no explicit deadline for publishing events + ctx := context.Background() + b.pubsub.PublishWithTags(ctx, eventData, tmpubsub.NewTagMap(map[string]string{EventTypeKey: eventType})) + return nil +} + +//--- block, tx, and vote events + +func (b *EventBus) PublishEventNewBlock(event EventDataNewBlock) error { + return b.Publish(EventNewBlock, event) +} + +func (b *EventBus) PublishEventNewBlockHeader(event EventDataNewBlockHeader) error { + return b.Publish(EventNewBlockHeader, event) +} + +func (b *EventBus) PublishEventVote(event EventDataVote) error { + return b.Publish(EventVote, event) +} + +// PublishEventTx publishes tx event with tags from Result. Note it will add +// predefined tags (EventTypeKey, TxHashKey). Existing tags with the same names +// will be overwritten. 
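+//
+// For example (sketch; the query mirrors the one in event_bus_test.go): a
+// DeliverTx tag with key "tx.hash" is dropped in favor of the real hash, so
+// subscribers can always rely on the reserved keys:
+//
+//	query := tmquery.MustParse("tm.event='Tx' AND tx.height=1")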
+func (b *EventBus) PublishEventTx(event EventDataTx) error { + // no explicit deadline for publishing events + ctx := context.Background() + + tags := make(map[string]string) + + // validate and fill tags from tx result + for _, tag := range event.Result.Tags { + // basic validation + if len(tag.Key) == 0 { + b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", event.Tx) + continue + } + tags[string(tag.Key)] = string(tag.Value) + } + + // add predefined tags + logIfTagExists(EventTypeKey, tags, b.Logger) + tags[EventTypeKey] = EventTx + + logIfTagExists(TxHashKey, tags, b.Logger) + tags[TxHashKey] = fmt.Sprintf("%X", event.Tx.Hash()) + + logIfTagExists(TxHeightKey, tags, b.Logger) + tags[TxHeightKey] = fmt.Sprintf("%d", event.Height) + + b.pubsub.PublishWithTags(ctx, event, tmpubsub.NewTagMap(tags)) + return nil +} + +func (b *EventBus) PublishEventProposalHeartbeat(event EventDataProposalHeartbeat) error { + return b.Publish(EventProposalHeartbeat, event) +} + +//--- EventDataRoundState events + +func (b *EventBus) PublishEventNewRoundStep(event EventDataRoundState) error { + return b.Publish(EventNewRoundStep, event) +} + +func (b *EventBus) PublishEventTimeoutPropose(event EventDataRoundState) error { + return b.Publish(EventTimeoutPropose, event) +} + +func (b *EventBus) PublishEventTimeoutWait(event EventDataRoundState) error { + return b.Publish(EventTimeoutWait, event) +} + +func (b *EventBus) PublishEventNewRound(event EventDataRoundState) error { + return b.Publish(EventNewRound, event) +} + +func (b *EventBus) PublishEventCompleteProposal(event EventDataRoundState) error { + return b.Publish(EventCompleteProposal, event) +} + +func (b *EventBus) PublishEventPolka(event EventDataRoundState) error { + return b.Publish(EventPolka, event) +} + +func (b *EventBus) PublishEventUnlock(event EventDataRoundState) error { + return b.Publish(EventUnlock, event) +} + +func (b *EventBus) PublishEventRelock(event EventDataRoundState) error { + return b.Publish(EventRelock, event) +} + +func (b *EventBus) PublishEventLock(event EventDataRoundState) error { + return b.Publish(EventLock, event) +} + +func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { + if value, ok := tags[tag]; ok { + logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value) + } +} diff --git a/types/event_bus_test.go b/types/event_bus_test.go new file mode 100644 index 000000000..81903004d --- /dev/null +++ b/types/event_bus_test.go @@ -0,0 +1,171 @@ +package types + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestEventBusPublishEventTx(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + defer eventBus.Stop() + + tx := Tx("foo") + result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{[]byte("baz"), []byte("1")}}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}} + + txEventsCh := make(chan interface{}) + + // PublishEventTx adds all these 3 tags, so the query below should work + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND baz=1", tx.Hash()) + err = eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query), 
txEventsCh) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + for e := range txEventsCh { + edt := e.(EventDataTx) + assert.Equal(t, int64(1), edt.Height) + assert.Equal(t, uint32(0), edt.Index) + assert.Equal(t, tx, edt.Tx) + assert.Equal(t, result, edt.Result) + close(done) + } + }() + + err = eventBus.PublishEventTx(EventDataTx{TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }}) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a transaction after 1 sec.") + } +} + +func BenchmarkEventBus(b *testing.B) { + benchmarks := []struct { + name string + numClients int + randQueries bool + randEvents bool + }{ + {"10Clients1Query1Event", 10, false, false}, + {"100Clients", 100, false, false}, + {"1000Clients", 1000, false, false}, + + {"10ClientsRandQueries1Event", 10, true, false}, + {"100Clients", 100, true, false}, + {"1000Clients", 1000, true, false}, + + {"10ClientsRandQueriesRandEvents", 10, true, true}, + {"100Clients", 100, true, true}, + {"1000Clients", 1000, true, true}, + + {"10Clients1QueryRandEvents", 10, false, true}, + {"100Clients", 100, false, true}, + {"1000Clients", 1000, false, true}, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) + }) + } +} + +func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { + // for random* functions + rand.Seed(time.Now().Unix()) + + eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache + eventBus.Start() + defer eventBus.Stop() + + ctx := context.Background() + q := EventQueryNewBlock + + for i := 0; i < numClients; i++ { + ch := make(chan interface{}) + go func() { + for range ch { + } + }() + if randQueries { + q = randQuery() + } + eventBus.Subscribe(ctx, fmt.Sprintf("client-%d", i), q, ch) + } + + eventType := EventNewBlock + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if randEvents { + eventType = randEvent() + } + + eventBus.Publish(eventType, EventDataString("Gamora")) + } +} + +var events = []string{EventBond, + EventUnbond, + EventRebond, + EventDupeout, + EventFork, + EventNewBlock, + EventNewBlockHeader, + EventNewRound, + EventNewRoundStep, + EventTimeoutPropose, + EventCompleteProposal, + EventPolka, + EventUnlock, + EventLock, + EventRelock, + EventTimeoutWait, + EventVote} + +func randEvent() string { + return events[rand.Intn(len(events))] +} + +var queries = []tmpubsub.Query{EventQueryBond, + EventQueryUnbond, + EventQueryRebond, + EventQueryDupeout, + EventQueryFork, + EventQueryNewBlock, + EventQueryNewBlockHeader, + EventQueryNewRound, + EventQueryNewRoundStep, + EventQueryTimeoutPropose, + EventQueryCompleteProposal, + EventQueryPolka, + EventQueryUnlock, + EventQueryLock, + EventQueryRelock, + EventQueryTimeoutWait, + EventQueryVote} + +func randQuery() tmpubsub.Query { + return queries[rand.Intn(len(queries))] +} diff --git a/types/events.go b/types/events.go new file mode 100644 index 000000000..2b87297cd --- /dev/null +++ b/types/events.go @@ -0,0 +1,154 @@ +package types + +import ( + "fmt" + + amino "github.com/tendermint/go-amino" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + tmquery "github.com/tendermint/tendermint/libs/pubsub/query" +) + +// Reserved event types +const ( + EventBond = "Bond" + EventCompleteProposal = "CompleteProposal" + EventDupeout = "Dupeout" + EventFork = "Fork" + EventLock 
= "Lock" + EventNewBlock = "NewBlock" + EventNewBlockHeader = "NewBlockHeader" + EventNewRound = "NewRound" + EventNewRoundStep = "NewRoundStep" + EventPolka = "Polka" + EventRebond = "Rebond" + EventRelock = "Relock" + EventTimeoutPropose = "TimeoutPropose" + EventTimeoutWait = "TimeoutWait" + EventTx = "Tx" + EventUnbond = "Unbond" + EventUnlock = "Unlock" + EventVote = "Vote" + EventProposalHeartbeat = "ProposalHeartbeat" +) + +/////////////////////////////////////////////////////////////////////////////// +// ENCODING / DECODING +/////////////////////////////////////////////////////////////////////////////// + +// implements events.EventData +type TMEventData interface { + AssertIsTMEventData() + // empty interface +} + +func (_ EventDataNewBlock) AssertIsTMEventData() {} +func (_ EventDataNewBlockHeader) AssertIsTMEventData() {} +func (_ EventDataTx) AssertIsTMEventData() {} +func (_ EventDataRoundState) AssertIsTMEventData() {} +func (_ EventDataVote) AssertIsTMEventData() {} +func (_ EventDataProposalHeartbeat) AssertIsTMEventData() {} +func (_ EventDataString) AssertIsTMEventData() {} + +func RegisterEventDatas(cdc *amino.Codec) { + cdc.RegisterInterface((*TMEventData)(nil), nil) + cdc.RegisterConcrete(EventDataNewBlock{}, "tendermint/event/NewBlock", nil) + cdc.RegisterConcrete(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader", nil) + cdc.RegisterConcrete(EventDataTx{}, "tendermint/event/Tx", nil) + cdc.RegisterConcrete(EventDataRoundState{}, "tendermint/event/RoundState", nil) + cdc.RegisterConcrete(EventDataVote{}, "tendermint/event/Vote", nil) + cdc.RegisterConcrete(EventDataProposalHeartbeat{}, "tendermint/event/ProposalHeartbeat", nil) + cdc.RegisterConcrete(EventDataString(""), "tendermint/event/ProposalString", nil) +} + +// Most event messages are basic types (a block, a transaction) +// but some (an input to a call tx or a receive) are more exotic + +type EventDataNewBlock struct { + Block *Block `json:"block"` +} + +// light weight event for benchmarking +type EventDataNewBlockHeader struct { + Header *Header `json:"header"` +} + +// All txs fire EventDataTx +type EventDataTx struct { + TxResult +} + +type EventDataProposalHeartbeat struct { + Heartbeat *Heartbeat +} + +// NOTE: This goes into the replay WAL +type EventDataRoundState struct { + Height int64 `json:"height"` + Round int `json:"round"` + Step string `json:"step"` + + // private, not exposed to websockets + RoundState interface{} `json:"-"` +} + +type EventDataVote struct { + Vote *Vote +} + +type EventDataString string + +/////////////////////////////////////////////////////////////////////////////// +// PUBSUB +/////////////////////////////////////////////////////////////////////////////// + +const ( + // EventTypeKey is a reserved key, used to specify event type in tags. + EventTypeKey = "tm.event" + // TxHashKey is a reserved key, used to specify transaction's hash. + // see EventBus#PublishEventTx + TxHashKey = "tx.hash" + // TxHeightKey is a reserved key, used to specify transaction block's height. 
+ // see EventBus#PublishEventTx + TxHeightKey = "tx.height" +) + +var ( + EventQueryBond = QueryForEvent(EventBond) + EventQueryUnbond = QueryForEvent(EventUnbond) + EventQueryRebond = QueryForEvent(EventRebond) + EventQueryDupeout = QueryForEvent(EventDupeout) + EventQueryFork = QueryForEvent(EventFork) + EventQueryNewBlock = QueryForEvent(EventNewBlock) + EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) + EventQueryNewRound = QueryForEvent(EventNewRound) + EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) + EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) + EventQueryCompleteProposal = QueryForEvent(EventCompleteProposal) + EventQueryPolka = QueryForEvent(EventPolka) + EventQueryUnlock = QueryForEvent(EventUnlock) + EventQueryLock = QueryForEvent(EventLock) + EventQueryRelock = QueryForEvent(EventRelock) + EventQueryTimeoutWait = QueryForEvent(EventTimeoutWait) + EventQueryVote = QueryForEvent(EventVote) + EventQueryProposalHeartbeat = QueryForEvent(EventProposalHeartbeat) + EventQueryTx = QueryForEvent(EventTx) +) + +func EventQueryTxFor(tx Tx) tmpubsub.Query { + return tmquery.MustParse(fmt.Sprintf("%s='%s' AND %s='%X'", EventTypeKey, EventTx, TxHashKey, tx.Hash())) +} + +func QueryForEvent(eventType string) tmpubsub.Query { + return tmquery.MustParse(fmt.Sprintf("%s='%s'", EventTypeKey, eventType)) +} + +// BlockEventPublisher publishes all block related events +type BlockEventPublisher interface { + PublishEventNewBlock(block EventDataNewBlock) error + PublishEventNewBlockHeader(header EventDataNewBlockHeader) error + PublishEventTx(EventDataTx) error +} + +type TxEventPublisher interface { + PublishEventTx(EventDataTx) error +} diff --git a/types/evidence.go b/types/evidence.go new file mode 100644 index 000000000..266375ec3 --- /dev/null +++ b/types/evidence.go @@ -0,0 +1,213 @@ +package types + +import ( + "bytes" + "fmt" + + "github.com/tendermint/go-amino" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/merkle" +) + +// ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. +type ErrEvidenceInvalid struct { + Evidence Evidence + ErrorValue error +} + +func NewEvidenceInvalidErr(ev Evidence, err error) *ErrEvidenceInvalid { + return &ErrEvidenceInvalid{ev, err} +} + +// Error returns a string representation of the error. +func (err *ErrEvidenceInvalid) Error() string { + return fmt.Sprintf("Invalid evidence: %v. Evidence: %v", err.ErrorValue, err.Evidence) +} + +//------------------------------------------- + +// Evidence represents any provable malicious activity by a validator +type Evidence interface { + Height() int64 // height of the equivocation + Address() []byte // address of the equivocating validator + Hash() []byte // hash of the evidence + Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence + Equal(Evidence) bool // check equality of evidence + + String() string +} + +func RegisterEvidences(cdc *amino.Codec) { + cdc.RegisterInterface((*Evidence)(nil), nil) + cdc.RegisterConcrete(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence", nil) +} + +//------------------------------------------- + +// DuplicateVoteEvidence contains evidence a validator signed two conflicting votes. +type DuplicateVoteEvidence struct { + PubKey crypto.PubKey + VoteA *Vote + VoteB *Vote +} + +// String returns a string representation of the evidence. 
+func (dve *DuplicateVoteEvidence) String() string { + return fmt.Sprintf("VoteA: %v; VoteB: %v", dve.VoteA, dve.VoteB) + +} + +// Height returns the height this evidence refers to. +func (dve *DuplicateVoteEvidence) Height() int64 { + return dve.VoteA.Height +} + +// Address returns the address of the validator. +func (dve *DuplicateVoteEvidence) Address() []byte { + return dve.PubKey.Address() +} + +// Hash returns the hash of the evidence. +func (dve *DuplicateVoteEvidence) Hash() []byte { + return aminoHasher(dve).Hash() +} + +// Verify returns an error if the two votes aren't conflicting. +// To be conflicting, they must be from the same validator, for the same H/R/S, but for different blocks. +func (dve *DuplicateVoteEvidence) Verify(chainID string, pubKey crypto.PubKey) error { + // H/R/S must be the same + if dve.VoteA.Height != dve.VoteB.Height || + dve.VoteA.Round != dve.VoteB.Round || + dve.VoteA.Type != dve.VoteB.Type { + return fmt.Errorf("DuplicateVoteEvidence Error: H/R/S does not match. Got %v and %v", dve.VoteA, dve.VoteB) + } + + // Address must be the same + if !bytes.Equal(dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress) { + return fmt.Errorf("DuplicateVoteEvidence Error: Validator addresses do not match. Got %X and %X", dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress) + } + + // Index must be the same + if dve.VoteA.ValidatorIndex != dve.VoteB.ValidatorIndex { + return fmt.Errorf("DuplicateVoteEvidence Error: Validator indices do not match. Got %d and %d", dve.VoteA.ValidatorIndex, dve.VoteB.ValidatorIndex) + } + + // BlockIDs must be different + if dve.VoteA.BlockID.Equals(dve.VoteB.BlockID) { + return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote", dve.VoteA.BlockID) + } + + // pubkey must match address (this should already be true, sanity check) + addr := dve.VoteA.ValidatorAddress + if !bytes.Equal(pubKey.Address(), addr) { + return fmt.Errorf("DuplicateVoteEvidence FAILED SANITY CHECK - address (%X) doesn't match pubkey (%v - %X)", + addr, pubKey, pubKey.Address()) + } + + // Signatures must be valid + if !pubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) { + return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteA: %v", ErrVoteInvalidSignature) + } + if !pubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) { + return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteB: %v", ErrVoteInvalidSignature) + } + + return nil +} + +// Equal checks if two pieces of evidence are equal. 
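+//
+// For two *DuplicateVoteEvidence values this reduces to (sketch):
+//
+//	bytes.Equal(aminoHasher(dve).Hash(), aminoHasher(ev).Hash())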
+func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool { + if _, ok := ev.(*DuplicateVoteEvidence); !ok { + return false + } + + // just check their hashes + dveHash := aminoHasher(dve).Hash() + evHash := aminoHasher(ev).Hash() + return bytes.Equal(dveHash, evHash) +} + +//----------------------------------------------------------------- + +// UNSTABLE +type MockGoodEvidence struct { + Height_ int64 + Address_ []byte +} + +// UNSTABLE +func NewMockGoodEvidence(height int64, idx int, address []byte) MockGoodEvidence { + return MockGoodEvidence{height, address} +} + +func (e MockGoodEvidence) Height() int64 { return e.Height_ } +func (e MockGoodEvidence) Address() []byte { return e.Address_ } +func (e MockGoodEvidence) Hash() []byte { + return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_)) +} +func (e MockGoodEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil } +func (e MockGoodEvidence) Equal(ev Evidence) bool { + e2 := ev.(MockGoodEvidence) + return e.Height_ == e2.Height_ && + bytes.Equal(e.Address_, e2.Address_) +} +func (e MockGoodEvidence) String() string { + return fmt.Sprintf("GoodEvidence: %d/%s", e.Height_, e.Address_) +} + +// UNSTABLE +type MockBadEvidence struct { + MockGoodEvidence +} + +func (e MockBadEvidence) Verify(chainID string, pubKey crypto.PubKey) error { + return fmt.Errorf("MockBadEvidence") +} +func (e MockBadEvidence) Equal(ev Evidence) bool { + e2 := ev.(MockBadEvidence) + return e.Height_ == e2.Height_ && + bytes.Equal(e.Address_, e2.Address_) +} +func (e MockBadEvidence) String() string { + return fmt.Sprintf("BadEvidence: %d/%s", e.Height_, e.Address_) +} + +//------------------------------------------- + +// EvidenceList is a list of Evidence. Evidences is not a word. +type EvidenceList []Evidence + +// Hash returns the simple merkle root hash of the EvidenceList. +func (evl EvidenceList) Hash() []byte { + // Recursive impl. + // Copied from crypto/merkle to avoid allocations + switch len(evl) { + case 0: + return nil + case 1: + return evl[0].Hash() + default: + left := EvidenceList(evl[:(len(evl)+1)/2]).Hash() + right := EvidenceList(evl[(len(evl)+1)/2:]).Hash() + return merkle.SimpleHashFromTwoHashes(left, right) + } +} + +func (evl EvidenceList) String() string { + s := "" + for _, e := range evl { + s += fmt.Sprintf("%s\t\t", e) + } + return s +} + +// Has returns true if the evidence is in the EvidenceList. 
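+//
+// Illustrative usage (ev is a hypothetical Evidence value):
+//
+//	if !block.Evidence.Evidence.Has(ev) {
+//		block.AddEvidence([]Evidence{ev})
+//	}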
+func (evl EvidenceList) Has(evidence Evidence) bool {
+	for _, ev := range evl {
+		if ev.Equal(evidence) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/types/evidence_test.go b/types/evidence_test.go
new file mode 100644
index 000000000..5bbb2a37d
--- /dev/null
+++ b/types/evidence_test.go
@@ -0,0 +1,74 @@
+package types
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type voteData struct {
+	vote1 *Vote
+	vote2 *Vote
+	valid bool
+}
+
+func makeVote(val PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID) *Vote {
+	v := &Vote{
+		ValidatorAddress: val.GetAddress(),
+		ValidatorIndex:   valIndex,
+		Height:           height,
+		Round:            round,
+		Type:             byte(step),
+		BlockID:          blockID,
+	}
+	err := val.SignVote(chainID, v)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func TestEvidence(t *testing.T) {
+	val := NewMockPV()
+	val2 := NewMockPV()
+	blockID := makeBlockID("blockhash", 1000, "partshash")
+	blockID2 := makeBlockID("blockhash2", 1000, "partshash")
+	blockID3 := makeBlockID("blockhash", 10000, "partshash")
+	blockID4 := makeBlockID("blockhash", 10000, "partshash2")
+
+	chainID := "mychain"
+
+	vote1 := makeVote(val, chainID, 0, 10, 2, 1, blockID)
+	badVote := makeVote(val, chainID, 0, 10, 2, 1, blockID)
+	err := val2.SignVote(chainID, badVote)
+	if err != nil {
+		panic(err)
+	}
+
+	cases := []voteData{
+		{vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID2), true}, // different block ids
+		{vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID3), true},
+		{vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID4), true},
+		{vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID), false},     // same block id, so not a duplicate vote
+		{vote1, makeVote(val, "mychain2", 0, 10, 2, 1, blockID2), false}, // wrong chain id
+		{vote1, makeVote(val, chainID, 1, 10, 2, 1, blockID2), false},    // wrong val index
+		{vote1, makeVote(val, chainID, 0, 11, 2, 1, blockID2), false},    // wrong height
+		{vote1, makeVote(val, chainID, 0, 10, 3, 1, blockID2), false},    // wrong round
+		{vote1, makeVote(val, chainID, 0, 10, 2, 2, blockID2), false},    // wrong step
+		{vote1, makeVote(val2, chainID, 0, 10, 2, 1, blockID), false},    // wrong validator
+		{vote1, badVote, false}, // signed by wrong key
+	}
+
+	pubKey := val.GetPubKey()
+	for _, c := range cases {
+		ev := &DuplicateVoteEvidence{
+			VoteA: c.vote1,
+			VoteB: c.vote2,
+		}
+		if c.valid {
+			assert.Nil(t, ev.Verify(chainID, pubKey), "evidence should be valid")
+		} else {
+			assert.NotNil(t, ev.Verify(chainID, pubKey), "evidence should be invalid")
+		}
+	}
+}
diff --git a/types/genesis.go b/types/genesis.go
new file mode 100644
index 000000000..0367c6b2f
--- /dev/null
+++ b/types/genesis.go
@@ -0,0 +1,123 @@
+package types
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"time"
+
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+//------------------------------------------------------------
+// core types for a genesis definition
+
+// GenesisValidator is an initial validator.
+type GenesisValidator struct {
+	PubKey crypto.PubKey `json:"pub_key"`
+	Power  int64         `json:"power"`
+	Name   string        `json:"name"`
+}
+
+// GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set.
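+// A minimal sketch of constructing one in code (field values are
+// illustrative only):
+//
+//	doc := GenesisDoc{
+//		ChainID:    "test-chain",
+//		Validators: []GenesisValidator{{PubKey: pubKey, Power: 10, Name: "val0"}},
+//	}
+//	err := doc.ValidateAndComplete() // fills GenesisTime and ConsensusParams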
+type GenesisDoc struct {
+	GenesisTime     time.Time          `json:"genesis_time"`
+	ChainID         string             `json:"chain_id"`
+	ConsensusParams *ConsensusParams   `json:"consensus_params,omitempty"`
+	Validators      []GenesisValidator `json:"validators"`
+	AppHash         cmn.HexBytes       `json:"app_hash"`
+	AppStateJSON    json.RawMessage    `json:"app_state,omitempty"`
+	AppOptions      json.RawMessage    `json:"app_options,omitempty"` // DEPRECATED
+}
+
+// AppState returns raw application state.
+// TODO: replace with AppState field during next breaking release (0.18)
+func (genDoc *GenesisDoc) AppState() json.RawMessage {
+	if len(genDoc.AppOptions) > 0 {
+		return genDoc.AppOptions
+	}
+	return genDoc.AppStateJSON
+}
+
+// SaveAs is a utility method for saving GenesisDoc as a JSON file.
+func (genDoc *GenesisDoc) SaveAs(file string) error {
+	genDocBytes, err := cdc.MarshalJSONIndent(genDoc, "", "  ")
+	if err != nil {
+		return err
+	}
+	return cmn.WriteFile(file, genDocBytes, 0644)
+}
+
+// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
+func (genDoc *GenesisDoc) ValidatorHash() []byte {
+	vals := make([]*Validator, len(genDoc.Validators))
+	for i, v := range genDoc.Validators {
+		vals[i] = NewValidator(v.PubKey, v.Power)
+	}
+	vset := NewValidatorSet(vals)
+	return vset.Hash()
+}
+
+// ValidateAndComplete checks that all necessary fields are present
+// and fills in defaults for optional fields left empty
+func (genDoc *GenesisDoc) ValidateAndComplete() error {
+	if genDoc.ChainID == "" {
+		return cmn.NewError("Genesis doc must include non-empty chain_id")
+	}
+
+	if genDoc.ConsensusParams == nil {
+		genDoc.ConsensusParams = DefaultConsensusParams()
+	} else {
+		if err := genDoc.ConsensusParams.Validate(); err != nil {
+			return err
+		}
+	}
+
+	if len(genDoc.Validators) == 0 {
+		return cmn.NewError("The genesis file must have at least one validator")
+	}
+
+	for _, v := range genDoc.Validators {
+		if v.Power == 0 {
+			return cmn.NewError("The genesis file cannot contain validators with no voting power: %v", v)
+		}
+	}
+
+	if genDoc.GenesisTime.IsZero() {
+		genDoc.GenesisTime = time.Now()
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------
+// Make genesis state from file
+
+// GenesisDocFromJSON unmarshals JSON data into a GenesisDoc.
+func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
+	genDoc := GenesisDoc{}
+	err := cdc.UnmarshalJSON(jsonBlob, &genDoc)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := genDoc.ValidateAndComplete(); err != nil {
+		return nil, err
+	}
+
+	return &genDoc, nil
+}
+
+// GenesisDocFromFile reads JSON data from a file and unmarshals it into a GenesisDoc.
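+// For example (the path is hypothetical):
+//
+//	genDoc, err := GenesisDocFromFile("/path/to/genesis.json")
+//	if err != nil {
+//		// file unreadable, or the doc failed ValidateAndComplete
+//	}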
+func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
+	jsonBlob, err := ioutil.ReadFile(genDocFile)
+	if err != nil {
+		return nil, cmn.ErrorWrap(err, "Couldn't read GenesisDoc file")
+	}
+	genDoc, err := GenesisDocFromJSON(jsonBlob)
+	if err != nil {
+		return nil, cmn.ErrorWrap(err, cmn.Fmt("Error reading GenesisDoc at %v", genDocFile))
+	}
+	return genDoc, nil
+}
diff --git a/types/genesis_test.go b/types/genesis_test.go
new file mode 100644
index 000000000..24398a9a5
--- /dev/null
+++ b/types/genesis_test.go
@@ -0,0 +1,61 @@
+package types
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/tendermint/tendermint/crypto"
+)
+
+func TestGenesisBad(t *testing.T) {
+	// test some bad ones from raw json
+	testCases := [][]byte{
+		[]byte{},              // empty
+		[]byte{1, 1, 1, 1, 1}, // junk
+		[]byte(`{}`),          // empty object
+		[]byte(`{"chain_id":"mychain"}`),                   // missing validators
+		[]byte(`{"chain_id":"mychain","validators":[]}`),   // empty validators
+		[]byte(`{"chain_id":"mychain","validators":[{}]}`), // invalid validator
+		[]byte(`{"chain_id":"mychain","validators":null}`), // null validators
+		[]byte(`{"validators":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), // missing chain_id
+	}
+
+	for _, testCase := range testCases {
+		_, err := GenesisDocFromJSON(testCase)
+		assert.Error(t, err, "expected error for bad genDoc json")
+	}
+}
+
+func TestGenesisGood(t *testing.T) {
+	// test a good one from raw json
+	genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}],"app_hash":"","app_state":{"account_owner": "Bob"}}`)
+	_, err := GenesisDocFromJSON(genDocBytes)
+	assert.NoError(t, err, "expected no error for good genDoc json")
+
+	// create a base gendoc from struct
+	baseGenDoc := &GenesisDoc{
+		ChainID:    "abc",
+		Validators: []GenesisValidator{{crypto.GenPrivKeyEd25519().PubKey(), 10, "myval"}},
+	}
+	genDocBytes, err = cdc.MarshalJSON(baseGenDoc)
+	assert.NoError(t, err, "error marshalling genDoc")
+
+	// test base gendoc and check consensus params were filled
+	genDoc, err := GenesisDocFromJSON(genDocBytes)
+	assert.NoError(t, err, "expected no error for valid genDoc json")
+	assert.NotNil(t, genDoc.ConsensusParams, "expected consensus params to be filled in")
+
+	// create json with consensus params filled
+	genDocBytes, err = cdc.MarshalJSON(genDoc)
+	assert.NoError(t, err, "error marshalling genDoc")
+	genDoc, err = GenesisDocFromJSON(genDocBytes)
+	assert.NoError(t, err, "expected no error for valid genDoc json")
+
+	// test with invalid consensus params
+	genDoc.ConsensusParams.BlockSize.MaxBytes = 0
+	genDocBytes, err = cdc.MarshalJSON(genDoc)
+	assert.NoError(t, err, "error marshalling genDoc")
+	genDoc, err = GenesisDocFromJSON(genDocBytes)
+	assert.Error(t, err, "expected error for genDoc json with block size of 0")
+}
diff --git a/types/heartbeat.go b/types/heartbeat.go
new file mode 100644
index 000000000..cebe2864c
--- /dev/null
+++ b/types/heartbeat.go
@@ -0,0 +1,52 @@
+package types
+
+import (
+	"fmt"
+
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// Heartbeat is a simple vote-like structure so validators can
+// alert others that they are alive and
waiting for transactions. +// Note: We aren't adding ",omitempty" to Heartbeat's +// json field tags because we always want the JSON +// representation to be in its canonical form. +type Heartbeat struct { + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` + Height int64 `json:"height"` + Round int `json:"round"` + Sequence int `json:"sequence"` + Signature crypto.Signature `json:"signature"` +} + +// SignBytes returns the Heartbeat bytes for signing. +// It panics if the Heartbeat is nil. +func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { + bz, err := cdc.MarshalJSON(CanonicalHeartbeat(chainID, heartbeat)) + if err != nil { + panic(err) + } + return bz +} + +// Copy makes a copy of the Heartbeat. +func (heartbeat *Heartbeat) Copy() *Heartbeat { + if heartbeat == nil { + return nil + } + heartbeatCopy := *heartbeat + return &heartbeatCopy +} + +// String returns a string representation of the Heartbeat. +func (heartbeat *Heartbeat) String() string { + if heartbeat == nil { + return "nil-heartbeat" + } + + return fmt.Sprintf("Heartbeat{%v:%X %v/%02d (%v) %v}", + heartbeat.ValidatorIndex, cmn.Fingerprint(heartbeat.ValidatorAddress), + heartbeat.Height, heartbeat.Round, heartbeat.Sequence, heartbeat.Signature) +} diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go new file mode 100644 index 000000000..174c3ba97 --- /dev/null +++ b/types/heartbeat_test.go @@ -0,0 +1,53 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" +) + +func TestHeartbeatCopy(t *testing.T) { + hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} + hbCopy := hb.Copy() + require.Equal(t, hbCopy, hb, "heartbeat copy should be the same") + hbCopy.Round = hb.Round + 10 + require.NotEqual(t, hbCopy, hb, "heartbeat copy mutation should not change original") + + var nilHb *Heartbeat + nilHbCopy := nilHb.Copy() + require.Nil(t, nilHbCopy, "copy of nil should also return nil") +} + +func TestHeartbeatString(t *testing.T) { + var nilHb *Heartbeat + require.Contains(t, nilHb.String(), "nil", "expecting a string and no panic") + + hb := &Heartbeat{ValidatorIndex: 1, Height: 11, Round: 2} + require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) }") + + var key crypto.PrivKeyEd25519 + sig, err := key.Sign([]byte("Tendermint")) + require.NoError(t, err) + hb.Signature = sig + require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) /FF41E371B9BF.../}") +} + +func TestHeartbeatWriteSignBytes(t *testing.T) { + + hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} + bz := hb.SignBytes("0xdeadbeef") + // XXX HMMMMMMM + require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"10","round":"1","sequence":"0","validator_address":"","validator_index":"1"}`) + + plainHb := &Heartbeat{} + bz = plainHb.SignBytes("0xdeadbeef") + require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"0","round":"0","sequence":"0","validator_address":"","validator_index":"0"}`) + + require.Panics(t, func() { + var nilHb *Heartbeat + bz := nilHb.SignBytes("0xdeadbeef") + require.Equal(t, string(bz), "null") + }) +} diff --git a/types/keys.go b/types/keys.go new file mode 100644 index 000000000..941e82b65 --- /dev/null +++ b/types/keys.go @@ -0,0 +1,6 @@ +package types + +// UNSTABLE +var ( + PeerStateKey = "ConsensusReactor.peerState" +) diff --git a/types/nop_event_bus.go b/types/nop_event_bus.go new file mode 100644 index 
000000000..cd1eab8cd
--- /dev/null
+++ b/types/nop_event_bus.go
@@ -0,0 +1,77 @@
+package types
+
+import (
+	"context"
+
+	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
+)
+
+// NopEventBus is a no-op event bus: subscriptions and publishes are
+// accepted and silently dropped.
+type NopEventBus struct{}
+
+func (NopEventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error {
+	return nil
+}
+
+func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error {
+	return nil
+}
+
+func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error {
+	return nil
+}
+
+//--- block, tx, and vote events
+
+func (NopEventBus) PublishEventNewBlock(block EventDataNewBlock) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventNewBlockHeader(header EventDataNewBlockHeader) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventVote(vote EventDataVote) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventTx(tx EventDataTx) error {
+	return nil
+}
+
+//--- EventDataRoundState events
+
+func (NopEventBus) PublishEventNewRoundStep(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventTimeoutPropose(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventTimeoutWait(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventNewRound(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventCompleteProposal(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventPolka(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventUnlock(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventRelock(rs EventDataRoundState) error {
+	return nil
+}
+
+func (NopEventBus) PublishEventLock(rs EventDataRoundState) error {
+	return nil
+}
diff --git a/types/params.go b/types/params.go
new file mode 100644
index 000000000..3056c82a0
--- /dev/null
+++ b/types/params.go
@@ -0,0 +1,156 @@
+package types
+
+import (
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+const (
+	// MaxBlockSizeBytes is the maximum permitted size of the blocks.
+	MaxBlockSizeBytes = 104857600 // 100MB
+)
+
+// ConsensusParams contains consensus critical parameters
+// that determine the validity of blocks.
+type ConsensusParams struct {
+	BlockSize      `json:"block_size_params"`
+	TxSize         `json:"tx_size_params"`
+	BlockGossip    `json:"block_gossip_params"`
+	EvidenceParams `json:"evidence_params"`
+}
+
+// BlockSize contains limits on the block size.
+type BlockSize struct {
+	MaxBytes int   `json:"max_bytes"` // NOTE: must not be 0 nor greater than 100MB
+	MaxTxs   int   `json:"max_txs"`
+	MaxGas   int64 `json:"max_gas"`
+}
+
+// TxSize contains limits on the tx size.
+type TxSize struct {
+	MaxBytes int   `json:"max_bytes"`
+	MaxGas   int64 `json:"max_gas"`
+}
+
+// BlockGossip determines consensus critical elements of how blocks are gossiped
+type BlockGossip struct {
+	BlockPartSizeBytes int `json:"block_part_size_bytes"` // NOTE: must not be 0
+}
+
+// EvidenceParams determines how we handle evidence of malfeasance
+type EvidenceParams struct {
+	MaxAge int64 `json:"max_age"` // only accept new evidence more recent than this
+}
+
+// DefaultConsensusParams returns a default ConsensusParams.
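+// The result always passes Validate; the key defaults (taken from the
+// constructors below) are:
+//
+//	params := DefaultConsensusParams()
+//	params.BlockSize.MaxBytes             // 22020096 (21MB)
+//	params.TxSize.MaxBytes                // 10240 (10kB)
+//	params.BlockGossip.BlockPartSizeBytes // 65536 (64kB)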
+func DefaultConsensusParams() *ConsensusParams {
+	return &ConsensusParams{
+		DefaultBlockSize(),
+		DefaultTxSize(),
+		DefaultBlockGossip(),
+		DefaultEvidenceParams(),
+	}
+}
+
+// DefaultBlockSize returns a default BlockSize.
+func DefaultBlockSize() BlockSize {
+	return BlockSize{
+		MaxBytes: 22020096, // 21MB
+		MaxTxs:   10000,
+		MaxGas:   -1,
+	}
+}
+
+// DefaultTxSize returns a default TxSize.
+func DefaultTxSize() TxSize {
+	return TxSize{
+		MaxBytes: 10240, // 10kB
+		MaxGas:   -1,
+	}
+}
+
+// DefaultBlockGossip returns a default BlockGossip.
+func DefaultBlockGossip() BlockGossip {
+	return BlockGossip{
+		BlockPartSizeBytes: 65536, // 64kB
+	}
+}
+
+// DefaultEvidenceParams returns a default EvidenceParams.
+func DefaultEvidenceParams() EvidenceParams {
+	return EvidenceParams{
+		MaxAge: 100000, // 27.8 hrs at 1 block/s
+	}
+}
+
+// Validate validates the ConsensusParams to ensure all values
+// are within their allowed limits, and returns an error if they are not.
+func (params *ConsensusParams) Validate() error {
+	// ensure some values are greater than 0
+	if params.BlockSize.MaxBytes <= 0 {
+		return cmn.NewError("BlockSize.MaxBytes must be greater than 0. Got %d", params.BlockSize.MaxBytes)
+	}
+	if params.BlockGossip.BlockPartSizeBytes <= 0 {
+		return cmn.NewError("BlockGossip.BlockPartSizeBytes must be greater than 0. Got %d", params.BlockGossip.BlockPartSizeBytes)
+	}
+
+	// ensure blocks aren't too big
+	if params.BlockSize.MaxBytes > MaxBlockSizeBytes {
+		return cmn.NewError("BlockSize.MaxBytes is too big. %d > %d",
+			params.BlockSize.MaxBytes, MaxBlockSizeBytes)
+	}
+	return nil
+}
+
+// Hash returns a merkle hash of the parameters to store
+// in the block header
+func (params *ConsensusParams) Hash() []byte {
+	return merkle.SimpleHashFromMap(map[string]merkle.Hasher{
+		"block_gossip_part_size_bytes": aminoHasher(params.BlockGossip.BlockPartSizeBytes),
+		"block_size_max_bytes":         aminoHasher(params.BlockSize.MaxBytes),
+		"block_size_max_gas":           aminoHasher(params.BlockSize.MaxGas),
+		"block_size_max_txs":           aminoHasher(params.BlockSize.MaxTxs),
+		"tx_size_max_bytes":            aminoHasher(params.TxSize.MaxBytes),
+		"tx_size_max_gas":              aminoHasher(params.TxSize.MaxGas),
+	})
+}
+
+// Update returns a copy of the params with updates from the non-zero fields of params2.
+// NOTE: must not modify the original
+func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusParams {
+	res := params // explicit copy
+
+	if params2 == nil {
+		return res
+	}
+
+	// we must defensively consider any structs may be nil
+	// XXX: it's cast city over here. It's ok because we only do int32->int
+	// but still, watch it champ.
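+	// For example, an ABCI update that only sets BlockSize.MaxBytes leaves
+	// every other field of the original params untouched.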
+ if params2.BlockSize != nil { + if params2.BlockSize.MaxBytes > 0 { + res.BlockSize.MaxBytes = int(params2.BlockSize.MaxBytes) + } + if params2.BlockSize.MaxTxs > 0 { + res.BlockSize.MaxTxs = int(params2.BlockSize.MaxTxs) + } + if params2.BlockSize.MaxGas > 0 { + res.BlockSize.MaxGas = params2.BlockSize.MaxGas + } + } + if params2.TxSize != nil { + if params2.TxSize.MaxBytes > 0 { + res.TxSize.MaxBytes = int(params2.TxSize.MaxBytes) + } + if params2.TxSize.MaxGas > 0 { + res.TxSize.MaxGas = params2.TxSize.MaxGas + } + } + if params2.BlockGossip != nil { + if params2.BlockGossip.BlockPartSizeBytes > 0 { + res.BlockGossip.BlockPartSizeBytes = int(params2.BlockGossip.BlockPartSizeBytes) + } + } + return res +} diff --git a/types/params_test.go b/types/params_test.go new file mode 100644 index 000000000..f645585eb --- /dev/null +++ b/types/params_test.go @@ -0,0 +1,88 @@ +package types + +import ( + "bytes" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func newConsensusParams(blockSize, partSize int) ConsensusParams { + return ConsensusParams{ + BlockSize: BlockSize{MaxBytes: blockSize}, + BlockGossip: BlockGossip{BlockPartSizeBytes: partSize}, + } +} + +func TestConsensusParamsValidation(t *testing.T) { + testCases := []struct { + params ConsensusParams + valid bool + }{ + {newConsensusParams(1, 1), true}, + {newConsensusParams(1, 0), false}, + {newConsensusParams(0, 1), false}, + {newConsensusParams(0, 0), false}, + {newConsensusParams(0, 10), false}, + {newConsensusParams(10, -1), false}, + {newConsensusParams(47*1024*1024, 400), true}, + {newConsensusParams(10, 400), true}, + {newConsensusParams(100*1024*1024, 400), true}, + {newConsensusParams(101*1024*1024, 400), false}, + {newConsensusParams(1024*1024*1024, 400), false}, + } + for _, testCase := range testCases { + if testCase.valid { + assert.NoError(t, testCase.params.Validate(), "expected no error for valid params") + } else { + assert.Error(t, testCase.params.Validate(), "expected error for non valid params") + } + } +} + +func makeParams(blockBytes, blockTx, blockGas, txBytes, + txGas, partSize int) ConsensusParams { + + return ConsensusParams{ + BlockSize: BlockSize{ + MaxBytes: blockBytes, + MaxTxs: blockTx, + MaxGas: int64(blockGas), + }, + TxSize: TxSize{ + MaxBytes: txBytes, + MaxGas: int64(txGas), + }, + BlockGossip: BlockGossip{ + BlockPartSizeBytes: partSize, + }, + } +} + +func TestConsensusParamsHash(t *testing.T) { + params := []ConsensusParams{ + makeParams(1, 2, 3, 4, 5, 6), + makeParams(7, 2, 3, 4, 5, 6), + makeParams(1, 7, 3, 4, 5, 6), + makeParams(1, 2, 7, 4, 5, 6), + makeParams(1, 2, 3, 7, 5, 6), + makeParams(1, 2, 3, 4, 7, 6), + makeParams(1, 2, 3, 4, 5, 7), + makeParams(6, 5, 4, 3, 2, 1), + } + + hashes := make([][]byte, len(params)) + for i := range params { + hashes[i] = params[i].Hash() + } + + // make sure there are no duplicates... 
+	// sort, then check in order for matches
+	sort.Slice(hashes, func(i, j int) bool {
+		return bytes.Compare(hashes[i], hashes[j]) < 0
+	})
+	for i := 0; i < len(hashes)-1; i++ {
+		assert.NotEqual(t, hashes[i], hashes[i+1])
+	}
+}
diff --git a/types/part_set.go b/types/part_set.go
new file mode 100644
index 000000000..f6d7f6b6e
--- /dev/null
+++ b/types/part_set.go
@@ -0,0 +1,280 @@
+package types
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+var (
+	ErrPartSetUnexpectedIndex = errors.New("Error part set unexpected index")
+	ErrPartSetInvalidProof    = errors.New("Error part set invalid proof")
+)
+
+type Part struct {
+	Index int                `json:"index"`
+	Bytes cmn.HexBytes       `json:"bytes"`
+	Proof merkle.SimpleProof `json:"proof"`
+
+	// Cache
+	hash []byte
+}
+
+func (part *Part) Hash() []byte {
+	if part.hash != nil {
+		return part.hash
+	}
+	hasher := tmhash.New()
+	hasher.Write(part.Bytes) // nolint: errcheck, gas
+	part.hash = hasher.Sum(nil)
+	return part.hash
+}
+
+func (part *Part) String() string {
+	return part.StringIndented("")
+}
+
+func (part *Part) StringIndented(indent string) string {
+	return fmt.Sprintf(`Part{#%v
+%s  Bytes: %X...
+%s  Proof: %v
+%s}`,
+		part.Index,
+		indent, cmn.Fingerprint(part.Bytes),
+		indent, part.Proof.StringIndented(indent+"  "),
+		indent)
+}
+
+//-------------------------------------
+
+type PartSetHeader struct {
+	Total int          `json:"total"`
+	Hash  cmn.HexBytes `json:"hash"`
+}
+
+func (psh PartSetHeader) String() string {
+	return fmt.Sprintf("%v:%X", psh.Total, cmn.Fingerprint(psh.Hash))
+}
+
+func (psh PartSetHeader) IsZero() bool {
+	return psh.Total == 0
+}
+
+func (psh PartSetHeader) Equals(other PartSetHeader) bool {
+	return psh.Total == other.Total && bytes.Equal(psh.Hash, other.Hash)
+}
+
+//-------------------------------------
+
+type PartSet struct {
+	total int
+	hash  []byte
+
+	mtx           sync.Mutex
+	parts         []*Part
+	partsBitArray *cmn.BitArray
+	count         int
+}
+
+// NewPartSetFromData returns an immutable, full PartSet from the data bytes.
+// The data bytes are split into partSize chunks, and a merkle tree is computed.
+func NewPartSetFromData(data []byte, partSize int) *PartSet {
+	// divide data into chunks of size partSize
+	total := (len(data) + partSize - 1) / partSize
+	parts := make([]*Part, total)
+	hashers := make([]merkle.Hasher, total)
+	partsBitArray := cmn.NewBitArray(total)
+	for i := 0; i < total; i++ {
+		part := &Part{
+			Index: i,
+			Bytes: data[i*partSize : cmn.MinInt(len(data), (i+1)*partSize)],
+		}
+		parts[i] = part
+		hashers[i] = part
+		partsBitArray.SetIndex(i, true)
+	}
+	// Compute merkle proofs
+	root, proofs := merkle.SimpleProofsFromHashers(hashers)
+	for i := 0; i < total; i++ {
+		parts[i].Proof = *proofs[i]
+	}
+	return &PartSet{
+		total:         total,
+		hash:          root,
+		parts:         parts,
+		partsBitArray: partsBitArray,
+		count:         total,
+	}
+}
+
+// NewPartSetFromHeader returns an empty PartSet ready to be populated.
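+// A sketch of the reassembly flow (receivedParts is hypothetical; in
+// practice parts arrive from peers one at a time):
+//
+//	ps := NewPartSetFromHeader(header)
+//	for _, part := range receivedParts {
+//		added, err := ps.AddPart(part) // verifies each part's merkle proof
+//		...
+//	}
+//	if ps.IsComplete() {
+//		r := ps.GetReader() // streams the original data bytes back out
+//	}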
+func NewPartSetFromHeader(header PartSetHeader) *PartSet { + return &PartSet{ + total: header.Total, + hash: header.Hash, + parts: make([]*Part, header.Total), + partsBitArray: cmn.NewBitArray(header.Total), + count: 0, + } +} + +func (ps *PartSet) Header() PartSetHeader { + if ps == nil { + return PartSetHeader{} + } + return PartSetHeader{ + Total: ps.total, + Hash: ps.hash, + } +} + +func (ps *PartSet) HasHeader(header PartSetHeader) bool { + if ps == nil { + return false + } + return ps.Header().Equals(header) +} + +func (ps *PartSet) BitArray() *cmn.BitArray { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return ps.partsBitArray.Copy() +} + +func (ps *PartSet) Hash() []byte { + if ps == nil { + return nil + } + return ps.hash +} + +func (ps *PartSet) HashesTo(hash []byte) bool { + if ps == nil { + return false + } + return bytes.Equal(ps.hash, hash) +} + +func (ps *PartSet) Count() int { + if ps == nil { + return 0 + } + return ps.count +} + +func (ps *PartSet) Total() int { + if ps == nil { + return 0 + } + return ps.total +} + +func (ps *PartSet) AddPart(part *Part) (bool, error) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + // Invalid part index + if part.Index >= ps.total { + return false, ErrPartSetUnexpectedIndex + } + + // If part already exists, return false. + if ps.parts[part.Index] != nil { + return false, nil + } + + // Check hash proof + if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { + return false, ErrPartSetInvalidProof + } + + // Add part + ps.parts[part.Index] = part + ps.partsBitArray.SetIndex(part.Index, true) + ps.count++ + return true, nil +} + +func (ps *PartSet) GetPart(index int) *Part { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return ps.parts[index] +} + +func (ps *PartSet) IsComplete() bool { + return ps.count == ps.total +} + +func (ps *PartSet) GetReader() io.Reader { + if !ps.IsComplete() { + cmn.PanicSanity("Cannot GetReader() on incomplete PartSet") + } + return NewPartSetReader(ps.parts) +} + +type PartSetReader struct { + i int + parts []*Part + reader *bytes.Reader +} + +func NewPartSetReader(parts []*Part) *PartSetReader { + return &PartSetReader{ + i: 0, + parts: parts, + reader: bytes.NewReader(parts[0].Bytes), + } +} + +func (psr *PartSetReader) Read(p []byte) (n int, err error) { + readerLen := psr.reader.Len() + if readerLen >= len(p) { + return psr.reader.Read(p) + } else if readerLen > 0 { + n1, err := psr.Read(p[:readerLen]) + if err != nil { + return n1, err + } + n2, err := psr.Read(p[readerLen:]) + return n1 + n2, err + } + + psr.i++ + if psr.i >= len(psr.parts) { + return 0, io.EOF + } + psr.reader = bytes.NewReader(psr.parts[psr.i].Bytes) + return psr.Read(p) +} + +func (ps *PartSet) StringShort() string { + if ps == nil { + return "nil-PartSet" + } + ps.mtx.Lock() + defer ps.mtx.Unlock() + return fmt.Sprintf("(%v of %v)", ps.Count(), ps.Total()) +} + +func (ps *PartSet) MarshalJSON() ([]byte, error) { + if ps == nil { + return []byte("{}"), nil + } + + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return cdc.MarshalJSON(struct { + CountTotal string `json:"count/total"` + PartsBitArray *cmn.BitArray `json:"parts_bit_array"` + }{ + fmt.Sprintf("%d/%d", ps.Count(), ps.Total()), + ps.partsBitArray, + }) +} diff --git a/types/part_set_test.go b/types/part_set_test.go new file mode 100644 index 000000000..01437f05e --- /dev/null +++ b/types/part_set_test.go @@ -0,0 +1,90 @@ +package types + +import ( + "bytes" + "io/ioutil" + "testing" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + testPartSize = 65536 
// 64KB
+)
+
+func TestBasicPartSet(t *testing.T) {
+	// Construct random data of size partSize * 100
+	data := cmn.RandBytes(testPartSize * 100)
+
+	partSet := NewPartSetFromData(data, testPartSize)
+	if len(partSet.Hash()) == 0 {
+		t.Error("Expected to get hash")
+	}
+	if partSet.Total() != 100 {
+		t.Errorf("Expected to get 100 parts, but got %v", partSet.Total())
+	}
+	if !partSet.IsComplete() {
+		t.Errorf("PartSet should be complete")
+	}
+
+	// Test adding parts to a new partSet.
+	partSet2 := NewPartSetFromHeader(partSet.Header())
+
+	for i := 0; i < partSet.Total(); i++ {
+		part := partSet.GetPart(i)
+		//t.Logf("\n%v", part)
+		added, err := partSet2.AddPart(part)
+		if !added || err != nil {
+			t.Errorf("Failed to add part %v, error: %v", i, err)
+		}
+	}
+
+	if !bytes.Equal(partSet.Hash(), partSet2.Hash()) {
+		t.Error("Expected to get same hash")
+	}
+	if partSet2.Total() != 100 {
+		t.Errorf("Expected to get 100 parts, but got %v", partSet2.Total())
+	}
+	if !partSet2.IsComplete() {
+		t.Errorf("Reconstructed PartSet should be complete")
+	}
+
+	// Reconstruct data, assert that they are equal.
+	data2Reader := partSet2.GetReader()
+	data2, err := ioutil.ReadAll(data2Reader)
+	if err != nil {
+		t.Errorf("Error reading data2Reader: %v", err)
+	}
+	if !bytes.Equal(data, data2) {
+		t.Errorf("Got wrong data.")
+	}
+}
+
+func TestWrongProof(t *testing.T) {
+	// Construct random data of size partSize * 100
+	data := cmn.RandBytes(testPartSize * 100)
+	partSet := NewPartSetFromData(data, testPartSize)
+
+	// Construct a new PartSet to add tampered parts to.
+	partSet2 := NewPartSetFromHeader(partSet.Header())
+
+	// Test adding a part with a wrong proof.
+	part := partSet.GetPart(0)
+	part.Proof.Aunts[0][0] += byte(0x01)
+	added, err := partSet2.AddPart(part)
+	if added || err == nil {
+		t.Errorf("Expected to fail adding a part with a bad proof.")
+	}
+
+	// Test adding a part with wrong bytes.
+	part = partSet.GetPart(1)
+	part.Bytes[0] += byte(0x01)
+	added, err = partSet2.AddPart(part)
+	if added || err == nil {
+		t.Errorf("Expected to fail adding a part with bad bytes.")
+	}
+}
diff --git a/types/priv_validator.go b/types/priv_validator.go
new file mode 100644
index 000000000..85db65a41
--- /dev/null
+++ b/types/priv_validator.go
@@ -0,0 +1,104 @@
+package types
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/tendermint/tendermint/crypto"
+)
+
+// PrivValidator defines the functionality of a local Tendermint validator
+// that signs votes, proposals, and heartbeats, and never double signs.
+type PrivValidator interface {
+	GetAddress() Address // redundant since .PubKey().Address()
+	GetPubKey() crypto.PubKey
+
+	SignVote(chainID string, vote *Vote) error
+	SignProposal(chainID string, proposal *Proposal) error
+	SignHeartbeat(chainID string, heartbeat *Heartbeat) error
+}
+
+//----------------------------------------
+// Misc.
+
+type PrivValidatorsByAddress []PrivValidator
+
+func (pvs PrivValidatorsByAddress) Len() int {
+	return len(pvs)
+}
+
+func (pvs PrivValidatorsByAddress) Less(i, j int) bool {
+	return bytes.Compare(pvs[i].GetAddress(), pvs[j].GetAddress()) == -1
+}
+
+func (pvs PrivValidatorsByAddress) Swap(i, j int) {
+	pvs[i], pvs[j] = pvs[j], pvs[i]
+}
+
+//----------------------------------------
+// MockPV
+
+// MockPV implements PrivValidator without any safety or persistence.
+// Only use it for testing.
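+// For example, in a test (the vote fields are illustrative):
+//
+//	pv := NewMockPV()
+//	vote := &Vote{ValidatorAddress: pv.GetAddress(), Height: 1}
+//	err := pv.SignVote("test-chain", vote) // fills in vote.Signature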
+type MockPV struct {
+	privKey crypto.PrivKey
+}
+
+func NewMockPV() *MockPV {
+	return &MockPV{crypto.GenPrivKeyEd25519()}
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) GetAddress() Address {
+	return pv.privKey.PubKey().Address()
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) GetPubKey() crypto.PubKey {
+	return pv.privKey.PubKey()
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) SignVote(chainID string, vote *Vote) error {
+	signBytes := vote.SignBytes(chainID)
+	sig, err := pv.privKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
+	vote.Signature = sig
+	return nil
+}
+
+// Implements PrivValidator.
+func (pv *MockPV) SignProposal(chainID string, proposal *Proposal) error {
+	signBytes := proposal.SignBytes(chainID)
+	sig, err := pv.privKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
+	proposal.Signature = sig
+	return nil
+}
+
+// SignHeartbeat signs the heartbeat without any checking.
+func (pv *MockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
+	sig, err := pv.privKey.Sign(heartbeat.SignBytes(chainID))
+	if err != nil {
+		return err
+	}
+	heartbeat.Signature = sig
+	return nil
+}
+
+// String returns a string representation of the MockPV.
+func (pv *MockPV) String() string {
+	return fmt.Sprintf("MockPV{%v}", pv.GetAddress())
+}
+
+// XXX: Implement.
+func (pv *MockPV) DisableChecks() {
+	// Currently this does nothing,
+	// as MockPV has no safety checks at all.
+}
diff --git a/types/proposal.go b/types/proposal.go
new file mode 100644
index 000000000..52ce8756e
--- /dev/null
+++ b/types/proposal.go
@@ -0,0 +1,58 @@
+package types
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/tendermint/tendermint/crypto"
+)
+
+var (
+	ErrInvalidBlockPartSignature = errors.New("Error invalid block part signature")
+	ErrInvalidBlockPartHash      = errors.New("Error invalid block part hash")
+)
+
+// Proposal defines a block proposal for the consensus.
+// It refers to the block only by its PartSetHeader.
+// It must be signed by the correct proposer for the given Height/Round
+// to be considered valid. It may depend on votes from a previous round,
+// a so-called Proof-of-Lock (POL) round, as noted in the POLRound and POLBlockID.
+type Proposal struct {
+	Height           int64            `json:"height"`
+	Round            int              `json:"round"`
+	Timestamp        time.Time        `json:"timestamp"`
+	BlockPartsHeader PartSetHeader    `json:"block_parts_header"`
+	POLRound         int              `json:"pol_round"`    // -1 if null.
+	POLBlockID       BlockID          `json:"pol_block_id"` // zero if null.
+	Signature        crypto.Signature `json:"signature"`
+}
+
+// NewProposal returns a new Proposal.
+// If there is no POLRound, polRound should be -1.
+func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal {
+	return &Proposal{
+		Height:           height,
+		Round:            round,
+		Timestamp:        time.Now().UTC(),
+		BlockPartsHeader: blockPartsHeader,
+		POLRound:         polRound,
+		POLBlockID:       polBlockID,
+	}
+}
+
+// String returns a string representation of the Proposal.
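+// For example (taken from the test vector in proposal_test.go below):
+//
+//	Proposal{12345/23456 111:626C6F636B70 (-1,:0:000000000000) @ 2018-02-11T07:09:22.765Z}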
+func (p *Proposal) String() string {
+	return fmt.Sprintf("Proposal{%v/%v %v (%v,%v) %v @ %s}",
+		p.Height, p.Round, p.BlockPartsHeader, p.POLRound,
+		p.POLBlockID, p.Signature, CanonicalTime(p.Timestamp))
+}
+
+// SignBytes returns the Proposal bytes for signing
+func (p *Proposal) SignBytes(chainID string) []byte {
+	bz, err := cdc.MarshalJSON(CanonicalProposal(chainID, p))
+	if err != nil {
+		panic(err)
+	}
+	return bz
+}
diff --git a/types/proposal_test.go b/types/proposal_test.go
new file mode 100644
index 000000000..8aef870fc
--- /dev/null
+++ b/types/proposal_test.go
@@ -0,0 +1,102 @@
+package types
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+var testProposal *Proposal
+
+func init() {
+	var stamp, err = time.Parse(TimeFormat, "2018-02-11T07:09:22.765Z")
+	if err != nil {
+		panic(err)
+	}
+	testProposal = &Proposal{
+		Height:           12345,
+		Round:            23456,
+		BlockPartsHeader: PartSetHeader{111, []byte("blockparts")},
+		POLRound:         -1,
+		Timestamp:        stamp,
+	}
+}
+
+func TestProposalSignable(t *testing.T) {
+	signBytes := testProposal.SignBytes("test_chain_id")
+	signStr := string(signBytes)
+
+	expected := `{"@chain_id":"test_chain_id","@type":"proposal","block_parts_header":{"hash":"626C6F636B7061727473","total":"111"},"height":"12345","pol_block_id":{},"pol_round":"-1","round":"23456","timestamp":"2018-02-11T07:09:22.765Z"}`
+	if signStr != expected {
+		t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr)
+	}
+}
+
+func TestProposalString(t *testing.T) {
+	str := testProposal.String()
+	expected := `Proposal{12345/23456 111:626C6F636B70 (-1,:0:000000000000) @ 2018-02-11T07:09:22.765Z}`
+	if str != expected {
+		t.Errorf("Got unexpected string for Proposal. Expected:\n%v\nGot:\n%v", expected, str)
+	}
+}
+
+func TestProposalVerifySignature(t *testing.T) {
+	privVal := NewMockPV()
+	pubKey := privVal.GetPubKey()
+
+	prop := NewProposal(4, 2, PartSetHeader{777, []byte("proper")}, 2, BlockID{})
+	signBytes := prop.SignBytes("test_chain_id")
+
+	// sign it
+	err := privVal.SignProposal("test_chain_id", prop)
+	require.NoError(t, err)
+
+	// verify the same proposal
+	valid := pubKey.VerifyBytes(signBytes, prop.Signature)
+	require.True(t, valid)
+
+	// serialize, deserialize and verify again....
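+	// (the amino binary round-trip must preserve every signed field, so the
+	// recomputed SignBytes and the original signature still verify)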
+ newProp := new(Proposal) + bs, err := cdc.MarshalBinary(prop) + require.NoError(t, err) + err = cdc.UnmarshalBinary(bs, &newProp) + require.NoError(t, err) + + // verify the transmitted proposal + newSignBytes := newProp.SignBytes("test_chain_id") + require.Equal(t, string(signBytes), string(newSignBytes)) + valid = pubKey.VerifyBytes(newSignBytes, newProp.Signature) + require.True(t, valid) +} + +func BenchmarkProposalWriteSignBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + testProposal.SignBytes("test_chain_id") + } +} + +func BenchmarkProposalSign(b *testing.B) { + privVal := NewMockPV() + for i := 0; i < b.N; i++ { + err := privVal.SignProposal("test_chain_id", testProposal) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkProposalVerifySignature(b *testing.B) { + privVal := NewMockPV() + err := privVal.SignProposal("test_chain_id", testProposal) + require.Nil(b, err) + pubKey := privVal.GetPubKey() + + for i := 0; i < b.N; i++ { + pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), testProposal.Signature) + } +} diff --git a/types/protobuf.go b/types/protobuf.go new file mode 100644 index 000000000..ad7362e03 --- /dev/null +++ b/types/protobuf.go @@ -0,0 +1,221 @@ +package types + +import ( + "bytes" + "fmt" + "reflect" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + crypto "github.com/tendermint/tendermint/crypto" +) + +//------------------------------------------------------- +// Use strings to distinguish types in ABCI messages + +const ( + ABCIEvidenceTypeDuplicateVote = "duplicate/vote" + ABCIEvidenceTypeMockGood = "mock/good" +) + +const ( + ABCIPubKeyTypeEd25519 = "ed25519" + ABCIPubKeyTypeSecp256k1 = "secp256k1" +) + +//------------------------------------------------------- + +// TM2PB is used for converting Tendermint ABCI to protobuf ABCI. 
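+// For example (val and valSet are hypothetical values):
+//
+//	abciVal := TM2PB.Validator(val)      // pubkey becomes a typed abci.PubKey
+//	abciVals := TM2PB.Validators(valSet) // one abci.Validator per set member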
+// UNSTABLE
+var TM2PB = tm2pb{}
+
+type tm2pb struct{}
+
+func (tm2pb) Header(header *Header) abci.Header {
+	return abci.Header{
+		ChainID: header.ChainID,
+		Height:  header.Height,
+
+		Time:     header.Time.Unix(),
+		NumTxs:   int32(header.NumTxs), // XXX: overflow
+		TotalTxs: header.TotalTxs,
+
+		LastBlockHash:  header.LastBlockID.Hash,
+		ValidatorsHash: header.ValidatorsHash,
+		AppHash:        header.AppHash,
+
+		// Proposer: TODO
+	}
+}
+
+// XXX: panics on unknown pubkey type
+func (tm2pb) Validator(val *Validator) abci.Validator {
+	return abci.Validator{
+		Address: val.PubKey.Address(),
+		PubKey:  TM2PB.PubKey(val.PubKey),
+		Power:   val.VotingPower,
+	}
+}
+
+// XXX: panics on nil or unknown pubkey type
+// TODO: add cases when new pubkey types are added to crypto
+func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey {
+	switch pk := pubKey.(type) {
+	case crypto.PubKeyEd25519:
+		return abci.PubKey{
+			Type: ABCIPubKeyTypeEd25519,
+			Data: pk[:],
+		}
+	case crypto.PubKeySecp256k1:
+		return abci.PubKey{
+			Type: ABCIPubKeyTypeSecp256k1,
+			Data: pk[:],
+		}
+	default:
+		panic(fmt.Sprintf("unknown pubkey type: %v %v", pubKey, reflect.TypeOf(pubKey)))
+	}
+}
+
+// XXX: panics on nil or unknown pubkey type
+func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator {
+	validators := make([]abci.Validator, len(vals.Validators))
+	for i, val := range vals.Validators {
+		validators[i] = TM2PB.Validator(val)
+	}
+	return validators
+}
+
+func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams {
+	return &abci.ConsensusParams{
+		BlockSize: &abci.BlockSize{
+			MaxBytes: int32(params.BlockSize.MaxBytes),
+			MaxTxs:   int32(params.BlockSize.MaxTxs),
+			MaxGas:   params.BlockSize.MaxGas,
+		},
+		TxSize: &abci.TxSize{
+			MaxBytes: int32(params.TxSize.MaxBytes),
+			MaxGas:   params.TxSize.MaxGas,
+		},
+		BlockGossip: &abci.BlockGossip{
+			BlockPartSizeBytes: int32(params.BlockGossip.BlockPartSizeBytes),
+		},
+	}
+}
+
+// ABCI Evidence includes information from the past that's not included in the
+// evidence itself, so Evidence types stay compact.
+// XXX: panics on nil or unknown pubkey type
+func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci.Evidence {
+	_, val := valSet.GetByAddress(ev.Address())
+	if val == nil {
+		// should already have checked this
+		panic(fmt.Sprintf("evidence's validator %X not found in the validator set", ev.Address()))
+	}
+
+	// set type
+	var evType string
+	switch ev.(type) {
+	case *DuplicateVoteEvidence:
+		evType = ABCIEvidenceTypeDuplicateVote
+	case MockGoodEvidence:
+		// XXX: not great to have test types in production paths ...
+		evType = ABCIEvidenceTypeMockGood
+	default:
+		panic(fmt.Sprintf("Unknown evidence type: %v %v", ev, reflect.TypeOf(ev)))
+	}
+
+	return abci.Evidence{
+		Type:             evType,
+		Validator:        TM2PB.Validator(val),
+		Height:           ev.Height(),
+		Time:             evTime.Unix(),
+		TotalVotingPower: valSet.TotalVotingPower(),
+	}
+}
+
+// XXX: panics on nil or unknown pubkey type
+func (tm2pb) ValidatorFromPubKeyAndPower(pubkey crypto.PubKey, power int64) abci.Validator {
+	pubkeyABCI := TM2PB.PubKey(pubkey)
+	return abci.Validator{
+		Address: pubkey.Address(),
+		PubKey:  pubkeyABCI,
+		Power:   power,
+	}
+}
+
+//----------------------------------------------------------------------------
+
+// PB2TM is used for converting protobuf ABCI to Tendermint ABCI.
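+// Conversions are meant to round-trip with TM2PB; e.g. for a pubkey:
+//
+//	pk2, err := PB2TM.PubKey(TM2PB.PubKey(pk)) // pk2 == pk, err == nil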
+// UNSTABLE
+var PB2TM = pb2tm{}
+
+type pb2tm struct{}
+
+func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) {
+	// TODO: define these in crypto and use them
+	sizeEd := 32
+	sizeSecp := 33
+	switch pubKey.Type {
+	case ABCIPubKeyTypeEd25519:
+		if len(pubKey.Data) != sizeEd {
+			return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeEd)
+		}
+		var pk crypto.PubKeyEd25519
+		copy(pk[:], pubKey.Data)
+		return pk, nil
+	case ABCIPubKeyTypeSecp256k1:
+		if len(pubKey.Data) != sizeSecp {
+			return nil, fmt.Errorf("Invalid size for PubKeySecp256k1. Got %d, expected %d", len(pubKey.Data), sizeSecp)
+		}
+		var pk crypto.PubKeySecp256k1
+		copy(pk[:], pubKey.Data)
+		return pk, nil
+	default:
+		return nil, fmt.Errorf("Unknown pubkey type %v", pubKey.Type)
+	}
+}
+
+func (pb2tm) Validators(vals []abci.Validator) ([]*Validator, error) {
+	tmVals := make([]*Validator, len(vals))
+	for i, v := range vals {
+		pub, err := PB2TM.PubKey(v.PubKey)
+		if err != nil {
+			return nil, err
+		}
+		// If the app provided an address too, it must match.
+		// This is just a sanity check.
+		if len(v.Address) > 0 {
+			if !bytes.Equal(pub.Address(), v.Address) {
+				return nil, fmt.Errorf("Validator.Address (%X) does not match PubKey.Address (%X)",
+					v.Address, pub.Address())
+			}
+		}
+		tmVals[i] = &Validator{
+			Address:     pub.Address(),
+			PubKey:      pub,
+			VotingPower: v.Power,
+		}
+	}
+	return tmVals, nil
+}
+
+func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams {
+	return ConsensusParams{
+		BlockSize: BlockSize{
+			MaxBytes: int(csp.BlockSize.MaxBytes), // XXX
+			MaxTxs:   int(csp.BlockSize.MaxTxs),   // XXX
+			MaxGas:   csp.BlockSize.MaxGas,
+		},
+		TxSize: TxSize{
+			MaxBytes: int(csp.TxSize.MaxBytes), // XXX
+			MaxGas:   csp.TxSize.MaxGas,
+		},
+		BlockGossip: BlockGossip{
+			BlockPartSizeBytes: int(csp.BlockGossip.BlockPartSizeBytes), // XXX
+		},
+		// TODO: EvidenceParams: EvidenceParams{
+		// 	MaxAge: int(csp.Evidence.MaxAge), // XXX
+		// },
+	}
+}
diff --git a/types/protobuf_test.go b/types/protobuf_test.go
new file mode 100644
index 000000000..cd986fd81
--- /dev/null
+++ b/types/protobuf_test.go
@@ -0,0 +1,69 @@
+package types
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	abci "github.com/tendermint/tendermint/abci/types"
+	crypto "github.com/tendermint/tendermint/crypto"
+)
+
+func TestABCIPubKey(t *testing.T) {
+	pkEd := crypto.GenPrivKeyEd25519().PubKey()
+	pkSecp := crypto.GenPrivKeySecp256k1().PubKey()
+	testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519)
+	testABCIPubKey(t, pkSecp, ABCIPubKeyTypeSecp256k1)
+}
+
+func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) {
+	abciPubKey := TM2PB.PubKey(pk)
+	pk2, err := PB2TM.PubKey(abciPubKey)
+	assert.Nil(t, err)
+	assert.Equal(t, pk, pk2)
+}
+
+func TestABCIValidators(t *testing.T) {
+	pkEd := crypto.GenPrivKeyEd25519().PubKey()
+
+	// correct validator
+	tmValExpected := &Validator{
+		Address:     pkEd.Address(),
+		PubKey:      pkEd,
+		VotingPower: 10,
+	}
+
+	tmVal := &Validator{
+		Address:     pkEd.Address(),
+		PubKey:      pkEd,
+		VotingPower: 10,
+	}
+
+	abciVal := TM2PB.Validator(tmVal)
+	tmVals, err := PB2TM.Validators([]abci.Validator{abciVal})
+	assert.Nil(t, err)
+	assert.Equal(t, tmValExpected, tmVals[0])
+
+	// val with address
+	tmVal.Address = pkEd.Address()
+
+	abciVal = TM2PB.Validator(tmVal)
+	tmVals, err = PB2TM.Validators([]abci.Validator{abciVal})
+	assert.Nil(t, err)
+	assert.Equal(t, tmValExpected, tmVals[0])
+
+	// val with incorrect address
+	abciVal = TM2PB.Validator(tmVal)
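+	// corrupt the address: PB2TM.Validators cross-checks it against the
+	// pubkey-derived address, so the conversion below must fail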
+	abciVal.Address = []byte("incorrect!")
+	tmVals, err = PB2TM.Validators([]abci.Validator{abciVal})
+	assert.NotNil(t, err)
+	assert.Nil(t, tmVals)
+}
+
+func TestABCIConsensusParams(t *testing.T) {
+	cp := DefaultConsensusParams()
+	cp.EvidenceParams.MaxAge = 0 // TODO add this to ABCI
+	abciCP := TM2PB.ConsensusParams(cp)
+	cp2 := PB2TM.ConsensusParams(abciCP)
+
+	assert.Equal(t, *cp, cp2)
+}
diff --git a/types/results.go b/types/results.go
new file mode 100644
index 000000000..7f8e6093a
--- /dev/null
+++ b/types/results.go
@@ -0,0 +1,72 @@
+package types
+
+import (
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+//-----------------------------------------------------------------------------
+
+// ABCIResult is the deterministic component of a ResponseDeliverTx.
+// TODO: add Tags
+type ABCIResult struct {
+	Code uint32       `json:"code"`
+	Data cmn.HexBytes `json:"data"`
+}
+
+// Hash returns the canonical hash of the ABCIResult
+func (a ABCIResult) Hash() []byte {
+	bz := aminoHash(a)
+	return bz
+}
+
+// ABCIResults wraps the deliver tx results to return a proof
+type ABCIResults []ABCIResult
+
+// NewResults creates ABCIResults from ResponseDeliverTx
+func NewResults(del []*abci.ResponseDeliverTx) ABCIResults {
+	res := make(ABCIResults, len(del))
+	for i, d := range del {
+		res[i] = NewResultFromResponse(d)
+	}
+	return res
+}
+
+func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult {
+	return ABCIResult{
+		Code: response.Code,
+		Data: response.Data,
+	}
+}
+
+// Bytes serializes the ABCIResults using amino
+func (a ABCIResults) Bytes() []byte {
+	bz, err := cdc.MarshalBinary(a)
+	if err != nil {
+		panic(err)
+	}
+	return bz
+}
+
+// Hash returns a merkle hash of all results
+func (a ABCIResults) Hash() []byte {
+	// NOTE: we copy the impl of the merkle tree for txs -
+	// we should be consistent and either do it for both or not.
+	return merkle.SimpleHashFromHashers(a.toHashers())
+}
+
+// ProveResult returns a merkle proof of one result from the set
+func (a ABCIResults) ProveResult(i int) merkle.SimpleProof {
+	_, proofs := merkle.SimpleProofsFromHashers(a.toHashers())
+	return *proofs[i]
+}
+
+func (a ABCIResults) toHashers() []merkle.Hasher {
+	l := len(a)
+	hashers := make([]merkle.Hasher, l)
+	for i := 0; i < l; i++ {
+		hashers[i] = a[i]
+	}
+	return hashers
+}
diff --git a/types/results_test.go b/types/results_test.go
new file mode 100644
index 000000000..009e2693d
--- /dev/null
+++ b/types/results_test.go
@@ -0,0 +1,43 @@
+package types
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestABCIResults(t *testing.T) {
+	a := ABCIResult{Code: 0, Data: nil}
+	b := ABCIResult{Code: 0, Data: []byte{}}
+	c := ABCIResult{Code: 0, Data: []byte("one")}
+	d := ABCIResult{Code: 14, Data: nil}
+	e := ABCIResult{Code: 14, Data: []byte("foo")}
+	f := ABCIResult{Code: 14, Data: []byte("bar")}
+
+	// Nil and []byte{} should produce the same hash.
+	require.Equal(t, a.Hash(), a.Hash())
+	require.Equal(t, b.Hash(), b.Hash())
+	require.Equal(t, a.Hash(), b.Hash())
+
+	// a and b hash identically, so only one of them goes in results.
+	results := ABCIResults{a, c, d, e, f}
+
+	// Make sure each result hashes properly.
+	var last []byte
+	for i, res := range results {
+		h := res.Hash()
+		assert.NotEqual(t, last, h, "%d", i)
+		last = h
+	}
+
+	// Make sure that we can get a root hash from results and verify proofs.
+ root := results.Hash() + assert.NotEmpty(t, root) + + for i, res := range results { + proof := results.ProveResult(i) + valid := proof.Verify(i, len(results), res.Hash(), root) + assert.True(t, valid, "%d", i) + } +} diff --git a/types/signable.go b/types/signable.go new file mode 100644 index 000000000..cc6498882 --- /dev/null +++ b/types/signable.go @@ -0,0 +1,11 @@ +package types + +// Signable is an interface for all signable things. +// It typically removes signatures before serializing. +// SignBytes returns the bytes to be signed +// NOTE: chainIDs are part of the SignBytes but not +// necessarily the object themselves. +// NOTE: Expected to panic if there is an error marshalling. +type Signable interface { + SignBytes(chainID string) []byte +} diff --git a/types/test_util.go b/types/test_util.go new file mode 100644 index 000000000..f21c2831f --- /dev/null +++ b/types/test_util.go @@ -0,0 +1,37 @@ +package types + +import "time" + +func MakeCommit(blockID BlockID, height int64, round int, + voteSet *VoteSet, + validators []PrivValidator) (*Commit, error) { + + // all sign + for i := 0; i < len(validators); i++ { + + vote := &Vote{ + ValidatorAddress: validators[i].GetAddress(), + ValidatorIndex: i, + Height: height, + Round: round, + Type: VoteTypePrecommit, + BlockID: blockID, + Timestamp: time.Now().UTC(), + } + + _, err := signAddVote(validators[i], vote, voteSet) + if err != nil { + return nil, err + } + } + + return voteSet.MakeCommit(), nil +} + +func signAddVote(privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bool, err error) { + err = privVal.SignVote(voteSet.ChainID(), vote) + if err != nil { + return false, err + } + return voteSet.AddVote(vote) +} diff --git a/types/tx.go b/types/tx.go new file mode 100644 index 000000000..489f0b232 --- /dev/null +++ b/types/tx.go @@ -0,0 +1,128 @@ +package types + +import ( + "bytes" + "errors" + "fmt" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Tx is an arbitrary byte array. +// NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed. +// Might we want types here ? +type Tx []byte + +// Hash computes the TMHASH hash of the wire encoded transaction. +func (tx Tx) Hash() []byte { + return tmhash.Sum(tx) +} + +// String returns the hex-encoded transaction as a string. +func (tx Tx) String() string { + return fmt.Sprintf("Tx{%X}", []byte(tx)) +} + +// Txs is a slice of Tx. +type Txs []Tx + +// Hash returns the simple Merkle root hash of the transactions. +func (txs Txs) Hash() []byte { + // Recursive impl. + // Copied from tendermint/crypto/merkle to avoid allocations + switch len(txs) { + case 0: + return nil + case 1: + return txs[0].Hash() + default: + left := Txs(txs[:(len(txs)+1)/2]).Hash() + right := Txs(txs[(len(txs)+1)/2:]).Hash() + return merkle.SimpleHashFromTwoHashes(left, right) + } +} + +// Index returns the index of this transaction in the list, or -1 if not found +func (txs Txs) Index(tx Tx) int { + for i := range txs { + if bytes.Equal(txs[i], tx) { + return i + } + } + return -1 +} + +// IndexByHash returns the index of this transaction hash in the list, or -1 if not found +func (txs Txs) IndexByHash(hash []byte) int { + for i := range txs { + if bytes.Equal(txs[i].Hash(), hash) { + return i + } + } + return -1 +} + +// Proof returns a simple merkle proof for this node. 
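+// For example (txs is any non-empty Txs value):
+//
+//	proof := txs.Proof(0)
+//	err := proof.Validate(txs.Hash()) // nil: the proof ties tx 0 to the root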
+// Panics if i < 0 or i >= len(txs)
+// TODO: optimize this!
+func (txs Txs) Proof(i int) TxProof {
+	l := len(txs)
+	hashers := make([]merkle.Hasher, l)
+	for j := 0; j < l; j++ {
+		hashers[j] = txs[j]
+	}
+	root, proofs := merkle.SimpleProofsFromHashers(hashers)
+
+	return TxProof{
+		Index:    i,
+		Total:    l,
+		RootHash: root,
+		Data:     txs[i],
+		Proof:    *proofs[i],
+	}
+}
+
+// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
+type TxProof struct {
+	Index, Total int
+	RootHash     cmn.HexBytes
+	Data         Tx
+	Proof        merkle.SimpleProof
+}
+
+// LeafHash returns the hash of the transaction this proof refers to.
+func (tp TxProof) LeafHash() []byte {
+	return tp.Data.Hash()
+}
+
+// Validate verifies the proof. It returns nil if the RootHash matches the dataHash argument,
+// and if the proof is internally consistent. Otherwise, it returns a sensible error.
+func (tp TxProof) Validate(dataHash []byte) error {
+	if !bytes.Equal(dataHash, tp.RootHash) {
+		return errors.New("Proof matches different data hash")
+	}
+	if tp.Index < 0 {
+		return errors.New("Proof index cannot be negative")
+	}
+	if tp.Total <= 0 {
+		return errors.New("Proof total must be positive")
+	}
+	valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash)
+	if !valid {
+		return errors.New("Proof is not internally consistent")
+	}
+	return nil
+}
+
+// TxResult contains results of executing the transaction.
+//
+// One usage is indexing transaction results.
+type TxResult struct {
+	Height int64                  `json:"height"`
+	Index  uint32                 `json:"index"`
+	Tx     Tx                     `json:"tx"`
+	Result abci.ResponseDeliverTx `json:"result"`
+}
diff --git a/types/tx_test.go b/types/tx_test.go
new file mode 100644
index 000000000..67df5c5f3
--- /dev/null
+++ b/types/tx_test.go
@@ -0,0 +1,125 @@
+package types
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	ctest "github.com/tendermint/tendermint/libs/test"
+)
+
+func makeTxs(cnt, size int) Txs {
+	txs := make(Txs, cnt)
+	for i := 0; i < cnt; i++ {
+		txs[i] = cmn.RandBytes(size)
+	}
+	return txs
+}
+
+func randInt(low, high int) int {
+	off := cmn.RandInt() % (high - low)
+	return low + off
+}
+
+func TestTxIndex(t *testing.T) {
+	assert := assert.New(t)
+	for i := 0; i < 20; i++ {
+		txs := makeTxs(15, 60)
+		for j := 0; j < len(txs); j++ {
+			tx := txs[j]
+			idx := txs.Index(tx)
+			assert.Equal(j, idx)
+		}
+		assert.Equal(-1, txs.Index(nil))
+		assert.Equal(-1, txs.Index(Tx("foodnwkf")))
+	}
+}
+
+func TestValidTxProof(t *testing.T) {
+	assert := assert.New(t)
+	cases := []struct {
+		txs Txs
+	}{
+		{Txs{{1, 4, 34, 87, 163, 1}}},
+		{Txs{{5, 56, 165, 2}, {4, 77}}},
+		{Txs{Tx("foo"), Tx("bar"), Tx("baz")}},
+		{makeTxs(20, 5)},
+		{makeTxs(7, 81)},
+		{makeTxs(61, 15)},
+	}
+
+	for h, tc := range cases {
+		txs := tc.txs
+		root := txs.Hash()
+		// make sure valid proof for every tx
+		for i := range txs {
+			leaf := txs[i]
+			leafHash := leaf.Hash()
+			proof := txs.Proof(i)
+			assert.Equal(i, proof.Index, "%d: %d", h, i)
+			assert.Equal(len(txs), proof.Total, "%d: %d", h, i)
+			assert.EqualValues(root, proof.RootHash, "%d: %d", h, i)
+			assert.EqualValues(leaf, proof.Data, "%d: %d", h, i)
+			assert.EqualValues(leafHash, proof.LeafHash(), "%d: %d", h, i)
+			assert.Nil(proof.Validate(root), "%d: %d", h, i)
+			assert.NotNil(proof.Validate([]byte("foobar")), "%d: %d", h, i)
+
+			// read-write must also work
+			var p2 TxProof
+			bin, err := cdc.MarshalBinary(proof)
+			assert.Nil(err)
+			err = cdc.UnmarshalBinary(bin, &p2)
+			if
assert.Nil(err, "%d: %d: %+v", h, i, err) { + assert.Nil(p2.Validate(root), "%d: %d", h, i) + } + } + } +} + +func TestTxProofUnchangable(t *testing.T) { + // run the other test a bunch... + for i := 0; i < 40; i++ { + testTxProofUnchangable(t) + } +} + +func testTxProofUnchangable(t *testing.T) { + assert := assert.New(t) + + // make some proof + txs := makeTxs(randInt(2, 100), randInt(16, 128)) + root := txs.Hash() + i := randInt(0, len(txs)-1) + proof := txs.Proof(i) + + // make sure it is valid to start with + assert.Nil(proof.Validate(root)) + bin, err := cdc.MarshalBinary(proof) + assert.Nil(err) + + // try mutating the data and make sure nothing breaks + for j := 0; j < 500; j++ { + bad := ctest.MutateByteSlice(bin) + if !bytes.Equal(bad, bin) { + assertBadProof(t, root, bad, proof) + } + } +} + +// This makes sure that the proof doesn't deserialize into something valid. +func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { + var proof TxProof + err := cdc.UnmarshalBinary(bad, &proof) + if err == nil { + err = proof.Validate(root) + if err == nil { + // XXX Fix simple merkle proofs so the following is *not* OK. + // This can happen if we have a slightly different total (where the + // path ends up the same). If it is something else, we have a real + // problem. + assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good) + } + } +} diff --git a/types/validator.go b/types/validator.go new file mode 100644 index 000000000..e43acf09d --- /dev/null +++ b/types/validator.go @@ -0,0 +1,98 @@ +package types + +import ( + "bytes" + "fmt" + + "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Volatile state for each Validator +// NOTE: The Accum is not included in Validator.Hash(); +// make sure to update that method if changes are made here +type Validator struct { + Address Address `json:"address"` + PubKey crypto.PubKey `json:"pub_key"` + VotingPower int64 `json:"voting_power"` + + Accum int64 `json:"accum"` +} + +func NewValidator(pubKey crypto.PubKey, votingPower int64) *Validator { + return &Validator{ + Address: pubKey.Address(), + PubKey: pubKey, + VotingPower: votingPower, + Accum: 0, + } +} + +// Creates a new copy of the validator so we can mutate accum. +// Panics if the validator is nil. +func (v *Validator) Copy() *Validator { + vCopy := *v + return &vCopy +} + +// Returns the one with higher Accum. +func (v *Validator) CompareAccum(other *Validator) *Validator { + if v == nil { + return other + } + if v.Accum > other.Accum { + return v + } else if v.Accum < other.Accum { + return other + } else { + result := bytes.Compare(v.Address, other.Address) + if result < 0 { + return v + } else if result > 0 { + return other + } else { + cmn.PanicSanity("Cannot compare identical validators") + return nil + } + } +} + +func (v *Validator) String() string { + if v == nil { + return "nil-Validator" + } + return fmt.Sprintf("Validator{%v %v VP:%v A:%v}", + v.Address, + v.PubKey, + v.VotingPower, + v.Accum) +} + +// Hash computes the unique ID of a validator with a given voting power. +// It excludes the Accum value, which changes with every round. +func (v *Validator) Hash() []byte { + return aminoHash(struct { + Address Address + PubKey crypto.PubKey + VotingPower int64 + }{ + v.Address, + v.PubKey, + v.VotingPower, + }) +} + +//---------------------------------------- +// RandValidator + +// RandValidator returns a randomized validator, useful for testing. 
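+// For example:
+//
+//	val, privVal := RandValidator(true, 10) // power = 10 + a random uint32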
+// UNSTABLE
+func RandValidator(randPower bool, minPower int64) (*Validator, PrivValidator) {
+	privVal := NewMockPV()
+	votePower := minPower
+	if randPower {
+		votePower += int64(cmn.RandUint32())
+	}
+	val := NewValidator(privVal.GetPubKey(), votePower)
+	return val, privVal
+}
diff --git a/types/validator_set.go b/types/validator_set.go
new file mode 100644
index 000000000..191f8b428
--- /dev/null
+++ b/types/validator_set.go
@@ -0,0 +1,517 @@
+package types
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"sort"
+	"strings"
+
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// ValidatorSet represents a set of *Validator at a given height.
+// The validators can be fetched by address or index.
+// The index is in order of .Address, so the indices are fixed
+// for all rounds of a given blockchain height.
+// On the other hand, the .Accum of each validator and
+// the designated .GetProposer() of a set changes every round,
+// upon calling .IncrementAccum().
+// NOTE: Not goroutine-safe.
+// NOTE: All get/set to validators should copy the value for safety.
+type ValidatorSet struct {
+	// NOTE: persisted via reflect, must be exported.
+	Validators []*Validator `json:"validators"`
+	Proposer   *Validator   `json:"proposer"`
+
+	// cached (unexported)
+	totalVotingPower int64
+}
+
+func NewValidatorSet(vals []*Validator) *ValidatorSet {
+	validators := make([]*Validator, len(vals))
+	for i, val := range vals {
+		validators[i] = val.Copy()
+	}
+	sort.Sort(ValidatorsByAddress(validators))
+	vs := &ValidatorSet{
+		Validators: validators,
+	}
+
+	if vals != nil {
+		vs.IncrementAccum(1)
+	}
+
+	return vs
+}
+
+// IncrementAccum increments the accum of each validator and updates the proposer.
+func (valSet *ValidatorSet) IncrementAccum(times int) {
+	// Add VotingPower * times to each validator and order into heap.
+	validatorsHeap := cmn.NewHeap()
+	for _, val := range valSet.Validators {
+		// check for overflow both multiplication and sum
+		val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times)))
+		validatorsHeap.PushComparable(val, accumComparable{val})
+	}
+
+	// Decrement the validator with the most accum, `times` times.
+	for i := 0; i < times; i++ {
+		mostest := validatorsHeap.Peek().(*Validator)
+		// mind underflow
+		mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower())
+
+		if i == times-1 {
+			valSet.Proposer = mostest
+		} else {
+			validatorsHeap.Update(mostest, accumComparable{mostest})
+		}
+	}
+}
+
+// Copy each validator into a new ValidatorSet.
+func (valSet *ValidatorSet) Copy() *ValidatorSet {
+	validators := make([]*Validator, len(valSet.Validators))
+	for i, val := range valSet.Validators {
+		// NOTE: must copy, since IncrementAccum updates in place.
+		validators[i] = val.Copy()
+	}
+	return &ValidatorSet{
+		Validators:       validators,
+		Proposer:         valSet.Proposer,
+		totalVotingPower: valSet.totalVotingPower,
+	}
+}
+
+// HasAddress returns true if the given address is in the validator set, false
+// otherwise.
+func (valSet *ValidatorSet) HasAddress(address []byte) bool {
+	idx := sort.Search(len(valSet.Validators), func(i int) bool {
+		return bytes.Compare(address, valSet.Validators[i].Address) <= 0
+	})
+	return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
+}
+
+// GetByAddress returns the index of the validator with the given address and
+// the validator itself (a copy) if found. Otherwise, -1 and nil are returned.
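+//
+// Illustrative sketch (addr is a placeholder []byte address):
+//
+//	idx, val := valSet.GetByAddress(addr)
+//	if idx == -1 {
+//		// addr is not in the set; val is nil
+//	}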
+func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
+	idx := sort.Search(len(valSet.Validators), func(i int) bool {
+		return bytes.Compare(address, valSet.Validators[i].Address) <= 0
+	})
+	if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
+		return idx, valSet.Validators[idx].Copy()
+	}
+	return -1, nil
+}
+
+// GetByIndex returns the validator's address and the validator itself by index.
+// It returns nil values if index is less than 0 or greater than or equal to
+// len(ValidatorSet.Validators).
+func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
+	if index < 0 || index >= len(valSet.Validators) {
+		return nil, nil
+	}
+	val = valSet.Validators[index]
+	return val.Address, val.Copy()
+}
+
+// Size returns the length of the validator set.
+func (valSet *ValidatorSet) Size() int {
+	return len(valSet.Validators)
+}
+
+// TotalVotingPower returns the sum of the voting powers of all validators.
+func (valSet *ValidatorSet) TotalVotingPower() int64 {
+	if valSet.totalVotingPower == 0 {
+		for _, val := range valSet.Validators {
+			// mind overflow
+			valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower)
+		}
+	}
+	return valSet.totalVotingPower
+}
+
+// GetProposer returns the current proposer. If the validator set is empty, nil
+// is returned.
+func (valSet *ValidatorSet) GetProposer() (proposer *Validator) {
+	if len(valSet.Validators) == 0 {
+		return nil
+	}
+	if valSet.Proposer == nil {
+		valSet.Proposer = valSet.findProposer()
+	}
+	return valSet.Proposer.Copy()
+}
+
+func (valSet *ValidatorSet) findProposer() *Validator {
+	var proposer *Validator
+	for _, val := range valSet.Validators {
+		if proposer == nil || !bytes.Equal(val.Address, proposer.Address) {
+			proposer = proposer.CompareAccum(val)
+		}
+	}
+	return proposer
+}
+
+// Hash returns the Merkle root hash built from the validators (as leaves) in
+// the set.
+func (valSet *ValidatorSet) Hash() []byte {
+	if len(valSet.Validators) == 0 {
+		return nil
+	}
+	hashers := make([]merkle.Hasher, len(valSet.Validators))
+	for i, val := range valSet.Validators {
+		hashers[i] = val
+	}
+	return merkle.SimpleHashFromHashers(hashers)
+}
+
+// Add adds val to the validator set and returns true. It returns false if val
+// is already in the set.
+func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
+	val = val.Copy()
+	idx := sort.Search(len(valSet.Validators), func(i int) bool {
+		return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
+	})
+	if idx >= len(valSet.Validators) {
+		valSet.Validators = append(valSet.Validators, val)
+		// Invalidate cache
+		valSet.Proposer = nil
+		valSet.totalVotingPower = 0
+		return true
+	} else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
+		return false
+	} else {
+		newValidators := make([]*Validator, len(valSet.Validators)+1)
+		copy(newValidators[:idx], valSet.Validators[:idx])
+		newValidators[idx] = val
+		copy(newValidators[idx+1:], valSet.Validators[idx:])
+		valSet.Validators = newValidators
+		// Invalidate cache
+		valSet.Proposer = nil
+		valSet.totalVotingPower = 0
+		return true
+	}
+}
+
+// Update updates val and returns true. It returns false if val is not present
+// in the set.
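+//
+// Illustrative sketch of changing a validator's power (addr and newPower are
+// placeholders; GetByAddress returns a copy, so write it back):
+//
+//	_, val := valSet.GetByAddress(addr)
+//	if val != nil {
+//		val.VotingPower = newPower
+//		_ = valSet.Update(val)
+//	}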
+func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
+	index, sameVal := valSet.GetByAddress(val.Address)
+	if sameVal == nil {
+		return false
+	}
+	valSet.Validators[index] = val.Copy()
+	// Invalidate cache
+	valSet.Proposer = nil
+	valSet.totalVotingPower = 0
+	return true
+}
+
+// Remove deletes the validator with the given address. It returns the validator
+// removed and true. It returns nil and false if the validator is not present
+// in the set.
+func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
+	idx := sort.Search(len(valSet.Validators), func(i int) bool {
+		return bytes.Compare(address, valSet.Validators[i].Address) <= 0
+	})
+	if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
+		return nil, false
+	}
+	removedVal := valSet.Validators[idx]
+	newValidators := valSet.Validators[:idx]
+	if idx+1 < len(valSet.Validators) {
+		newValidators = append(newValidators, valSet.Validators[idx+1:]...)
+	}
+	valSet.Validators = newValidators
+	// Invalidate cache
+	valSet.Proposer = nil
+	valSet.totalVotingPower = 0
+	return removedVal, true
+}
+
+// Iterate will run the given function over the set.
+func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
+	for i, val := range valSet.Validators {
+		stop := fn(i, val.Copy())
+		if stop {
+			break
+		}
+	}
+}
+
+// VerifyCommit verifies that +2/3 of the set signed the given commit.
+func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error {
+	if valSet.Size() != len(commit.Precommits) {
+		return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits))
+	}
+	if height != commit.Height() {
+		return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
+	}
+
+	talliedVotingPower := int64(0)
+	round := commit.Round()
+
+	for idx, precommit := range commit.Precommits {
+		// may be nil if validator skipped.
+		if precommit == nil {
+			continue
+		}
+		if precommit.Height != height {
+			return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height)
+		}
+		if precommit.Round != round {
+			return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
+		}
+		if precommit.Type != VoteTypePrecommit {
+			return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx)
+		}
+		_, val := valSet.GetByIndex(idx)
+		// Validate signature
+		precommitSignBytes := precommit.SignBytes(chainID)
+		if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
+			return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit)
+		}
+		if !blockID.Equals(precommit.BlockID) {
+			continue // Not an error, but doesn't count
+		}
+		// Good precommit!
+		talliedVotingPower += val.VotingPower
+	}
+
+	if talliedVotingPower > valSet.TotalVotingPower()*2/3 {
+		return nil
+	}
+	return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
+		talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
+}
+
+// VerifyCommitAny will check whether the commit would also
+// be valid with a different validator set.
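+// (Illustrative numbers: if each set has total power 100, the commit must
+// carry strictly more than 100*2/3 = 66 power from both sets, i.e. at
+// least 67.)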
+//
+// valSet is the validator set that we know
+// * over 2/3 of the power in old signed this block
+//
+// newSet is the validator set that signed this block
+// * only votes from old are sufficient for 2/3 majority
+//   in the new set as well
+//
+// That means that:
+// * 10% of the valset can't just declare themselves kings
+// * If the validator set is 3x old size, we need more proof to trust
+func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string,
+	blockID BlockID, height int64, commit *Commit) error {
+
+	if newSet.Size() != len(commit.Precommits) {
+		return cmn.NewError("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits))
+	}
+	if height != commit.Height() {
+		return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
+	}
+
+	oldVotingPower := int64(0)
+	newVotingPower := int64(0)
+	seen := map[int]bool{}
+	round := commit.Round()
+
+	for idx, precommit := range commit.Precommits {
+		// first check as in VerifyCommit
+		if precommit == nil {
+			continue
+		}
+		if precommit.Height != height {
+			return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, precommit.Height)
+		}
+		if precommit.Round != round {
+			return cmn.NewError("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
+		}
+		if precommit.Type != VoteTypePrecommit {
+			return cmn.NewError("Invalid commit -- not precommit @ index %v", idx)
+		}
+		if !blockID.Equals(precommit.BlockID) {
+			continue // Not an error, but doesn't count
+		}
+
+		// we only grab by address, ignoring unknown validators
+		vi, ov := valSet.GetByAddress(precommit.ValidatorAddress)
+		if ov == nil || seen[vi] {
+			continue // missing or double vote...
+		}
+		seen[vi] = true
+
+		// Validate signature against the old set
+		precommitSignBytes := precommit.SignBytes(chainID)
+		if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
+			return cmn.NewError("Invalid commit -- invalid signature: %v", precommit)
+		}
+		// Good precommit!
+		oldVotingPower += ov.VotingPower
+
+		// check the new set
+		_, cv := newSet.GetByIndex(idx)
+		if cv.PubKey.Equals(ov.PubKey) {
+			// make sure this is properly set in the current block as well
+			newVotingPower += cv.VotingPower
+		}
+	}
+
+	if oldVotingPower <= valSet.TotalVotingPower()*2/3 {
+		return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v",
+			oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
+	} else if newVotingPower <= newSet.TotalVotingPower()*2/3 {
+		return cmn.NewError("Invalid commit -- insufficient cur voting power: got %v, needed %v",
+			newVotingPower, (newSet.TotalVotingPower()*2/3 + 1))
+	}
+	return nil
+}
+
+func (valSet *ValidatorSet) String() string {
+	return valSet.StringIndented("")
+}
+
+// StringIndented returns an indented string representation of the ValidatorSet.
+func (valSet *ValidatorSet) StringIndented(indent string) string {
+	if valSet == nil {
+		return "nil-ValidatorSet"
+	}
+	valStrings := []string{}
+	valSet.Iterate(func(index int, val *Validator) bool {
+		valStrings = append(valStrings, val.String())
+		return false
+	})
+	return fmt.Sprintf(`ValidatorSet{
+%s  Proposer: %v
+%s  Validators:
+%s    %v
+%s}`,
+		indent, valSet.GetProposer().String(),
+		indent,
+		indent, strings.Join(valStrings, "\n"+indent+"    "),
+		indent)
+
+}
+
+//-------------------------------------
+// Implements sort for sorting validators by address.
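+//
+// Illustrative sketch: sort.Sort(ValidatorsByAddress(vals)) orders vals
+// ascending by raw Address bytes.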
+ +// Sort validators by address +type ValidatorsByAddress []*Validator + +func (vs ValidatorsByAddress) Len() int { + return len(vs) +} + +func (vs ValidatorsByAddress) Less(i, j int) bool { + return bytes.Compare(vs[i].Address, vs[j].Address) == -1 +} + +func (vs ValidatorsByAddress) Swap(i, j int) { + it := vs[i] + vs[i] = vs[j] + vs[j] = it +} + +//------------------------------------- +// Use with Heap for sorting validators by accum + +type accumComparable struct { + *Validator +} + +// We want to find the validator with the greatest accum. +func (ac accumComparable) Less(o interface{}) bool { + other := o.(accumComparable).Validator + larger := ac.CompareAccum(other) + return bytes.Equal(larger.Address, ac.Address) +} + +//---------------------------------------- +// For testing + +// RandValidatorSet returns a randomized validator set, useful for testing. +// NOTE: PrivValidator are in order. +// UNSTABLE +func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { + vals := make([]*Validator, numValidators) + privValidators := make([]PrivValidator, numValidators) + for i := 0; i < numValidators; i++ { + val, privValidator := RandValidator(false, votingPower) + vals[i] = val + privValidators[i] = privValidator + } + valSet := NewValidatorSet(vals) + sort.Sort(PrivValidatorsByAddress(privValidators)) + return valSet, privValidators +} + +/////////////////////////////////////////////////////////////////////////////// +// Safe multiplication and addition/subtraction + +func safeMul(a, b int64) (int64, bool) { + if a == 0 || b == 0 { + return 0, false + } + if a == 1 { + return b, false + } + if b == 1 { + return a, false + } + if a == math.MinInt64 || b == math.MinInt64 { + return -1, true + } + c := a * b + return c, c/b != a +} + +func safeAdd(a, b int64) (int64, bool) { + if b > 0 && a > math.MaxInt64-b { + return -1, true + } else if b < 0 && a < math.MinInt64-b { + return -1, true + } + return a + b, false +} + +func safeSub(a, b int64) (int64, bool) { + if b > 0 && a < math.MinInt64+b { + return -1, true + } else if b < 0 && a > math.MaxInt64+b { + return -1, true + } + return a - b, false +} + +func safeMulClip(a, b int64) int64 { + c, overflow := safeMul(a, b) + if overflow { + if (a < 0 || b < 0) && !(a < 0 && b < 0) { + return math.MinInt64 + } + return math.MaxInt64 + } + return c +} + +func safeAddClip(a, b int64) int64 { + c, overflow := safeAdd(a, b) + if overflow { + if b < 0 { + return math.MinInt64 + } + return math.MaxInt64 + } + return c +} + +func safeSubClip(a, b int64) int64 { + c, overflow := safeSub(a, b) + if overflow { + if b > 0 { + return math.MinInt64 + } + return math.MaxInt64 + } + return c +} diff --git a/types/validator_set_test.go b/types/validator_set_test.go new file mode 100644 index 000000000..61f4dada9 --- /dev/null +++ b/types/validator_set_test.go @@ -0,0 +1,372 @@ +package types + +import ( + "bytes" + "math" + "strings" + "testing" + "testing/quick" + "time" + + "github.com/stretchr/testify/assert" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestCopy(t *testing.T) { + vset := randValidatorSet(10) + vsetHash := vset.Hash() + if len(vsetHash) == 0 { + t.Fatalf("ValidatorSet had unexpected zero hash") + } + + vsetCopy := vset.Copy() + vsetCopyHash := vsetCopy.Hash() + + if !bytes.Equal(vsetHash, vsetCopyHash) { + t.Fatalf("ValidatorSet copy had wrong hash. 
Orig: %X, Copy: %X", vsetHash, vsetCopyHash) + } +} + +func BenchmarkValidatorSetCopy(b *testing.B) { + b.StopTimer() + vset := NewValidatorSet([]*Validator{}) + for i := 0; i < 1000; i++ { + privKey := crypto.GenPrivKeyEd25519() + pubKey := privKey.PubKey() + val := NewValidator(pubKey, 0) + if !vset.Add(val) { + panic("Failed to add validator") + } + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + vset.Copy() + } +} + +//------------------------------------------------------------------- + +func TestProposerSelection1(t *testing.T) { + vset := NewValidatorSet([]*Validator{ + newValidator([]byte("foo"), 1000), + newValidator([]byte("bar"), 300), + newValidator([]byte("baz"), 330), + }) + proposers := []string{} + for i := 0; i < 99; i++ { + val := vset.GetProposer() + proposers = append(proposers, string(val.Address)) + vset.IncrementAccum(1) + } + expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo` + if expected != strings.Join(proposers, " ") { + t.Errorf("Expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " ")) + } +} + +func TestProposerSelection2(t *testing.T) { + addr0 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + addr1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} + addr2 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2} + + // when all voting power is same, we go in order of addresses + val0, val1, val2 := newValidator(addr0, 100), newValidator(addr1, 100), newValidator(addr2, 100) + valList := []*Validator{val0, val1, val2} + vals := NewValidatorSet(valList) + for i := 0; i < len(valList)*5; i++ { + ii := (i) % len(valList) + prop := vals.GetProposer() + if !bytes.Equal(prop.Address, valList[ii].Address) { + t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address) + } + vals.IncrementAccum(1) + } + + // One validator has more than the others, but not enough to propose twice in a row + *val2 = *newValidator(addr2, 400) + vals = NewValidatorSet(valList) + // vals.IncrementAccum(1) + prop := vals.GetProposer() + if !bytes.Equal(prop.Address, addr2) { + t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address) + } + vals.IncrementAccum(1) + prop = vals.GetProposer() + if !bytes.Equal(prop.Address, addr0) { + t.Fatalf("Expected smallest address to be validator. Got %X", prop.Address) + } + + // One validator has more than the others, and enough to be proposer twice in a row + *val2 = *newValidator(addr2, 401) + vals = NewValidatorSet(valList) + prop = vals.GetProposer() + if !bytes.Equal(prop.Address, addr2) { + t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address) + } + vals.IncrementAccum(1) + prop = vals.GetProposer() + if !bytes.Equal(prop.Address, addr2) { + t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address) + } + vals.IncrementAccum(1) + prop = vals.GetProposer() + if !bytes.Equal(prop.Address, addr0) { + t.Fatalf("Expected smallest address to be validator. 
Got %X", prop.Address) + } + + // each validator should be the proposer a proportional number of times + val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3) + valList = []*Validator{val0, val1, val2} + propCount := make([]int, 3) + vals = NewValidatorSet(valList) + N := 1 + for i := 0; i < 120*N; i++ { + prop := vals.GetProposer() + ii := prop.Address[19] + propCount[ii]++ + vals.IncrementAccum(1) + } + + if propCount[0] != 40*N { + t.Fatalf("Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d", 40*N, 120*N, propCount[0], 120*N) + } + if propCount[1] != 50*N { + t.Fatalf("Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d", 50*N, 120*N, propCount[1], 120*N) + } + if propCount[2] != 30*N { + t.Fatalf("Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d", 30*N, 120*N, propCount[2], 120*N) + } +} + +func TestProposerSelection3(t *testing.T) { + vset := NewValidatorSet([]*Validator{ + newValidator([]byte("a"), 1), + newValidator([]byte("b"), 1), + newValidator([]byte("c"), 1), + newValidator([]byte("d"), 1), + }) + + proposerOrder := make([]*Validator, 4) + for i := 0; i < 4; i++ { + proposerOrder[i] = vset.GetProposer() + vset.IncrementAccum(1) + } + + // i for the loop + // j for the times + // we should go in order for ever, despite some IncrementAccums with times > 1 + var i, j int + for ; i < 10000; i++ { + got := vset.GetProposer().Address + expected := proposerOrder[j%4].Address + if !bytes.Equal(got, expected) { + t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) + } + + // serialize, deserialize, check proposer + b := vset.toBytes() + vset.fromBytes(b) + + computed := vset.GetProposer() // findGetProposer() + if i != 0 { + if !bytes.Equal(got, computed.Address) { + t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j)) + } + } + + // times is usually 1 + times := 1 + mod := (cmn.RandInt() % 5) + 1 + if cmn.RandInt()%mod > 0 { + // sometimes its up to 5 + times = cmn.RandInt() % 5 + } + vset.IncrementAccum(times) + + j += times + } +} + +func newValidator(address []byte, power int64) *Validator { + return &Validator{Address: address, VotingPower: power} +} + +func randPubKey() crypto.PubKey { + var pubKey [32]byte + copy(pubKey[:], cmn.RandBytes(32)) + return crypto.PubKeyEd25519(pubKey) +} + +func randValidator_() *Validator { + val := NewValidator(randPubKey(), cmn.RandInt64()) + val.Accum = cmn.RandInt64() + return val +} + +func randValidatorSet(numValidators int) *ValidatorSet { + validators := make([]*Validator, numValidators) + for i := 0; i < numValidators; i++ { + validators[i] = randValidator_() + } + return NewValidatorSet(validators) +} + +func (valSet *ValidatorSet) toBytes() []byte { + bz, err := cdc.MarshalBinary(valSet) + if err != nil { + panic(err) + } + return bz +} + +func (valSet *ValidatorSet) fromBytes(b []byte) { + err := cdc.UnmarshalBinary(b, &valSet) + if err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + panic(err) + } +} + +//------------------------------------------------------------------- + +func TestValidatorSetTotalVotingPowerOverflows(t *testing.T) { + vset := NewValidatorSet([]*Validator{ + {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0}, + {Address: []byte("b"), VotingPower: math.MaxInt64, Accum: 0}, + {Address: []byte("c"), VotingPower: math.MaxInt64, Accum: 0}, + 
}) + + assert.EqualValues(t, math.MaxInt64, vset.TotalVotingPower()) +} + +func TestValidatorSetIncrementAccumOverflows(t *testing.T) { + // NewValidatorSet calls IncrementAccum(1) + vset := NewValidatorSet([]*Validator{ + // too much voting power + 0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0}, + // too big accum + 1: {Address: []byte("b"), VotingPower: 10, Accum: math.MaxInt64}, + // almost too big accum + 2: {Address: []byte("c"), VotingPower: 10, Accum: math.MaxInt64 - 5}, + }) + + assert.Equal(t, int64(0), vset.Validators[0].Accum, "0") // because we decrement val with most voting power + assert.EqualValues(t, math.MaxInt64, vset.Validators[1].Accum, "1") + assert.EqualValues(t, math.MaxInt64, vset.Validators[2].Accum, "2") +} + +func TestValidatorSetIncrementAccumUnderflows(t *testing.T) { + // NewValidatorSet calls IncrementAccum(1) + vset := NewValidatorSet([]*Validator{ + 0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: math.MinInt64}, + 1: {Address: []byte("b"), VotingPower: 1, Accum: math.MinInt64}, + }) + + vset.IncrementAccum(5) + + assert.EqualValues(t, math.MinInt64, vset.Validators[0].Accum, "0") + assert.EqualValues(t, math.MinInt64, vset.Validators[1].Accum, "1") +} + +func TestSafeMul(t *testing.T) { + f := func(a, b int64) bool { + c, overflow := safeMul(a, b) + return overflow || (!overflow && c == a*b) + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } +} + +func TestSafeAdd(t *testing.T) { + f := func(a, b int64) bool { + c, overflow := safeAdd(a, b) + return overflow || (!overflow && c == a+b) + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } +} + +func TestSafeMulClip(t *testing.T) { + assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MinInt64, math.MinInt64)) + assert.EqualValues(t, math.MinInt64, safeMulClip(math.MaxInt64, math.MinInt64)) + assert.EqualValues(t, math.MinInt64, safeMulClip(math.MinInt64, math.MaxInt64)) + assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MaxInt64, 2)) +} + +func TestSafeAddClip(t *testing.T) { + assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10)) + assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64)) + assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10)) +} + +func TestSafeSubClip(t *testing.T) { + assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10)) + assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64)) + assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64)) + assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10)) +} + +//------------------------------------------------------------------- + +func TestValidatorSetVerifyCommit(t *testing.T) { + privKey := crypto.GenPrivKeyEd25519() + pubKey := privKey.PubKey() + v1 := NewValidator(pubKey, 1000) + vset := NewValidatorSet([]*Validator{v1}) + + chainID := "mychainID" + blockID := BlockID{Hash: []byte("hello")} + height := int64(5) + vote := &Vote{ + ValidatorAddress: v1.Address, + ValidatorIndex: 0, + Height: height, + Round: 0, + Timestamp: time.Now().UTC(), + Type: VoteTypePrecommit, + BlockID: blockID, + } + sig, err := privKey.Sign(vote.SignBytes(chainID)) + assert.NoError(t, err) + vote.Signature = sig + commit := &Commit{ + BlockID: blockID, + Precommits: []*Vote{vote}, + } + + badChainID := "notmychainID" + badBlockID := BlockID{Hash: []byte("goodbye")} + badHeight := height + 1 + badCommit := &Commit{ + BlockID: blockID, + Precommits: []*Vote{nil}, + } + + // test some 
error cases + // TODO: test more cases! + cases := []struct { + chainID string + blockID BlockID + height int64 + commit *Commit + }{ + {badChainID, blockID, height, commit}, + {chainID, badBlockID, height, commit}, + {chainID, blockID, badHeight, commit}, + {chainID, blockID, height, badCommit}, + } + + for i, c := range cases { + err := vset.VerifyCommit(c.chainID, c.blockID, c.height, c.commit) + assert.NotNil(t, err, i) + } + + // test a good one + err = vset.VerifyCommit(chainID, blockID, height, commit) + assert.Nil(t, err) +} diff --git a/types/vote.go b/types/vote.go new file mode 100644 index 000000000..ed4ebd73e --- /dev/null +++ b/types/vote.go @@ -0,0 +1,117 @@ +package types + +import ( + "bytes" + "errors" + "fmt" + "time" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var ( + ErrVoteUnexpectedStep = errors.New("Unexpected step") + ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index") + ErrVoteInvalidValidatorAddress = errors.New("Invalid validator address") + ErrVoteInvalidSignature = errors.New("Invalid signature") + ErrVoteInvalidBlockHash = errors.New("Invalid block hash") + ErrVoteNonDeterministicSignature = errors.New("Non-deterministic signature") + ErrVoteNil = errors.New("Nil vote") +) + +type ErrVoteConflictingVotes struct { + *DuplicateVoteEvidence +} + +func (err *ErrVoteConflictingVotes) Error() string { + return fmt.Sprintf("Conflicting votes from validator %v", err.PubKey.Address()) +} + +func NewConflictingVoteError(val *Validator, voteA, voteB *Vote) *ErrVoteConflictingVotes { + return &ErrVoteConflictingVotes{ + &DuplicateVoteEvidence{ + PubKey: val.PubKey, + VoteA: voteA, + VoteB: voteB, + }, + } +} + +// Types of votes +// TODO Make a new type "VoteType" +const ( + VoteTypePrevote = byte(0x01) + VoteTypePrecommit = byte(0x02) +) + +func IsVoteTypeValid(type_ byte) bool { + switch type_ { + case VoteTypePrevote: + return true + case VoteTypePrecommit: + return true + default: + return false + } +} + +// Address is hex bytes. TODO: crypto.Address +type Address = cmn.HexBytes + +// Represents a prevote, precommit, or commit vote from validators for consensus. +type Vote struct { + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` + Height int64 `json:"height"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` + Type byte `json:"type"` + BlockID BlockID `json:"block_id"` // zero if vote is nil. 
+ Signature crypto.Signature `json:"signature"` +} + +func (vote *Vote) SignBytes(chainID string) []byte { + bz, err := cdc.MarshalJSON(CanonicalVote(chainID, vote)) + if err != nil { + panic(err) + } + return bz +} + +func (vote *Vote) Copy() *Vote { + voteCopy := *vote + return &voteCopy +} + +func (vote *Vote) String() string { + if vote == nil { + return "nil-Vote" + } + var typeString string + switch vote.Type { + case VoteTypePrevote: + typeString = "Prevote" + case VoteTypePrecommit: + typeString = "Precommit" + default: + cmn.PanicSanity("Unknown vote type") + } + + return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %v @ %s}", + vote.ValidatorIndex, cmn.Fingerprint(vote.ValidatorAddress), + vote.Height, vote.Round, vote.Type, typeString, + cmn.Fingerprint(vote.BlockID.Hash), vote.Signature, + CanonicalTime(vote.Timestamp)) +} + +func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { + if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) { + return ErrVoteInvalidValidatorAddress + } + + if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) { + return ErrVoteInvalidSignature + } + return nil +} diff --git a/types/vote_set.go b/types/vote_set.go new file mode 100644 index 000000000..c51681053 --- /dev/null +++ b/types/vote_set.go @@ -0,0 +1,603 @@ +package types + +import ( + "bytes" + "fmt" + "strings" + "sync" + + "github.com/pkg/errors" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +// UNSTABLE +// XXX: duplicate of p2p.ID to avoid dependence between packages. +// Perhaps we can have a minimal types package containing this (and other things?) +// that both `types` and `p2p` import ? +type P2PID string + +/* + VoteSet helps collect signatures from validators at each height+round for a + predefined vote type. + + We need VoteSet to be able to keep track of conflicting votes when validators + double-sign. Yet, we can't keep track of *all* the votes seen, as that could + be a DoS attack vector. + + There are two storage areas for votes. + 1. voteSet.votes + 2. voteSet.votesByBlock + + `.votes` is the "canonical" list of votes. It always has at least one vote, + if a vote from a validator had been seen at all. Usually it keeps track of + the first vote seen, but when a 2/3 majority is found, votes for that get + priority and are copied over from `.votesByBlock`. + + `.votesByBlock` keeps track of a list of votes for a particular block. There + are two ways a &blockVotes{} gets created in `.votesByBlock`. + 1. the first vote seen by a validator was for the particular block. + 2. a peer claims to have seen 2/3 majority for the particular block. + + Since the first vote from a validator will always get added in `.votesByBlock` + , all votes in `.votes` will have a corresponding entry in `.votesByBlock`. + + When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its + votes are copied into `.votes`. + + All this is memory bounded because conflicting votes only get added if a peer + told us to track that block, each peer only gets to tell us 1 such block, and, + there's only a limited number of peers. + + NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64. 
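+
+  Illustrative arithmetic for the +2/3 threshold used below: with a total
+  voting power of 100, a 2/3 majority requires sum >= 100*2/3 + 1 = 67
+  (integer division); with a total of 9, it requires 9*2/3 + 1 = 7.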
+*/
+type VoteSet struct {
+	chainID string
+	height  int64
+	round   int
+	type_   byte
+	valSet  *ValidatorSet
+
+	mtx           sync.Mutex
+	votesBitArray *cmn.BitArray
+	votes         []*Vote                // Primary votes to share
+	sum           int64                  // Sum of voting power for seen votes, discounting conflicts
+	maj23         *BlockID               // First 2/3 majority seen
+	votesByBlock  map[string]*blockVotes // string(blockHash|blockParts) -> blockVotes
+	peerMaj23s    map[P2PID]BlockID      // Maj23 for each peer
+}
+
+// NewVoteSet constructs a new VoteSet struct used to accumulate votes for the given height/round.
+func NewVoteSet(chainID string, height int64, round int, type_ byte, valSet *ValidatorSet) *VoteSet {
+	if height == 0 {
+		cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.")
+	}
+	return &VoteSet{
+		chainID:       chainID,
+		height:        height,
+		round:         round,
+		type_:         type_,
+		valSet:        valSet,
+		votesBitArray: cmn.NewBitArray(valSet.Size()),
+		votes:         make([]*Vote, valSet.Size()),
+		sum:           0,
+		maj23:         nil,
+		votesByBlock:  make(map[string]*blockVotes, valSet.Size()),
+		peerMaj23s:    make(map[P2PID]BlockID),
+	}
+}
+
+func (voteSet *VoteSet) ChainID() string {
+	return voteSet.chainID
+}
+
+func (voteSet *VoteSet) Height() int64 {
+	if voteSet == nil {
+		return 0
+	}
+	return voteSet.height
+}
+
+func (voteSet *VoteSet) Round() int {
+	if voteSet == nil {
+		return -1
+	}
+	return voteSet.round
+}
+
+func (voteSet *VoteSet) Type() byte {
+	if voteSet == nil {
+		return 0x00
+	}
+	return voteSet.type_
+}
+
+func (voteSet *VoteSet) Size() int {
+	if voteSet == nil {
+		return 0
+	}
+	return voteSet.valSet.Size()
+}
+
+// Returns added=true if vote is valid and new.
+// Otherwise returns err=ErrVote[
+//	UnexpectedStep | InvalidIndex | InvalidAddress |
+//	InvalidSignature | InvalidBlockHash | ConflictingVotes ]
+// Duplicate votes return added=false, err=nil.
+// Conflicting votes return added=*, err=ErrVoteConflictingVotes.
+// NOTE: vote should not be mutated after adding.
+// NOTE: VoteSet must not be nil
+// NOTE: Vote must not be nil
+func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) {
+	if voteSet == nil {
+		cmn.PanicSanity("AddVote() on nil VoteSet")
+	}
+	voteSet.mtx.Lock()
+	defer voteSet.mtx.Unlock()
+
+	return voteSet.addVote(vote)
+}
+
+// NOTE: Validates as much as possible before attempting to verify the signature.
+func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
+	if vote == nil {
+		return false, ErrVoteNil
+	}
+	valIndex := vote.ValidatorIndex
+	valAddr := vote.ValidatorAddress
+	blockKey := vote.BlockID.Key()
+
+	// Ensure that validator index was set
+	if valIndex < 0 {
+		return false, errors.Wrap(ErrVoteInvalidValidatorIndex, "Index < 0")
+	} else if len(valAddr) == 0 {
+		return false, errors.Wrap(ErrVoteInvalidValidatorAddress, "Empty address")
+	}
+
+	// Make sure the step matches.
+	if (vote.Height != voteSet.height) ||
+		(vote.Round != voteSet.round) ||
+		(vote.Type != voteSet.type_) {
+		return false, errors.Wrapf(ErrVoteUnexpectedStep, "Got %d/%d/%d, expected %d/%d/%d",
+			vote.Height, vote.Round, vote.Type,
+			voteSet.height, voteSet.round, voteSet.type_)
+	}
+
+	// Ensure that signer is a validator.
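+	// (Illustrative note: GetByIndex is an O(1) slice lookup; the address
+	// comparison below guards against a vote whose ValidatorIndex does not
+	// match its ValidatorAddress.)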
+ lookupAddr, val := voteSet.valSet.GetByIndex(valIndex) + if val == nil { + return false, errors.Wrapf(ErrVoteInvalidValidatorIndex, + "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) + } + + // Ensure that the signer has the right address + if !bytes.Equal(valAddr, lookupAddr) { + return false, errors.Wrapf(ErrVoteInvalidValidatorAddress, + "vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.", + valAddr, lookupAddr, valIndex) + } + + // If we already know of this vote, return false. + if existing, ok := voteSet.getVote(valIndex, blockKey); ok { + if existing.Signature.Equals(vote.Signature) { + return false, nil // duplicate + } + return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote) + } + + // Check signature. + if err := vote.Verify(voteSet.chainID, val.PubKey); err != nil { + return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey) + } + + // Add vote and get conflicting vote if any + added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower) + if conflicting != nil { + return added, NewConflictingVoteError(val, conflicting, vote) + } + if !added { + cmn.PanicSanity("Expected to add non-conflicting vote") + } + return added, nil +} + +// Returns (vote, true) if vote exists for valIndex and blockKey +func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) { + if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey { + return existing, true + } + if existing := voteSet.votesByBlock[blockKey].getByIndex(valIndex); existing != nil { + return existing, true + } + return nil, false +} + +// Assumes signature is valid. +// If conflicting vote exists, returns it. +func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower int64) (added bool, conflicting *Vote) { + valIndex := vote.ValidatorIndex + + // Already exists in voteSet.votes? + if existing := voteSet.votes[valIndex]; existing != nil { + if existing.BlockID.Equals(vote.BlockID) { + cmn.PanicSanity("addVerifiedVote does not expect duplicate votes") + } else { + conflicting = existing + } + // Replace vote if blockKey matches voteSet.maj23. + if voteSet.maj23 != nil && voteSet.maj23.Key() == blockKey { + voteSet.votes[valIndex] = vote + voteSet.votesBitArray.SetIndex(valIndex, true) + } + // Otherwise don't add it to voteSet.votes + } else { + // Add to voteSet.votes and incr .sum + voteSet.votes[valIndex] = vote + voteSet.votesBitArray.SetIndex(valIndex, true) + voteSet.sum += votingPower + } + + votesByBlock, ok := voteSet.votesByBlock[blockKey] + if ok { + if conflicting != nil && !votesByBlock.peerMaj23 { + // There's a conflict and no peer claims that this block is special. + return false, conflicting + } + // We'll add the vote in a bit. + } else { + // .votesByBlock doesn't exist... + if conflicting != nil { + // ... and there's a conflicting vote. + // We're not even tracking this blockKey, so just forget it. + return false, conflicting + } + // ... and there's no conflicting vote. + // Start tracking this blockKey + votesByBlock = newBlockVotes(false, voteSet.valSet.Size()) + voteSet.votesByBlock[blockKey] = votesByBlock + // We'll add the vote in a bit. 
+ } + + // Before adding to votesByBlock, see if we'll exceed quorum + origSum := votesByBlock.sum + quorum := voteSet.valSet.TotalVotingPower()*2/3 + 1 + + // Add vote to votesByBlock + votesByBlock.addVerifiedVote(vote, votingPower) + + // If we just crossed the quorum threshold and have 2/3 majority... + if origSum < quorum && quorum <= votesByBlock.sum { + // Only consider the first quorum reached + if voteSet.maj23 == nil { + maj23BlockID := vote.BlockID + voteSet.maj23 = &maj23BlockID + // And also copy votes over to voteSet.votes + for i, vote := range votesByBlock.votes { + if vote != nil { + voteSet.votes[i] = vote + } + } + } + } + + return true, conflicting +} + +// If a peer claims that it has 2/3 majority for given blockKey, call this. +// NOTE: if there are too many peers, or too much peer churn, +// this can cause memory issues. +// TODO: implement ability to remove peers too +// NOTE: VoteSet must not be nil +func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { + if voteSet == nil { + cmn.PanicSanity("SetPeerMaj23() on nil VoteSet") + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + + blockKey := blockID.Key() + + // Make sure peer hasn't already told us something. + if existing, ok := voteSet.peerMaj23s[peerID]; ok { + if existing.Equals(blockID) { + return nil // Nothing to do + } + return fmt.Errorf("SetPeerMaj23: Received conflicting blockID from peer %v. Got %v, expected %v", + peerID, blockID, existing) + } + voteSet.peerMaj23s[peerID] = blockID + + // Create .votesByBlock entry if needed. + votesByBlock, ok := voteSet.votesByBlock[blockKey] + if ok { + if votesByBlock.peerMaj23 { + return nil // Nothing to do + } + votesByBlock.peerMaj23 = true + // No need to copy votes, already there. + } else { + votesByBlock = newBlockVotes(true, voteSet.valSet.Size()) + voteSet.votesByBlock[blockKey] = votesByBlock + // No need to copy votes, no votes to copy over. 
+ } + return nil +} + +func (voteSet *VoteSet) BitArray() *cmn.BitArray { + if voteSet == nil { + return nil + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.votesBitArray.Copy() +} + +func (voteSet *VoteSet) BitArrayByBlockID(blockID BlockID) *cmn.BitArray { + if voteSet == nil { + return nil + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + votesByBlock, ok := voteSet.votesByBlock[blockID.Key()] + if ok { + return votesByBlock.bitArray.Copy() + } + return nil +} + +// NOTE: if validator has conflicting votes, returns "canonical" vote +func (voteSet *VoteSet) GetByIndex(valIndex int) *Vote { + if voteSet == nil { + return nil + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.votes[valIndex] +} + +func (voteSet *VoteSet) GetByAddress(address []byte) *Vote { + if voteSet == nil { + return nil + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + valIndex, val := voteSet.valSet.GetByAddress(address) + if val == nil { + cmn.PanicSanity("GetByAddress(address) returned nil") + } + return voteSet.votes[valIndex] +} + +func (voteSet *VoteSet) HasTwoThirdsMajority() bool { + if voteSet == nil { + return false + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.maj23 != nil +} + +func (voteSet *VoteSet) IsCommit() bool { + if voteSet == nil { + return false + } + if voteSet.type_ != VoteTypePrecommit { + return false + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.maj23 != nil +} + +func (voteSet *VoteSet) HasTwoThirdsAny() bool { + if voteSet == nil { + return false + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.sum > voteSet.valSet.TotalVotingPower()*2/3 +} + +func (voteSet *VoteSet) HasAll() bool { + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.sum == voteSet.valSet.TotalVotingPower() +} + +// If there was a +2/3 majority for blockID, return blockID and true. +// Else, return the empty BlockID{} and false. +func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) { + if voteSet == nil { + return BlockID{}, false + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + if voteSet.maj23 != nil { + return *voteSet.maj23, true + } + return BlockID{}, false +} + +//-------------------------------------------------------------------------------- +// Strings and JSON + +func (voteSet *VoteSet) String() string { + if voteSet == nil { + return "nil-VoteSet" + } + return voteSet.StringIndented("") +} + +func (voteSet *VoteSet) StringIndented(indent string) string { + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + voteStrings := make([]string, len(voteSet.votes)) + for i, vote := range voteSet.votes { + if vote == nil { + voteStrings[i] = "nil-Vote" + } else { + voteStrings[i] = vote.String() + } + } + return fmt.Sprintf(`VoteSet{ +%s H:%v R:%v T:%v +%s %v +%s %v +%s %v +%s}`, + indent, voteSet.height, voteSet.round, voteSet.type_, + indent, strings.Join(voteStrings, "\n"+indent+" "), + indent, voteSet.votesBitArray, + indent, voteSet.peerMaj23s, + indent) +} + +// Marshal the VoteSet to JSON. Same as String(), just in JSON, +// and without the height/round/type_ (since its already included in the votes). 
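+//
+// Illustrative shape of the output (json field names from VoteSetJSON below):
+//
+//	{"votes": [...], "votes_bit_array": "...", "peer_maj_23s": {...}}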
+func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return cdc.MarshalJSON(VoteSetJSON{ + voteSet.voteStrings(), + voteSet.bitArrayString(), + voteSet.peerMaj23s, + }) +} + +// More human readable JSON of the vote set +// NOTE: insufficient for unmarshalling from (compressed votes) +// TODO: make the peerMaj23s nicer to read (eg just the block hash) +type VoteSetJSON struct { + Votes []string `json:"votes"` + VotesBitArray string `json:"votes_bit_array"` + PeerMaj23s map[P2PID]BlockID `json:"peer_maj_23s"` +} + +// Return the bit-array of votes including +// the fraction of power that has voted like: +// "BA{29:xx__x__x_x___x__x_______xxx__} 856/1304 = 0.66" +func (voteSet *VoteSet) BitArrayString() string { + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.bitArrayString() +} + +func (voteSet *VoteSet) bitArrayString() string { + bAString := voteSet.votesBitArray.String() + voted, total, fracVoted := voteSet.sumTotalFrac() + return fmt.Sprintf("%s %d/%d = %.2f", bAString, voted, total, fracVoted) +} + +// Returns a list of votes compressed to more readable strings. +func (voteSet *VoteSet) VoteStrings() []string { + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + return voteSet.voteStrings() +} + +func (voteSet *VoteSet) voteStrings() []string { + voteStrings := make([]string, len(voteSet.votes)) + for i, vote := range voteSet.votes { + if vote == nil { + voteStrings[i] = "nil-Vote" + } else { + voteStrings[i] = vote.String() + } + } + return voteStrings +} + +func (voteSet *VoteSet) StringShort() string { + if voteSet == nil { + return "nil-VoteSet" + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + _, _, frac := voteSet.sumTotalFrac() + return fmt.Sprintf(`VoteSet{H:%v R:%v T:%v +2/3:%v(%v) %v %v}`, + voteSet.height, voteSet.round, voteSet.type_, voteSet.maj23, frac, voteSet.votesBitArray, voteSet.peerMaj23s) +} + +// return the power voted, the total, and the fraction +func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { + voted, total := voteSet.sum, voteSet.valSet.TotalVotingPower() + fracVoted := float64(voted) / float64(total) + return voted, total, fracVoted +} + +//-------------------------------------------------------------------------------- +// Commit + +func (voteSet *VoteSet) MakeCommit() *Commit { + if voteSet.type_ != VoteTypePrecommit { + cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is VoteTypePrecommit") + } + voteSet.mtx.Lock() + defer voteSet.mtx.Unlock() + + // Make sure we have a 2/3 majority + if voteSet.maj23 == nil { + cmn.PanicSanity("Cannot MakeCommit() unless a blockhash has +2/3") + } + + // For every validator, get the precommit + votesCopy := make([]*Vote, len(voteSet.votes)) + copy(votesCopy, voteSet.votes) + return &Commit{ + BlockID: *voteSet.maj23, + Precommits: votesCopy, + } +} + +//-------------------------------------------------------------------------------- + +/* + Votes for a particular block + There are two ways a *blockVotes gets created for a blockKey. + 1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false) + 2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true) +*/ +type blockVotes struct { + peerMaj23 bool // peer claims to have maj23 + bitArray *cmn.BitArray // valIndex -> hasVote? 
+ votes []*Vote // valIndex -> *Vote + sum int64 // vote sum +} + +func newBlockVotes(peerMaj23 bool, numValidators int) *blockVotes { + return &blockVotes{ + peerMaj23: peerMaj23, + bitArray: cmn.NewBitArray(numValidators), + votes: make([]*Vote, numValidators), + sum: 0, + } +} + +func (vs *blockVotes) addVerifiedVote(vote *Vote, votingPower int64) { + valIndex := vote.ValidatorIndex + if existing := vs.votes[valIndex]; existing == nil { + vs.bitArray.SetIndex(valIndex, true) + vs.votes[valIndex] = vote + vs.sum += votingPower + } +} + +func (vs *blockVotes) getByIndex(index int) *Vote { + if vs == nil { + return nil + } + return vs.votes[index] +} + +//-------------------------------------------------------------------------------- + +// Common interface between *consensus.VoteSet and types.Commit +type VoteSetReader interface { + Height() int64 + Round() int + Type() byte + Size() int + BitArray() *cmn.BitArray + GetByIndex(int) *Vote + IsCommit() bool +} diff --git a/types/vote_set_test.go b/types/vote_set_test.go new file mode 100644 index 000000000..32ceb7b16 --- /dev/null +++ b/types/vote_set_test.go @@ -0,0 +1,508 @@ +package types + +import ( + "bytes" + "testing" + "time" + + crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + tst "github.com/tendermint/tendermint/libs/test" +) + +// NOTE: privValidators are in order +func randVoteSet(height int64, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []PrivValidator) { + valSet, privValidators := RandValidatorSet(numValidators, votingPower) + return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators +} + +// Convenience: Return new vote with different validator address/index +func withValidator(vote *Vote, addr []byte, idx int) *Vote { + vote = vote.Copy() + vote.ValidatorAddress = addr + vote.ValidatorIndex = idx + return vote +} + +// Convenience: Return new vote with different height +func withHeight(vote *Vote, height int64) *Vote { + vote = vote.Copy() + vote.Height = height + return vote +} + +// Convenience: Return new vote with different round +func withRound(vote *Vote, round int) *Vote { + vote = vote.Copy() + vote.Round = round + return vote +} + +// Convenience: Return new vote with different type +func withType(vote *Vote, type_ byte) *Vote { + vote = vote.Copy() + vote.Type = type_ + return vote +} + +// Convenience: Return new vote with different blockHash +func withBlockHash(vote *Vote, blockHash []byte) *Vote { + vote = vote.Copy() + vote.BlockID.Hash = blockHash + return vote +} + +// Convenience: Return new vote with different blockParts +func withBlockPartsHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote { + vote = vote.Copy() + vote.BlockID.PartsHeader = blockPartsHeader + return vote +} + +func TestAddVote(t *testing.T) { + height, round := int64(1), 0 + voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) + val0 := privValidators[0] + + // t.Logf(">> %v", voteSet) + + if voteSet.GetByAddress(val0.GetAddress()) != nil { + t.Errorf("Expected GetByAddress(val0.Address) to be nil") + } + if voteSet.BitArray().GetIndex(0) { + t.Errorf("Expected BitArray.GetIndex(0) to be false") + } + blockID, ok := voteSet.TwoThirdsMajority() + if ok || !blockID.IsZero() { + t.Errorf("There should be no 2/3 majority") + } + + vote := &Vote{ + ValidatorAddress: val0.GetAddress(), + ValidatorIndex: 0, // since privValidators are in order + Height: height, + Round: round, + Type: 
VoteTypePrevote,
+		Timestamp:        time.Now().UTC(),
+		BlockID:          BlockID{nil, PartSetHeader{}},
+	}
+	_, err := signAddVote(val0, vote, voteSet)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if voteSet.GetByAddress(val0.GetAddress()) == nil {
+		t.Errorf("Expected GetByAddress(val0.Address) to be present")
+	}
+	if !voteSet.BitArray().GetIndex(0) {
+		t.Errorf("Expected BitArray.GetIndex(0) to be true")
+	}
+	blockID, ok = voteSet.TwoThirdsMajority()
+	if ok || !blockID.IsZero() {
+		t.Errorf("There should be no 2/3 majority")
+	}
+}
+
+func Test2_3Majority(t *testing.T) {
+	height, round := int64(1), 0
+	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1)
+
+	voteProto := &Vote{
+		ValidatorAddress: nil, // NOTE: must fill in
+		ValidatorIndex:   -1,  // NOTE: must fill in
+		Height:           height,
+		Round:            round,
+		Type:             VoteTypePrevote,
+		Timestamp:        time.Now().UTC(),
+		BlockID:          BlockID{nil, PartSetHeader{}},
+	}
+	// 6 out of 10 voted for nil.
+	for i := 0; i < 6; i++ {
+		vote := withValidator(voteProto, privValidators[i].GetAddress(), i)
+		_, err := signAddVote(privValidators[i], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
+	}
+	blockID, ok := voteSet.TwoThirdsMajority()
+	if ok || !blockID.IsZero() {
+		t.Errorf("There should be no 2/3 majority")
+	}
+
+	// 7th validator voted for some blockhash
+	{
+		vote := withValidator(voteProto, privValidators[6].GetAddress(), 6)
+		_, err := signAddVote(privValidators[6], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
+		if err != nil {
+			t.Error(err)
+		}
+		blockID, ok = voteSet.TwoThirdsMajority()
+		if ok || !blockID.IsZero() {
+			t.Errorf("There should be no 2/3 majority")
+		}
+	}
+
+	// 8th validator voted for nil.
+	{
+		vote := withValidator(voteProto, privValidators[7].GetAddress(), 7)
+		_, err := signAddVote(privValidators[7], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
+		blockID, ok = voteSet.TwoThirdsMajority()
+		if !ok || !blockID.IsZero() {
+			t.Errorf("There should be 2/3 majority for nil")
+		}
+	}
+}
+
+func Test2_3MajorityRedux(t *testing.T) {
+	height, round := int64(1), 0
+	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 100, 1)
+
+	blockHash := crypto.CRandBytes(32)
+	blockPartsTotal := 123
+	blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)}
+
+	voteProto := &Vote{
+		ValidatorAddress: nil, // NOTE: must fill in
+		ValidatorIndex:   -1,  // NOTE: must fill in
+		Height:           height,
+		Round:            round,
+		Timestamp:        time.Now().UTC(),
+		Type:             VoteTypePrevote,
+		BlockID:          BlockID{blockHash, blockPartsHeader},
+	}
+
+	// 66 out of 100 voted for the block.
+ for i := 0; i < 66; i++ { + vote := withValidator(voteProto, privValidators[i].GetAddress(), i) + _, err := signAddVote(privValidators[i], vote, voteSet) + if err != nil { + t.Error(err) + } + } + blockID, ok := voteSet.TwoThirdsMajority() + if ok || !blockID.IsZero() { + t.Errorf("There should be no 2/3 majority") + } + + // 67th validator voted for nil + { + vote := withValidator(voteProto, privValidators[66].GetAddress(), 66) + _, err := signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) + if err != nil { + t.Error(err) + } + blockID, ok = voteSet.TwoThirdsMajority() + if ok || !blockID.IsZero() { + t.Errorf("There should be no 2/3 majority: last vote added was nil") + } + } + + // 68th validator voted for a different BlockParts PartSetHeader + { + vote := withValidator(voteProto, privValidators[67].GetAddress(), 67) + blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} + _, err := signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + if err != nil { + t.Error(err) + } + blockID, ok = voteSet.TwoThirdsMajority() + if ok || !blockID.IsZero() { + t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Hash") + } + } + + // 69th validator voted for different BlockParts Total + { + vote := withValidator(voteProto, privValidators[68].GetAddress(), 68) + blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash} + _, err := signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + if err != nil { + t.Error(err) + } + blockID, ok = voteSet.TwoThirdsMajority() + if ok || !blockID.IsZero() { + t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Total") + } + } + + // 70th validator voted for different BlockHash + { + vote := withValidator(voteProto, privValidators[69].GetAddress(), 69) + _, err := signAddVote(privValidators[69], withBlockHash(vote, cmn.RandBytes(32)), voteSet) + if err != nil { + t.Error(err) + } + blockID, ok = voteSet.TwoThirdsMajority() + if ok || !blockID.IsZero() { + t.Errorf("There should be no 2/3 majority: last vote added had different BlockHash") + } + } + + // 71st validator voted for the right BlockHash & BlockPartsHeader + { + vote := withValidator(voteProto, privValidators[70].GetAddress(), 70) + _, err := signAddVote(privValidators[70], vote, voteSet) + if err != nil { + t.Error(err) + } + blockID, ok = voteSet.TwoThirdsMajority() + if !ok || !blockID.Equals(BlockID{blockHash, blockPartsHeader}) { + t.Errorf("There should be 2/3 majority") + } + } +} + +func TestBadVotes(t *testing.T) { + height, round := int64(1), 0 + voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) + + voteProto := &Vote{ + ValidatorAddress: nil, + ValidatorIndex: -1, + Height: height, + Round: round, + Timestamp: time.Now().UTC(), + Type: VoteTypePrevote, + BlockID: BlockID{nil, PartSetHeader{}}, + } + + // val0 votes for nil. + { + vote := withValidator(voteProto, privValidators[0].GetAddress(), 0) + added, err := signAddVote(privValidators[0], vote, voteSet) + if !added || err != nil { + t.Errorf("Expected VoteSet.Add to succeed") + } + } + + // val0 votes again for some block. 
+ {
+ vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
+ added, err := signAddVote(privValidators[0], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
+ if added || err == nil {
+ t.Errorf("Expected VoteSet.Add to fail, conflicting vote.")
+ }
+ }
+
+ // val1 votes on another height
+ {
+ vote := withValidator(voteProto, privValidators[1].GetAddress(), 1)
+ added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet)
+ if added || err == nil {
+ t.Errorf("Expected VoteSet.Add to fail, wrong height")
+ }
+ }
+
+ // val2 votes on another round
+ {
+ vote := withValidator(voteProto, privValidators[2].GetAddress(), 2)
+ added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet)
+ if added || err == nil {
+ t.Errorf("Expected VoteSet.Add to fail, wrong round")
+ }
+ }
+
+ // val3 votes with another type.
+ {
+ vote := withValidator(voteProto, privValidators[3].GetAddress(), 3)
+ added, err := signAddVote(privValidators[3], withType(vote, VoteTypePrecommit), voteSet)
+ if added || err == nil {
+ t.Errorf("Expected VoteSet.Add to fail, wrong type")
+ }
+ }
+}
+
+func TestConflicts(t *testing.T) {
+ height, round := int64(1), 0
+ voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 4, 1)
+ blockHash1 := cmn.RandBytes(32)
+ blockHash2 := cmn.RandBytes(32)
+
+ voteProto := &Vote{
+ ValidatorAddress: nil,
+ ValidatorIndex: -1,
+ Height: height,
+ Round: round,
+ Timestamp: time.Now().UTC(),
+ Type: VoteTypePrevote,
+ BlockID: BlockID{nil, PartSetHeader{}},
+ }
+
+ // val0 votes for nil.
+ {
+ vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
+ added, err := signAddVote(privValidators[0], vote, voteSet)
+ if !added || err != nil {
+ t.Errorf("Expected VoteSet.Add to succeed")
+ }
+ }
+
+ // val0 votes again for blockHash1.
+ {
+ vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
+ added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
+ if added {
+ t.Errorf("Expected VoteSet.Add to fail, conflicting vote.")
+ }
+ if err == nil {
+ t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
+ }
+ }
+
+ // start tracking blockHash1
+ voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
+
+ // val0 votes again for blockHash1.
+ {
+ vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
+ added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
+ if !added {
+ t.Errorf("Expected VoteSet.Add to succeed, called SetPeerMaj23().")
+ }
+ if err == nil {
+ t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
+ }
+ }
+
+ // attempt tracking blockHash2, should fail because already set for peerA.
+ voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
+
+ // val0 votes again, this time for blockHash2.
+ {
+ vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
+ added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash2), voteSet)
+ if added {
+ t.Errorf("Expected VoteSet.Add to fail, duplicate SetPeerMaj23() from peerA")
+ }
+ if err == nil {
+ t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
+ }
+ }
+
+ // val1 votes for blockHash1.
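+ // (Recap of the SetPeerMaj23 behavior exercised above: once a peer
+ // claims a +2/3 majority for a BlockID, conflicting votes for that
+ // BlockID are kept (added == true) so the majority can surface, but Add
+ // still returns an error; a second, different claim from the same peer
+ // is ignored.)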
+ {
+ vote := withValidator(voteProto, privValidators[1].GetAddress(), 1)
+ added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet)
+ if !added || err != nil {
+ t.Errorf("Expected VoteSet.Add to succeed")
+ }
+ }
+
+ // check
+ if voteSet.HasTwoThirdsMajority() {
+ t.Errorf("We shouldn't have 2/3 majority yet")
+ }
+ if voteSet.HasTwoThirdsAny() {
+ t.Errorf("We shouldn't have +2/3 of votes in total yet")
+ }
+
+ // val2 votes for blockHash2.
+ {
+ vote := withValidator(voteProto, privValidators[2].GetAddress(), 2)
+ added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet)
+ if !added || err != nil {
+ t.Errorf("Expected VoteSet.Add to succeed")
+ }
+ }
+
+ // check
+ if voteSet.HasTwoThirdsMajority() {
+ t.Errorf("We shouldn't have 2/3 majority yet")
+ }
+ if !voteSet.HasTwoThirdsAny() {
+ t.Errorf("We should have +2/3 of votes in total")
+ }
+
+ // now attempt tracking blockHash1
+ voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
+
+ // val2 votes for blockHash1.
+ {
+ vote := withValidator(voteProto, privValidators[2].GetAddress(), 2)
+ added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet)
+ if !added {
+ t.Errorf("Expected VoteSet.Add to succeed")
+ }
+ if err == nil {
+ t.Errorf("Expected VoteSet.Add to return error, conflicting vote")
+ }
+ }
+
+ // check
+ if !voteSet.HasTwoThirdsMajority() {
+ t.Errorf("We should have 2/3 majority for blockHash1")
+ }
+ blockIDMaj23, _ := voteSet.TwoThirdsMajority()
+ if !bytes.Equal(blockIDMaj23.Hash, blockHash1) {
+ t.Errorf("Got the wrong 2/3 majority block hash")
+ }
+ if !voteSet.HasTwoThirdsAny() {
+ t.Errorf("We should have +2/3 of votes in total")
+ }
+
+}
+
+func TestMakeCommit(t *testing.T) {
+ height, round := int64(1), 0
+ voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrecommit, 10, 1)
+ blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)}
+
+ voteProto := &Vote{
+ ValidatorAddress: nil,
+ ValidatorIndex: -1,
+ Height: height,
+ Round: round,
+ Timestamp: time.Now().UTC(),
+ Type: VoteTypePrecommit,
+ BlockID: BlockID{blockHash, blockPartsHeader},
+ }
+
+ // 6 out of 10 voted for some block.
+ for i := 0; i < 6; i++ {
+ vote := withValidator(voteProto, privValidators[i].GetAddress(), i)
+ _, err := signAddVote(privValidators[i], vote, voteSet)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ // MakeCommit should panic without a +2/3 majority.
+ tst.AssertPanics(t, "Doesn't have +2/3 majority", func() { voteSet.MakeCommit() })
+
+ // 7th voted for some other block.
+ {
+ vote := withValidator(voteProto, privValidators[6].GetAddress(), 6)
+ vote = withBlockHash(vote, cmn.RandBytes(32))
+ vote = withBlockPartsHeader(vote, PartSetHeader{123, cmn.RandBytes(32)})
+
+ _, err := signAddVote(privValidators[6], vote, voteSet)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ // The 8th voted like everyone else.
+ {
+ vote := withValidator(voteProto, privValidators[7].GetAddress(), 7)
+ _, err := signAddVote(privValidators[7], vote, voteSet)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ commit := voteSet.MakeCommit()
+
+ // Commit should have 10 elements
+ if len(commit.Precommits) != 10 {
+ t.Errorf("Commit Precommits should have the same number of precommits as validators")
+ }
+
+ // Ensure that Commit precommits are ordered.
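+ // (MakeCommit keeps one slot per validator, with nil entries for
+ // validators that did not precommit, which is why 10 elements are
+ // expected above even though only 8 validators voted.)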
+ if err := commit.ValidateBasic(); err != nil {
+ t.Errorf("Error in Commit.ValidateBasic(): %v", err)
+ }
+
+}
diff --git a/types/vote_test.go b/types/vote_test.go
new file mode 100644
index 000000000..cbb22aaae
--- /dev/null
+++ b/types/vote_test.go
@@ -0,0 +1,121 @@
+package types
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func examplePrevote() *Vote {
+ return exampleVote(VoteTypePrevote)
+}
+
+func examplePrecommit() *Vote {
+ return exampleVote(VoteTypePrecommit)
+}
+
+func exampleVote(t byte) *Vote {
+ var stamp, err = time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z")
+ if err != nil {
+ panic(err)
+ }
+
+ return &Vote{
+ ValidatorAddress: []byte("addr"),
+ ValidatorIndex: 56789,
+ Height: 12345,
+ Round: 2,
+ Timestamp: stamp,
+ Type: t,
+ BlockID: BlockID{
+ Hash: []byte("hash"),
+ PartsHeader: PartSetHeader{
+ Total: 1000000,
+ Hash: []byte("parts_hash"),
+ },
+ },
+ }
+}
+
+func TestVoteSignable(t *testing.T) {
+ vote := examplePrecommit()
+ signBytes := vote.SignBytes("test_chain_id")
+ signStr := string(signBytes)
+
+ expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"68617368","parts":{"hash":"70617274735F68617368","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2}`
+ if signStr != expected {
+ // NOTE: when this fails, you probably want to fix up consensus/replay_test too
+ t.Errorf("Got unexpected sign string for Vote. Expected:\n%v\nGot:\n%v", expected, signStr)
+ }
+}
+
+func TestVoteString(t *testing.T) {
+ tc := []struct {
+ name string
+ in string
+ out string
+ }{
+ {"Precommit", examplePrecommit().String(), `Vote{56789:616464720000 12345/02/2(Precommit) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
+ {"Prevote", examplePrevote().String(), `Vote{56789:616464720000 12345/02/1(Prevote) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
+ }
+
+ for _, tt := range tc {
+ tt := tt
+ t.Run(tt.name, func(st *testing.T) {
+ if tt.in != tt.out {
+ t.Errorf("Got unexpected string for Vote. Expected:\n%v\nGot:\n%v", tt.out, tt.in)
+ }
+ })
+ }
+}
+
+func TestVoteVerifySignature(t *testing.T) {
+ privVal := NewMockPV()
+ pubKey := privVal.GetPubKey()
+
+ vote := examplePrecommit()
+ signBytes := vote.SignBytes("test_chain_id")
+
+ // sign it
+ err := privVal.SignVote("test_chain_id", vote)
+ require.NoError(t, err)
+
+ // verify the same vote
+ valid := pubKey.VerifyBytes(vote.SignBytes("test_chain_id"), vote.Signature)
+ require.True(t, valid)
+
+ // serialize, deserialize and verify again....
+ precommit := new(Vote)
+ bs, err := cdc.MarshalBinary(vote)
+ require.NoError(t, err)
+ err = cdc.UnmarshalBinary(bs, precommit)
+ require.NoError(t, err)
+
+ // verify the transmitted vote
+ newSignBytes := precommit.SignBytes("test_chain_id")
+ require.Equal(t, string(signBytes), string(newSignBytes))
+ valid = pubKey.VerifyBytes(newSignBytes, precommit.Signature)
+ require.True(t, valid)
+}
+
+func TestIsVoteTypeValid(t *testing.T) {
+ tc := []struct {
+ name string
+ in byte
+ out bool
+ }{
+ {"Prevote", VoteTypePrevote, true},
+ {"Precommit", VoteTypePrecommit, true},
+ {"InvalidType", byte(3), false},
+ }
+
+ for _, tt := range tc {
+ tt := tt
+ t.Run(tt.name, func(st *testing.T) {
+ if rs := IsVoteTypeValid(tt.in); rs != tt.out {
+ t.Errorf("Got unexpected Vote type. Expected:\n%v\nGot:\n%v", tt.out, rs)
+ }
+ })
+ }
+}
diff --git a/types/wire.go b/types/wire.go
new file mode 100644
index 000000000..6342d7eba
--- /dev/null
+++ b/types/wire.go
@@ -0,0 +1,12 @@
+package types
+
+import (
+ "github.com/tendermint/go-amino"
+ "github.com/tendermint/tendermint/crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+ crypto.RegisterAmino(cdc)
+}
diff --git a/version/version.go b/version/version.go
new file mode 100644
index 000000000..f9faedf04
--- /dev/null
+++ b/version/version.go
@@ -0,0 +1,23 @@
+package version
+
+// Version components
+const (
+ Maj = "0"
+ Min = "22"
+ Fix = "2"
+)
+
+var (
+ // Version is the current version of Tendermint
+ // Must be a string because scripts like dist.sh read this file.
+ Version = "0.22.2-dev"
+
+ // GitCommit is the current HEAD set using ldflags.
+ GitCommit string
+)
+
+func init() {
+ if GitCommit != "" {
+ Version += "-" + GitCommit
+ }
+}
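+
+// Example (an assumption, not part of this diff; the exact flag lives in the
+// repository's build scripts): GitCommit is typically injected at build time
+// via ldflags, e.g.
+//
+//   go build -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=$(git rev-parse --short HEAD)"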