diff --git a/.circleci/config.yml b/.circleci/config.yml index 5a50a61c5..82ef52309 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,7 +16,7 @@ jobs: - checkout - restore_cache: keys: - - v2-pkg-cache + - v3-pkg-cache - run: name: tools command: | @@ -31,19 +31,18 @@ jobs: name: binaries command: | export PATH="$GOBIN:$PATH" - make install - cd abci && make install + make install install_abci - persist_to_workspace: root: /tmp/workspace paths: - bin - profiles - save_cache: - key: v2-pkg-cache + key: v3-pkg-cache paths: - /go/pkg - save_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} paths: - /go/src/github.com/tendermint/tendermint @@ -53,9 +52,9 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-pkg-cache + key: v3-pkg-cache - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: name: slate docs command: | @@ -69,9 +68,9 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-pkg-cache + key: v3-pkg-cache - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: name: metalinter command: | @@ -85,9 +84,9 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-pkg-cache + key: v3-pkg-cache - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: name: Run abci apps tests command: | @@ -102,9 +101,9 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-pkg-cache + key: v3-pkg-cache - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: name: Run abci-cli tests command: | @@ -117,9 +116,9 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-pkg-cache + key: v3-pkg-cache - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils - run: name: Run tests @@ -132,14 +131,14 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-pkg-cache + key: v3-pkg-cache - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: mkdir -p /tmp/logs - run: name: Run tests command: | - for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do + for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do id=$(basename "$pkg") GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log" @@ -157,9 +156,9 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-pkg-cache + key: v3-pkg-cache - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: name: Run tests command: bash test/persist/test_failure_indices.sh @@ -182,7 +181,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v2-tree-{{ .Environment.CIRCLE_SHA1 }} + key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: name: gather command: | diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 42f8f44ee..9586a8702 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,3 +2,6 @@ # Everything goes through Bucky, Anton, Alex. For now. 
* @ebuchman @melekes @xla + +# Precious documentation +/docs/ @zramsay @jolesbi diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE index e0edb29ff..ed3caac5f 100644 --- a/.github/ISSUE_TEMPLATE +++ b/.github/ISSUE_TEMPLATE @@ -33,7 +33,7 @@ in a case of bug. **How to reproduce it** (as minimally and precisely as possible): -**Logs (you can paste a part showing an error or attach the whole file)**: +**Logs (you can paste a small part showing an error or link a pastebin, gist, etc. containing more of the log file)**: **Config (you can paste only the changes you've made)**: diff --git a/.gitignore b/.gitignore index bcfd36db1..d4f36adbb 100644 --- a/.gitignore +++ b/.gitignore @@ -14,10 +14,9 @@ test/p2p/data/ test/logs coverage.txt docs/_build -docs/tools *.log abci-cli -abci/types/types.pb.go +docs/node_modules/ scripts/wal2json/wal2json scripts/cutWALUntil/cutWALUntil @@ -27,3 +26,10 @@ scripts/cutWALUntil/cutWALUntil libs/pubsub/query/fuzz_test/output shunit2 + +*/vendor +*/.glide +.terraform +terraform.tfstate +terraform.tfstate.backup +terraform.tfstate.d diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ffc95fac..27d5656b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,135 @@ # Changelog -## TBD +## TBA + +## 0.22.5 + +*July 23rd, 2018* + +BREAKING CHANGES: +- [crypto] Refactor `tendermint/crypto` into many subpackages +- [libs/common] remove exponentially distributed random numbers + +IMPROVEMENTS: +- [abci, libs/common] Generated gogoproto static marshaller methods +- [config] Increase default send/recv rates to 5 MB/s +- [p2p] allow persistent peers to be private + +BUG FIXES +- [mempool] fixed a race condition when `create_empty_blocks=false` where a + transaction is published at an old height. +- [p2p] dial the external IP set up by `persistent_peers`, not the internal NAT IP +- [rpc] make `/status` RPC endpoint resistant to consensus halt + +## 0.22.4 + +*July 14th, 2018* + +BREAKING CHANGES: +- [genesis] removed deprecated `app_options` field. +- [types] Genesis.AppStateJSON -> Genesis.AppState + +FEATURES: +- [tools] Merged in from github.com/tendermint/tools BUG FIXES: +- [tools/tm-bench] Various fixes +- [consensus] Wait for WAL to stop on shutdown +- [abci] Fix #1891, pending requests cannot hang when the abci server dies. + Previously a crash in BeginBlock could leave tendermint in a broken state. + +## 0.22.3 + +*July 10th, 2018* + +IMPROVEMENTS +- Update dependencies + * pin all values in Gopkg.toml to version or commit + * update golang/protobuf to v1.1.0 + +## 0.22.2 + +*July 10th, 2018* + +IMPROVEMENTS +- More cleanup post repo merge! +- [docs] Include `ecosystem.json` and `tendermint-bft.md` from deprecated `aib-data` repository. +- [config] Add `instrumentation.max_open_connections`, which limits the number + of requests in flight to the Prometheus server (if enabled). Default: 3. + + +BUG FIXES +- [rpc] Allow unquoted integers in requests + - NOTE: this is only for URI requests. JSONRPC requests and all responses + will use quoted integers (the proto3 JSON standard). +- [consensus] Fix halt on shutdown -- [rpc] limited number of HTTP/WebSocket connections +## 0.22.1 + +*July 5th, 2018* + +IMPROVEMENTS + +* Cleanup post repo-merge. +* [docs] Various improvements. + +BUG FIXES + +* [state] Return error when EndBlock returns a 0-power validator that isn't + already in the validator set. +* [consensus] Shut down WAL properly.
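That last [state] fix encodes a rule worth spelling out: a validator update with power 0 means "remove this validator", so it is only meaningful for a validator already in the set. A standalone Go sketch of the check (stand-in types and hypothetical helper names, not Tendermint's actual state-machine code):

    package main

    import "fmt"

    // validatorUpdate is a stand-in for the ABCI validator-update shape.
    type validatorUpdate struct {
        PubKey []byte
        Power  int64
    }

    // checkValidatorUpdates mirrors the 0.22.1 [state] fix: negative power is
    // never valid, and power 0 (a removal) must name a known validator.
    func checkValidatorUpdates(current map[string]int64, updates []validatorUpdate) error {
        for _, u := range updates {
            if u.Power < 0 {
                return fmt.Errorf("validator %X: negative power %d", u.PubKey, u.Power)
            }
            if u.Power == 0 {
                if _, ok := current[string(u.PubKey)]; !ok {
                    return fmt.Errorf("cannot remove validator %X: not in the validator set", u.PubKey)
                }
            }
        }
        return nil
    }

    func main() {
        current := map[string]int64{"\x01": 10}
        bad := []validatorUpdate{{PubKey: []byte{0x02}, Power: 0}} // unknown key, power 0
        fmt.Println(checkValidatorUpdates(current, bad))           // prints the error
    }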
+ + +## 0.22.0 + +*July 2nd, 2018* + +BREAKING CHANGES: +- [config] + * Remove `max_block_size_txs` and `max_block_size_bytes` in favor of + consensus params from the genesis file. + * Rename `skip_upnp` to `upnp`, and turn it off by default. + * Change `max_packet_msg_size` back to `max_packet_msg_payload_size` +- [rpc] + * All integers are encoded as strings (part of the update for Amino v0.10.1) + * `syncing` is now called `catching_up` +- [types] Update Amino to v0.10.1 + * Amino is now fully proto3 compatible for the basic types + * JSON-encoded types now use the type name instead of the prefix bytes + * Integers are encoded as strings +- [crypto] Update go-crypto to v0.10.0 and merge into `crypto` + * privKey.Sign returns error. + * ed25519 address changed to the first 20-bytes of the SHA256 of the raw pubkey bytes + * `tmlibs/merkle` -> `crypto/merkle`. Uses SHA256 instead of RIPEMD160 +- [tmlibs] Update to v0.9.0 and merge into `libs` + * remove `merkle` package (moved to `crypto/merkle`) + +FEATURES +- [cmd] Added metrics (served under `/metrics` using a Prometheus client; + disabled by default). See the new `instrumentation` section in the config and + [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html) + guide. +- [p2p] Add IPv6 support to peering. +- [p2p] Add `external_address` to config to allow specifying the address for + peers to dial + +IMPROVEMENT +- [rpc/client] Supports https and wss now. +- [crypto] Make public key size into public constants +- [mempool] Log tx hash, not entire tx +- [abci] Merged in github.com/tendermint/abci +- [crypto] Merged in github.com/tendermint/go-crypto +- [libs] Merged in github.com/tendermint/tmlibs +- [docs] Move from .rst to .md + +BUG FIXES: +- [rpc] Limit maximum number of HTTP/WebSocket connections (`rpc.max_open_connections`) and gRPC connections - (`rpc.grpc_max_open_connections`). Check out [Running In - Production](https://tendermint.readthedocs.io/en/master/running-in-production.html) - guide if you want to increase them. + (`rpc.grpc_max_open_connections`). Check out "Running In Production" guide if + you want to increase them. +- [rpc] Limit maximum request body size to 1MB (header is limited to 1MB). +- [consensus] Fix a halting bug where `create_empty_blocks=false` +- [p2p] Fix panic in seed mode ## 0.21.0 @@ -25,13 +146,6 @@ IMPROVEMENT - [pubsub] Set default capacity to 0 - [docs] Various improvements -FEATURES - -- [main] added metrics (served under `/metrics` using a Prometheus client; - disabled by default). See the new `instrumentation` section in the config and - [metrics](https://tendermint.readthedocs.io/projects/tools/en/v0.21.0/metrics.html) - guide. - BUG FIXES - [consensus] Fix an issue where we don't make blocks after `fast_sync` when `create_empty_blocks=false` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5fd2d982f..3500732f5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,7 +17,7 @@ Instead, we use `git remote` to add the fork as a new remote for the original re For instance, to create a fork and work on a branch of it, I would: * Create the fork on github, using the fork button. - * Go to the original repo checked out locally (ie. `$GOPATH/src/github.com/tendermint/tendermint`) + * Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`) * `git remote rename origin upstream` * `git remote add origin git@github.com:ebuchman/basecoin.git` @@ -47,7 +47,7 @@ get_vendor_deps`). 
Even for dependencies under our control, dep helps us to keep multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use dep. -Run `dep status` to get a list of vendored dependencies that may not be +Run `dep status` to get a list of vendor dependencies that may not be up-to-date. ## Vagrant @@ -85,7 +85,7 @@ especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint - the latest state of development is on `develop` - `develop` must never fail `make test` - no --force onto `develop` (except when reverting a broken commit, which should seldom happen) -- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git add origin`) +- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`) - before submitting a pull request, begin `git rebase` on top of `develop` ### Pull Merge Procedure: @@ -110,7 +110,7 @@ especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint - make the required changes - these changes should be small and an absolute necessity - add a note to CHANGELOG.md -- bumb versions +- bump versions - push to hotfix-vX.X.X to run the extended integration tests on the CI - merge hotfix-vX.X.X to master - merge hotfix-vX.X.X to develop diff --git a/abci/Dockerfile.develop b/DOCKER/Dockerfile.abci similarity index 100% rename from abci/Dockerfile.develop rename to DOCKER/Dockerfile.abci diff --git a/Gopkg.lock b/Gopkg.lock index 8d73c93ff..3c305f02b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -11,12 +11,15 @@ branch = "master" name = "github.com/btcsuite/btcd" packages = ["btcec"] - revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64" + pruneopts = "UT" + revision = "f673a4b563b57b9a95832545c878669a7fa801d9" [[projects]] - branch = "master" name = "github.com/btcsuite/btcutil" - packages = ["base58"] + packages = [ + "base58", + "bech32" + ] revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" [[projects]] @@ -26,16 +29,15 @@ version = "v1.1.0" [[projects]] - branch = "master" name = "github.com/ebuchman/fail-test" packages = ["."] revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" [[projects]] - branch = "master" name = "github.com/fortytw2/leaktest" packages = ["."] - revision = "b008db64ef8daabb22ff6daa557f33b41d8f6ccd" + revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e" + version = "v1.2.0" [[projects]] name = "github.com/fsnotify/fsnotify" @@ -79,8 +81,8 @@ "sortkeys", "types" ] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" + revision = "7d68e886eac4f7e34d0d82241a6273d6c304c5cf" + version = "v1.1.0" [[projects]] name = "github.com/golang/protobuf" @@ -91,8 +93,8 @@ "ptypes/duration", "ptypes/timestamp" ] - revision = "925541529c1fa6821df4e44ce2723319eb2be768" - version = "v1.0.0" + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" [[projects]] branch = "master" @@ -113,7 +115,6 @@ ".", "hcl/ast", "hcl/parser", - "hcl/printer", "hcl/scanner", "hcl/strconv", "hcl/token", @@ -154,9 +155,11 @@ [[projects]] branch = "master" + digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355" name = "github.com/mitchellh/mapstructure" packages = ["."] - revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" + pruneopts = "UT" + revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" [[projects]] name = "github.com/pelletier/go-toml" @@ -182,14 +185,15 @@ "prometheus", "prometheus/promhttp" ] - revision = 
"c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" + revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632" [[projects]] branch = "master" + digest = "1:0f37e09b3e92aaeda5991581311f8dbf38944b36a3edec61cc2d1991f527554a" name = "github.com/prometheus/client_model" packages = ["go"] - revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + pruneopts = "UT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" @@ -210,10 +214,9 @@ "nfs", "xfs" ] - revision = "94663424ae5ae9856b40a9f170762b4197024661" + revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a" [[projects]] - branch = "master" name = "github.com/rcrowley/go-metrics" packages = ["."] revision = "e2704e165165ec55d062f5919b4b29494e9fa790" @@ -236,8 +239,8 @@ [[projects]] name = "github.com/spf13/cobra" packages = ["."] - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" + revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" + version = "v0.0.1" [[projects]] branch = "master" @@ -254,8 +257,8 @@ [[projects]] name = "github.com/spf13/viper" packages = ["."] - revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" - version = "v1.0.2" + revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7" + version = "v1.0.0" [[projects]] name = "github.com/stretchr/testify" @@ -263,8 +266,8 @@ "assert", "require" ] - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" [[projects]] branch = "master" @@ -283,7 +286,7 @@ "leveldb/table", "leveldb/util" ] - revision = "e2150783cd35f5b607daca48afd8c57ec54cc995" + revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445" [[projects]] branch = "master" @@ -301,23 +304,6 @@ revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" version = "0.10.1" -[[projects]] - name = "github.com/tendermint/tmlibs" - packages = [ - "autofile", - "cli", - "cli/flags", - "clist", - "common", - "db", - "flowrate", - "log", - "merkle", - "test" - ] - revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38" - version = "v0.8.4" - [[projects]] branch = "master" name = "golang.org/x/crypto" @@ -328,6 +314,7 @@ "curve25519", "hkdf", "internal/chacha20", + "internal/subtle", "nacl/box", "nacl/secretbox", "openpgp/armor", @@ -336,10 +323,9 @@ "ripemd160", "salsa20/salsa" ] - revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" + revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" [[projects]] - branch = "master" name = "golang.org/x/net" packages = [ "context", @@ -351,7 +337,7 @@ "netutil", "trace" ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" + revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" [[projects]] branch = "master" @@ -360,7 +346,8 @@ "cpu", "unix" ] - revision = "a9e25c09b96b8870693763211309e213c6ef299d" + pruneopts = "UT" + revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4" [[projects]] name = "golang.org/x/text" @@ -393,24 +380,32 @@ packages = [ ".", "balancer", + "balancer/base", + "balancer/roundrobin", "codes", "connectivity", "credentials", - "grpclb/grpc_lb_v1/messages", + "encoding", + "encoding/proto", "grpclog", "internal", + "internal/backoff", + "internal/channelz", + "internal/grpcrand", "keepalive", "metadata", "naming", "peer", "resolver", + "resolver/dns", + "resolver/passthrough", "stats", "status", "tap", "transport" ] - revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - version = "v1.7.5" + revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + version = "v1.13.0" [[projects]] name = "gopkg.in/yaml.v2" @@ -421,6 +416,6 
@@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "400de835ace8c8a69747afd675d1952daf750c251a02b9dac82a3c9dce4f65a8" + inputs-digest = "9beb2d27dc19e3f9e2c7f416f312f7129f5441b1b53def42503fc6f7d3a54b16" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index ff245d47e..e0eb2e011 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -23,65 +23,70 @@ # non-go = false # go-tests = true # unused-packages = true +# +########################################################### +# NOTE: All packages should be pinned to specific versions. +# Packages without releases must pin to a commit. -[[constraint]] - name = "github.com/ebuchman/fail-test" - branch = "master" - -[[constraint]] - name = "github.com/fortytw2/leaktest" - branch = "master" - [[constraint]] name = "github.com/go-kit/kit" - version = "~0.6.0" + version = "=0.7.0" [[constraint]] name = "github.com/gogo/protobuf" - version = "~1.0.0" + version = "=1.1.1" [[constraint]] name = "github.com/golang/protobuf" - version = "~1.0.0" + version = "=1.1.0" [[constraint]] name = "github.com/gorilla/websocket" - version = "~1.2.0" + version = "=1.2.0" [[constraint]] name = "github.com/pkg/errors" - version = "~0.8.0" - -[[constraint]] - name = "github.com/rcrowley/go-metrics" - branch = "master" + version = "=0.8.0" [[constraint]] name = "github.com/spf13/cobra" - version = "~0.0.1" + version = "=0.0.3" [[constraint]] name = "github.com/spf13/viper" - version = "~1.0.0" + version = "=1.0.2" [[constraint]] name = "github.com/stretchr/testify" - version = "~1.2.1" + version = "=1.2.2" [[constraint]] name = "github.com/tendermint/go-amino" - version = "~0.10.1" - -[[override]] - name = "github.com/tendermint/tmlibs" - version = "~0.8.4" + version = "=v0.11.1" [[constraint]] name = "google.golang.org/grpc" - version = "~1.7.3" + version = "=1.13.0" + +[[constraint]] + name = "github.com/fortytw2/leaktest" + version = "=1.2.0" -# this got updated and broke, so locked to an old working commit ... +################################### +## Some repos dont have releases. +## Pin to revision + +## We can remove this one by updating protobuf to v1.1.0 +## but then the grpc tests break with +#--- FAIL: TestBroadcastTx (0.01s) +#panic: message/group field common.KVPair:bytes without pointer [recovered] +# panic: message/group field common.KVPair:bytes without pointer +# +# ... +# +# github.com/tendermint/tendermint/rpc/grpc_test.TestBroadcastTx(0xc420a5ab40) +# /go/src/github.com/tendermint/tendermint/rpc/grpc/grpc_test.go:29 +0x141 [[override]] name = "google.golang.org/genproto" revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" @@ -94,10 +99,28 @@ go-tests = true unused-packages = true +[[constraint]] + name = "github.com/ebuchman/fail-test" + revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" + +# last revision used by go-crypto +[[constraint]] + name = "github.com/btcsuite/btcutil" + revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" + +# Haven't made a release since 2016. 
[[constraint]] name = "github.com/prometheus/client_golang" - version = "0.8.0" + revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632" + +[[constraint]] + name = "github.com/rcrowley/go-metrics" + revision = "e2704e165165ec55d062f5919b4b29494e9fa790" [[constraint]] - branch = "master" name = "golang.org/x/net" + revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" + +[prune] + go-tests = true + unused-packages = true diff --git a/Makefile b/Makefile index 9525560a1..b567fe793 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,13 @@ GOTOOLS = \ + github.com/mitchellh/gox \ github.com/golang/dep/cmd/dep \ - gopkg.in/alecthomas/gometalinter.v2 + gopkg.in/alecthomas/gometalinter.v2 \ + github.com/gogo/protobuf/protoc-gen-gogo \ + github.com/gogo/protobuf/gogoproto \ + github.com/square/certstrap PACKAGES=$(shell go list ./... | grep -v '/vendor/') + +INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf BUILD_TAGS?='tendermint' BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" @@ -11,7 +17,7 @@ check: check_tools ensure_deps ######################################## -### Build +### Build Tendermint build: CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ @@ -22,10 +28,35 @@ build_race: install: CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint +######################################## +### Protobuf + +protoc_all: protoc_libs protoc_abci protoc_grpc + +%.pb.go: %.proto + ## If you get the following error, + ## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory" + ## See https://stackoverflow.com/a/25518702 + protoc $(INCLUDE) $< --gogo_out=plugins=grpc:. + @echo "--> adding nolint declarations to protobuf generated files" + @awk -i inplace '/^\s*package \w+/ { print "//nolint" }1' $@ + +######################################## +### Build ABCI + +protoc_abci: abci/types/types.pb.go + +build_abci: + @go build -i ./abci/cmd/... + +install_abci: + @go install ./abci/cmd/... + ######################################## ### Distribution # dist builds binaries for all platforms and packages them for distribution +# TODO add abci to these scripts dist: @BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'" @@ -59,6 +90,17 @@ ensure_deps: @echo "--> Running dep" @dep ensure +#For ABCI and libs +get_protoc: + @# https://github.com/google/protobuf/releases + curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \ + cd protobuf-3.4.1 && \ + DIST_LANG=cpp ./configure && \ + make && \ + make install && \ + cd .. && \ + rm -rf protobuf-3.4.1 + draw_deps: @# requires brew install graphviz or apt-get install graphviz go get github.com/RobotsAndPencils/goviz @@ -70,6 +112,32 @@ get_deps_bin_size: @find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log @echo "Results can be found here: $(CURDIR)/deps_bin_size.log" +######################################## +### Libs + +protoc_libs: libs/common/types.pb.go + +gen_certs: clean_certs + ## Generating certificates for TLS testing... + certstrap init --common-name "tendermint.com" --passphrase "" + certstrap request-cert -ip "::" --passphrase "" + certstrap sign "::" --CA "tendermint.com" --passphrase "" + mv out/::.crt out/::.key db/remotedb + +clean_certs: + ## Cleaning TLS testing certificates... 
+ rm -rf out + rm -f db/remotedb/::.crt db/remotedb/::.key + +test_libs: gen_certs + GOCACHE=off go test -tags gcc $(PACKAGES) + make clean_certs + +grpc_dbserver: + protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto + +protoc_grpc: rpc/grpc/types.pb.go + ######################################## ### Testing @@ -87,6 +155,15 @@ test_apps: # requires `abci-cli` and `tendermint` binaries installed bash test/app/test.sh +test_abci_apps: + bash abci/tests/test_app/test.sh + +test_abci_cli: + # test the cli against the examples in the tutorial at: + # ./docs/abci-cli.md + # if test fails, update the docs ^ + @ bash abci/tests/test_cli/test.sh + test_persistence: # run the persistence tests using bash # requires `abci-cli` installed @@ -105,17 +182,16 @@ test_p2p: # requires 'tester' the image from above bash test/p2p/test.sh tester -need_abci: - bash scripts/install_abci_apps.sh - test_integrations: make build_docker_test_image make get_tools make get_vendor_deps make install - make need_abci make test_cover make test_apps + make test_abci_apps + make test_abci_cli + make test_libs make test_persistence make test_p2p @@ -233,4 +309,4 @@ build-slate: # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate +.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all diff --git a/README.md b/README.md index c31c44dbe..6268e0547 100644 --- a/README.md +++ b/README.md @@ -50,13 +50,13 @@ Go version | Go1.9 or higher ## Install -See the [install instructions](/docs/install.rst) +See the [install instructions](/docs/introduction/install.md) ## Quick Start -- [Single node](/docs/using-tendermint.rst) +- [Single node](/docs/using-tendermint.md) - [Local cluster using docker-compose](/networks/local) -- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.rst) +- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md) - [Join the public testnet](https://cosmos.network/testnet) ## Resources @@ -72,10 +72,7 @@ Additional information about some - and eventually all - of the sub-projects bel ### Sub-projects -* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface -* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library -* [Go-Crypto](http://github.com/tendermint/tendermint/crypto), an elliptic curve cryptography library -* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally +* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3 * [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation ### 
Tools @@ -119,8 +116,8 @@ CHANGELOG even if they don't lead to MINOR version bumps: - node Exported objects in these packages that are not covered by the versioning scheme -are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time. -Functions, types, and values in any other package may also change at any time. +are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any +time without notice. Functions, types, and values in any other package may also change at any time. ### Upgrades diff --git a/abci/Makefile b/abci/Makefile deleted file mode 100644 index 7d1c4b2e5..000000000 --- a/abci/Makefile +++ /dev/null @@ -1,174 +0,0 @@ -GOTOOLS = \ - github.com/mitchellh/gox \ - github.com/golang/dep/cmd/dep \ - gopkg.in/alecthomas/gometalinter.v2 \ - github.com/gogo/protobuf/protoc-gen-gogo \ - github.com/gogo/protobuf/gogoproto -GOTOOLS_CHECK = gox dep gometalinter.v2 protoc protoc-gen-gogo -PACKAGES=$(shell go list ./... | grep -v '/vendor/') -INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf - -all: check get_vendor_deps protoc build test install metalinter - -check: check_tools - - -######################################## -### Build - -protoc: - ## If you get the following error, - ## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory" - ## See https://stackoverflow.com/a/25518702 - protoc $(INCLUDE) --gogo_out=plugins=grpc:. types/*.proto - @echo "--> adding nolint declarations to protobuf generated files" - @awk '/package types/ { print "//nolint: gas"; print; next }1' types/types.pb.go > types/types.pb.go.new - @mv types/types.pb.go.new types/types.pb.go - -build: - @go build -i ./cmd/... - -dist: - @bash scripts/dist.sh - @bash scripts/publish.sh - -install: - @go install ./cmd/... - - -######################################## -### Tools & dependencies - -check_tools: - @# https://stackoverflow.com/a/25668869 - @echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\ - $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" - -get_tools: - @echo "--> Installing tools" - go get -u -v $(GOTOOLS) - @gometalinter.v2 --install - -get_protoc: - @# https://github.com/google/protobuf/releases - curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \ - cd protobuf-3.4.1 && \ - DIST_LANG=cpp ./configure && \ - make && \ - make install && \ - cd .. && \ - rm -rf protobuf-3.4.1 - -update_tools: - @echo "--> Updating tools" - @go get -u $(GOTOOLS) - -get_vendor_deps: - @rm -rf vendor/ - @echo "--> Running dep ensure" - @dep ensure - - -######################################## -### Testing - -test: - @find . -path ./vendor -prune -o -name "*.sock" -exec rm {} \; - @echo "==> Running go test" - @go test $(PACKAGES) - -test_race: - @find . 
-path ./vendor -prune -o -name "*.sock" -exec rm {} \; - @echo "==> Running go test --race" - @go test -v -race $(PACKAGES) - -### three tests tested by Jenkins -test_cover: - @ bash tests/test_cover.sh - -test_apps: - # test the counter using a go test script - @ bash tests/test_app/test.sh - -test_cli: - # test the cli against the examples in the tutorial at: - # http://tendermint.readthedocs.io/projects/tools/en/master/abci-cli.html - # - # XXX: if this test fails, fix it and update the docs at: - # https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.rst - @ bash tests/test_cli/test.sh - -######################################## -### Formatting, linting, and vetting - -fmt: - @go fmt ./... - -metalinter: - @echo "==> Running linter" - gometalinter.v2 --vendor --deadline=600s --disable-all \ - --enable=maligned \ - --enable=deadcode \ - --enable=goconst \ - --enable=goimports \ - --enable=gosimple \ - --enable=ineffassign \ - --enable=megacheck \ - --enable=misspell \ - --enable=staticcheck \ - --enable=safesql \ - --enable=structcheck \ - --enable=unconvert \ - --enable=unused \ - --enable=varcheck \ - --enable=vetshadow \ - ./... - #--enable=gas \ - #--enable=dupl \ - #--enable=errcheck \ - #--enable=gocyclo \ - #--enable=golint \ <== comments on anything exported - #--enable=gotype \ - #--enable=interfacer \ - #--enable=unparam \ - #--enable=vet \ - -metalinter_all: - protoc $(INCLUDE) --lint_out=. types/*.proto - gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... - - -######################################## -### Docker - -DEVDOC_SAVE = docker commit `docker ps -a -n 1 -q` devdoc:local - -docker_build: - docker build -t "tendermint/abci-dev" -f Dockerfile.develop . - -docker_run: - docker run -it -v "$(CURDIR):/go/src/github.com/tendermint/abci" -w "/go/src/github.com/tendermint/abci" "tendermint/abci-dev" /bin/bash - -docker_run_rm: - docker run -it --rm -v "$(CURDIR):/go/src/github.com/tendermint/abci" -w "/go/src/github.com/tendermint/abci" "tendermint/abci-dev" /bin/bash - -devdoc_init: - docker run -it -v "$(CURDIR):/go/src/github.com/tendermint/abci" -w "/go/src/github.com/tendermint/abci" tendermint/devdoc echo - # TODO make this safer - $(call DEVDOC_SAVE) - -devdoc: - docker run -it -v "$(CURDIR):/go/src/github.com/tendermint/abci" -w "/go/src/github.com/tendermint/abci" devdoc:local bash - -devdoc_save: - # TODO make this safer - $(call DEVDOC_SAVE) - -devdoc_clean: - docker rmi $$(docker images -f "dangling=true" -q) - - -# To avoid unintended conflicts with file names, always add to .PHONY -# unless there is a reason not to. 
-# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check protoc build dist install check_tools get_tools get_protoc update_tools get_vendor_deps test test_race fmt metalinter metalinter_all docker_build docker_run docker_run_rm devdoc_init devdoc devdoc_save devdoc_clean diff --git a/abci/client/client.go b/abci/client/client.go index cdf2c60e7..558588107 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -5,7 +5,7 @@ import ( "sync" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index e64fcb4d6..502ee0fcd 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -10,7 +10,7 @@ import ( grpc "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var _ Client = (*grpcClient)(nil) diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 225273a96..3d1f8d8e4 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -4,7 +4,7 @@ import ( "sync" types "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var _ Client = (*localClient)(nil) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 77c3d966a..affea1a9e 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -11,7 +11,7 @@ import ( "time" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const reqQueueSize = 256 // TODO make configurable @@ -357,6 +357,13 @@ func (cli *socketClient) queueRequest(req *types.Request) *ReqRes { } func (cli *socketClient) flushQueue() { + // mark all in-flight messages as resolved (they will get cli.Error()) + for req := cli.reqSent.Front(); req != nil; req = req.Next() { + reqres := req.Value.(*ReqRes) + reqres.Done() + } + + // mark all queued messages as resolved LOOP: for { select { diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index 5a9187fb4..49114afd5 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -2,10 +2,17 @@ package abcicli_test import ( "errors" + "fmt" "testing" "time" - "github.com/tendermint/tendermint/abci/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestSocketClientStopForErrorDeadlock(t *testing.T) { @@ -26,3 +33,89 @@ func TestSocketClientStopForErrorDeadlock(t *testing.T) { t.Fatalf("Test took too long, potential deadlock still exists") } } + +func TestProperSyncCalls(t *testing.T) { + app := slowApp{} + + s, c := setupClientServer(t, app) + defer s.Stop() + defer c.Stop() + + resp := make(chan error, 1) + go func() { + // This is BeginBlockSync unrolled.... 
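+ // (A *Sync call is exactly this pattern: queue the request
+ // asynchronously, send a Flush, and block until the flush
+ // response arrives; the result is then available on the
+ // ReqRes, and any connection failure is reported by c.Error().)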
+ reqres := c.BeginBlockAsync(types.RequestBeginBlock{}) + c.FlushSync() + res := reqres.Response.GetBeginBlock() + require.NotNil(t, res) + resp <- c.Error() + }() + + select { + case <-time.After(time.Second): + require.Fail(t, "No response arrived") + case err, ok := <-resp: + require.True(t, ok, "Must not close channel") + assert.NoError(t, err, "This should return success") + } +} + +func TestHangingSyncCalls(t *testing.T) { + app := slowApp{} + + s, c := setupClientServer(t, app) + defer s.Stop() + defer c.Stop() + + resp := make(chan error, 1) + go func() { + // Start BeginBlock and flush it + reqres := c.BeginBlockAsync(types.RequestBeginBlock{}) + flush := c.FlushAsync() + // wait 20 ms for all events to travel socket, but + // no response yet from server + time.Sleep(20 * time.Millisecond) + // kill the server, so the connections break + s.Stop() + + // wait for the response from BeginBlock + reqres.Wait() + flush.Wait() + resp <- c.Error() + }() + + select { + case <-time.After(time.Second): + require.Fail(t, "No response arrived") + case err, ok := <-resp: + require.True(t, ok, "Must not close channel") + assert.Error(t, err, "We should get EOF error") + } +} + +func setupClientServer(t *testing.T, app types.Application) ( + cmn.Service, abcicli.Client) { + // some port between 20k and 30k + port := 20000 + cmn.RandInt32()%10000 + addr := fmt.Sprintf("localhost:%d", port) + + s, err := server.NewServer(addr, "socket", app) + require.NoError(t, err) + err = s.Start() + require.NoError(t, err) + + c := abcicli.NewSocketClient(addr, true) + err = c.Start() + require.NoError(t, err) + + return s, c +} + +type slowApp struct { + types.BaseApplication +} + +func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { + time.Sleep(200 * time.Millisecond) + return types.ResponseBeginBlock{} +} diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 0e7b908e7..e20244011 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -11,8 +11,8 @@ import ( "github.com/spf13/cobra" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 87fc7b188..857e82baf 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -6,7 +6,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type CounterApplication struct { diff --git a/abci/example/example_test.go b/abci/example/example_test.go index a3d161a2f..bbb53b5af 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -11,8 +11,8 @@ import ( "golang.org/x/net/context" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 7ddacb5bf..0e69fab9f 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -2,7 +2,7 @@ package 
kvstore import ( "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // RandVal creates one random validator, with a key derived diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index e078d87d1..0f72b44ea 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -8,8 +8,8 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" ) var ( diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 46c28c99e..2d8f81272 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index a9067ac14..12ccbab78 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -9,9 +9,9 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/abci/scripts/abci-builder/Dockerfile b/abci/scripts/abci-builder/Dockerfile deleted file mode 100644 index 1182085b4..000000000 --- a/abci/scripts/abci-builder/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM golang:1.9.2 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - zip \ - && rm -rf /var/lib/apt/lists/* - -# We want to ensure that release builds never have any cgo dependencies so we -# switch that off at the highest level. -ENV CGO_ENABLED 0 - -RUN mkdir -p $GOPATH/src/github.com/tendermint/abci -WORKDIR $GOPATH/src/github.com/tendermint/abci diff --git a/abci/scripts/dist.sh b/abci/scripts/dist.sh deleted file mode 100755 index d94ce20f7..000000000 --- a/abci/scripts/dist.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -set -e - -REPO_NAME="abci" - -# Get the version from the environment, or try to figure it out. -if [ -z $VERSION ]; then - VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go) -fi -if [ -z "$VERSION" ]; then - echo "Please specify a version." - exit 1 -fi -echo "==> Building version $VERSION..." - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. -cd "$DIR" - -# Delete the old dir -echo "==> Removing old directory..." -rm -rf build/pkg -mkdir -p build/pkg - - -# Do a hermetic build inside a Docker container. 
-docker build -t tendermint/${REPO_NAME}-builder scripts/${REPO_NAME}-builder/ -docker run --rm -e "BUILD_TAGS=$BUILD_TAGS" -v "$(pwd)":/go/src/github.com/tendermint/${REPO_NAME} tendermint/${REPO_NAME}-builder ./scripts/dist_build.sh - -# Add $REPO_NAME and $VERSION prefix to package name. -rm -rf ./build/dist -mkdir -p ./build/dist -for FILENAME in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type f); do - FILENAME=$(basename "$FILENAME") - cp "./build/pkg/${FILENAME}" "./build/dist/${REPO_NAME}_${VERSION}_${FILENAME}" -done - -# Make the checksums. -pushd ./build/dist -shasum -a256 ./* > "./${REPO_NAME}_${VERSION}_SHA256SUMS" -popd - -# Done -echo -echo "==> Results:" -ls -hl ./build/dist - -exit 0 diff --git a/abci/scripts/dist_build.sh b/abci/scripts/dist_build.sh deleted file mode 100755 index c45c752ef..000000000 --- a/abci/scripts/dist_build.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. -cd "$DIR" - -# Get the git commit -GIT_COMMIT="$(git rev-parse --short HEAD)" -GIT_DESCRIBE="$(git describe --tags --always)" -GIT_IMPORT="github.com/tendermint/abci/version" - -# Determine the arch/os combos we're building for -XC_ARCH=${XC_ARCH:-"386 amd64 arm"} -XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} - -# Make sure build tools are available. -make get_tools - -# Get VENDORED dependencies -make get_vendor_deps - -BINARY="abci-cli" - -# Build! -echo "==> Building..." -"$(which gox)" \ - -os="${XC_OS}" \ - -arch="${XC_ARCH}" \ - -osarch="!darwin/arm !solaris/amd64 !freebsd/amd64" \ - -ldflags "-X ${GIT_IMPORT}.GitCommit='${GIT_COMMIT}' -X ${GIT_IMPORT}.GitDescribe='${GIT_DESCRIBE}'" \ - -output "build/pkg/{{.OS}}_{{.Arch}}/$BINARY" \ - -tags="${BUILD_TAGS}" \ - github.com/tendermint/abci/cmd/$BINARY - -# Zip all the files. -echo "==> Packaging..." -for PLATFORM in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type d); do - OSARCH=$(basename "${PLATFORM}") - echo "--> ${OSARCH}" - - pushd "$PLATFORM" >/dev/null 2>&1 - zip "../${OSARCH}.zip" ./* - popd >/dev/null 2>&1 -done - - - -exit 0 diff --git a/abci/scripts/publish.sh b/abci/scripts/publish.sh deleted file mode 100644 index 715f6c11b..000000000 --- a/abci/scripts/publish.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/bash - -# Get the version from the environment, or try to figure it out. 
-if [ -z $VERSION ]; then - VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go) -fi -aws s3 cp --recursive build/dist s3://tendermint/binaries/abci/v${VERSION} --acl public-read diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 3f8b599e9..ccbe609cc 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -6,7 +6,7 @@ import ( "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type GRPCServer struct { diff --git a/abci/server/server.go b/abci/server/server.go index 49dde4280..ada514fa8 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -13,7 +13,7 @@ import ( "fmt" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func NewServer(protoAddr, transport string, app types.Application) (cmn.Service, error) { diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index e7293ffd7..4b92f04cf 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // var maxNumberConnections = 2 diff --git a/abci/specification.rst b/abci/specification.rst deleted file mode 100644 index 8d8530def..000000000 --- a/abci/specification.rst +++ /dev/null @@ -1,294 +0,0 @@ -ABCI Specification -================== - -NOTE: this file has moved to `specification.md <./specification.md>`__. It is left to prevent link breakages for the forseable future. It can safely be deleted in a few months. - -Message Types -~~~~~~~~~~~~~ - -ABCI requests/responses are defined as simple Protobuf messages in `this -schema -file `__. -TendermintCore sends the requests, and the ABCI application sends the -responses. Here, we provide an overview of the messages types and how they -are used by Tendermint. Then we describe each request-response pair as a -function with arguments and return values, and add some notes on usage. - -Some messages (``Echo, Info, InitChain, BeginBlock, EndBlock, Commit``), don't -return errors because an error would indicate a critical failure in the -application and there's nothing Tendermint can do. The problem should be -addressed and both Tendermint and the application restarted. All other -messages (``SetOption, Query, CheckTx, DeliverTx``) return an -application-specific response ``Code uint32``, where only ``0`` is reserved for -``OK``. - -Some messages (``SetOption, Query, CheckTx, DeliverTx``) return -non-deterministic data in the form of ``Info`` and ``Log``. The ``Log`` is -intended for the literal output from the application's logger, while the -``Info`` is any additional info that should be returned. - -The first time a new blockchain is started, Tendermint calls ``InitChain``. -From then on, the Block Execution Sequence that causes the committed state to -be updated is as follows: - -``BeginBlock, [DeliverTx], EndBlock, Commit`` - -where one ``DeliverTx`` is called for each transaction in the block. -Cryptographic commitments to the results of DeliverTx, EndBlock, and -Commit are included in the header of the next block. 
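The sequence above (``BeginBlock, [DeliverTx], EndBlock, Commit``) maps one-to-one onto the Go ``types.Application`` interface used throughout this changeset. Below is a minimal counting app as a sketch: the method signatures and the ``server.NewServer`` call mirror code appearing elsewhere in this diff, while the listen address and the counter-as-app-hash scheme are illustrative assumptions.

    package main

    import (
        "encoding/binary"
        "log"

        "github.com/tendermint/tendermint/abci/server"
        "github.com/tendermint/tendermint/abci/types"
    )

    // countApp walks the block execution sequence described above:
    // BeginBlock, [DeliverTx...], EndBlock (defaulted), Commit.
    type countApp struct {
        types.BaseApplication        // no-op defaults for the other messages
        committed uint64             // the "Commit state"
        inBlock   uint64             // txs delivered in the current block
    }

    func (app *countApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
        app.inBlock = 0 // a new block starts, prior to any DeliverTx
        return types.ResponseBeginBlock{}
    }

    func (app *countApp) DeliverTx(tx []byte) types.ResponseDeliverTx {
        app.inBlock++
        return types.ResponseDeliverTx{Code: 0} // only 0 is reserved for OK
    }

    func (app *countApp) Commit() types.ResponseCommit {
        app.committed += app.inBlock
        // Every instance must return identical bytes: this value stands in
        // for the Merkle root that lands in the next block header.
        data := make([]byte, 8)
        binary.BigEndian.PutUint64(data, app.committed)
        return types.ResponseCommit{Data: data}
    }

    func main() {
        // 26658 is the conventional ABCI app port at this version (illustrative).
        srv, err := server.NewServer("localhost:26658", "socket", &countApp{})
        if err != nil {
            log.Fatal(err)
        }
        if err := srv.Start(); err != nil {
            log.Fatal(err)
        }
        select {} // serve until the process is killed
    }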
- -Tendermint opens three connections to the application to handle the different message -types: - -- ``Consensus Connection - InitChain, BeginBlock, DeliverTx, EndBlock, Commit`` - -- ``Mempool Connection - CheckTx`` - -- ``Info Connection - Info, SetOption, Query`` - -The ``Flush`` message is used on every connection, and the ``Echo`` message -is only used for debugging. - -Note that messages may be sent concurrently across all connections - -a typical application will thus maintain a distinct state for each -connection. They may be referred to as the ``DeliverTx state``, the -``CheckTx state``, and the ``Commit state`` respectively. - -See below for more details on the message types and how they are used. - -Echo -^^^^ - -- **Arguments**: - - - ``Message (string)``: A string to echo back - -- **Returns**: - - - ``Message (string)``: The input string - -- **Usage**: - - - Echo a string to test an abci client/server implementation - -Flush -^^^^^ - -- **Usage**: - - - Signals that messages queued on the client should be flushed to - the server. It is called periodically by the client implementation - to ensure asynchronous requests are actually sent, and is called - immediately to make a synchronous request, which returns when the - Flush response comes back. - -Info -^^^^ - -- **Arguments**: - - - ``Version (string)``: The Tendermint version - -- **Returns**: - - - ``Data (string)``: Some arbitrary information - - ``Version (Version)``: Version information - - ``LastBlockHeight (int64)``: Latest block for which the app has - called Commit - - ``LastBlockAppHash ([]byte)``: Latest result of Commit - -- **Usage**: - - - Return information about the application state. - - Used to sync Tendermint with the application during a handshake that - happens on startup. - - Tendermint expects ``LastBlockAppHash`` and ``LastBlockHeight`` to be - updated during ``Commit``, ensuring that ``Commit`` is never called twice - for the same block height. - -SetOption -^^^^^^^^^ - -- **Arguments**: - - - ``Key (string)``: Key to set - - ``Value (string)``: Value to set for key - -- **Returns**: - - - ``Code (uint32)``: Response code - - ``Log (string)``: The output of the application's logger. May be non-deterministic. - - ``Info (string)``: Additional information. May be non-deterministic. - -- **Usage**: - - - Set non-consensus critical application specific options. - - e.g. Key="min-fee", Value="100fermion" could set the minimum fee required for CheckTx - (but not DeliverTx - that would be consensus critical). - -InitChain -^^^^^^^^^ - -- **Arguments**: - - - ``Validators ([]Validator)``: Initial genesis validators - - ``AppStateBytes ([]byte)``: Serialized initial application state - -- **Usage**: - - - Called once upon genesis. - -Query -^^^^^ - -- **Arguments**: - - - ``Data ([]byte)``: Raw query bytes. Can be used with or in lieu of - Path. - - ``Path (string)``: Path of request, like an HTTP GET path. Can be - used with or in liue of Data. - - Apps MUST interpret '/store' as a query by key on the underlying - store. The key SHOULD be specified in the Data field. - - Apps SHOULD allow queries over specific types like '/accounts/...' - or '/votes/...' - - ``Height (int64)``: The block height for which you want the query - (default=0 returns data for the latest committed block). 
Note that - this is the height of the block containing the application's - Merkle root hash, which represents the state as it was after - committing the block at Height-1 - - ``Prove (bool)``: Return Merkle proof with response if possible - -- **Returns**: - - - ``Code (uint32)``: Response code. - - ``Log (string)``: The output of the application's logger. May be non-deterministic. - - ``Info (string)``: Additional information. May be non-deterministic. - - ``Index (int64)``: The index of the key in the tree. - - ``Key ([]byte)``: The key of the matching data. - - ``Value ([]byte)``: The value of the matching data. - - ``Proof ([]byte)``: Proof for the data, if requested. - - ``Height (int64)``: The block height from which data was derived. - Note that this is the height of the block containing the - application's Merkle root hash, which represents the state as it - was after committing the block at Height-1 - -- **Usage**: - - - Query for data from the application at current or past height. - - Optionally return Merkle proof. - -BeginBlock -^^^^^^^^^^ - -- **Arguments**: - - - ``Hash ([]byte)``: The block's hash. This can be derived from the - block header. - - ``Header (struct{})``: The block header - - ``AbsentValidators ([]int32)``: List of indices of validators not - included in the LastCommit - - ``ByzantineValidators ([]Evidence)``: List of evidence of - validators that acted maliciously - -- **Usage**: - - - Signals the beginning of a new block. Called prior to any DeliverTxs. - - The header is expected to at least contain the Height. - - The ``AbsentValidators`` and ``ByzantineValidators`` can be used to - determine rewards and punishments for the validators. - -CheckTx -^^^^^^^ - -- **Arguments**: - - - ``Tx ([]byte)``: The request transaction bytes - -- **Returns**: - - - ``Code (uint32)``: Response code - - ``Data ([]byte)``: Result bytes, if any. - - ``Log (string)``: The output of the application's logger. May be non-deterministic. - - ``Info (string)``: Additional information. May be non-deterministic. - - ``GasWanted (int64)``: Amount of gas request for transaction. - - ``GasUsed (int64)``: Amount of gas consumed by transaction. - - ``Tags ([]cmn.KVPair)``: Key-Value tags for filtering and indexing transactions (eg. by account). - - ``Fee (cmn.KI64Pair)``: Fee paid for the transaction. - -- **Usage**: Validate a mempool transaction, prior to broadcasting or - proposing. CheckTx should perform stateful but light-weight checks - of the validity of the transaction (like checking signatures and account balances), - but need not execute in full (like running a smart contract). - - Tendermint runs CheckTx and DeliverTx concurrently with eachother, - though on distinct ABCI connections - the mempool connection and the consensus - connection, respectively. - - The application should maintain a separate state to support CheckTx. - This state can be reset to the latest committed state during ``Commit``, - where Tendermint ensures the mempool is locked and not sending new ``CheckTx``. - After ``Commit``, the mempool will rerun CheckTx on all remaining - transactions, throwing out any that are no longer valid. - - Keys and values in Tags must be UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance": "100.0", "date": "2018-01-02") - - -DeliverTx -^^^^^^^^^ - -- **Arguments**: - - - ``Tx ([]byte)``: The request transaction bytes. - -- **Returns**: - - - ``Code (uint32)``: Response code. - - ``Data ([]byte)``: Result bytes, if any. 
- - ``Log (string)``: The output of the application's logger. May be non-deterministic. - - ``Info (string)``: Additional information. May be non-deterministic. - - ``GasWanted (int64)``: Amount of gas requested for transaction. - - ``GasUsed (int64)``: Amount of gas consumed by transaction. - - ``Tags ([]cmn.KVPair)``: Key-Value tags for filtering and indexing transactions (eg. by account). - - ``Fee (cmn.KI64Pair)``: Fee paid for the transaction. - -- **Usage**: - - - Deliver a transaction to be executed in full by the application. If the transaction is valid, - returns CodeType.OK. - - Keys and values in Tags must be UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance": "100.0", "time": "2018-01-02T12:30:00Z") - -EndBlock -^^^^^^^^ - -- **Arguments**: - - - ``Height (int64)``: Height of the block just executed. - -- **Returns**: - - - ``ValidatorUpdates ([]Validator)``: Changes to validator set (set - voting power to 0 to remove). - - ``ConsensusParamUpdates (ConsensusParams)``: Changes to - consensus-critical time, size, and other parameters. - -- **Usage**: - - - Signals the end of a block. - - Called prior to each Commit, after all transactions. - - Validator set and consensus params are updated with the result. - - Validator pubkeys are expected to be go-wire encoded. - -Commit -^^^^^^ - -- **Returns**: - - - ``Data ([]byte)``: The Merkle root hash - -- **Usage**: - - - Persist the application state. - - Return a Merkle root hash of the application state. - - It's critical that all application instances return the same hash. If not, - they will not be able to agree on the next block, because the hash is - included in the next block! diff --git a/abci/tests/benchmarks/parallel/parallel.go b/abci/tests/benchmarks/parallel/parallel.go index 0b4634492..78b69ed12 100644 --- a/abci/tests/benchmarks/parallel/parallel.go +++ b/abci/tests/benchmarks/parallel/parallel.go @@ -6,7 +6,7 @@ import ( "log" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func main() { diff --git a/abci/tests/benchmarks/simple/simple.go b/abci/tests/benchmarks/simple/simple.go index 77b98d57d..b0819799b 100644 --- a/abci/tests/benchmarks/simple/simple.go +++ b/abci/tests/benchmarks/simple/simple.go @@ -8,7 +8,7 @@ import ( "reflect" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func main() { diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 06db13d9a..f67297cd7 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -7,7 +7,7 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func InitChain(client abcicli.Client) error { diff --git a/abci/tests/test_app/app.go b/abci/tests/test_app/app.go index 42092345a..a33f4ee9e 100644 --- a/abci/tests/test_app/app.go +++ b/abci/tests/test_app/app.go @@ -7,7 +7,7 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func startClient(abciType string) abcicli.Client { diff --git a/abci/tests/test_cover.sh b/abci/tests/test_cover.sh deleted file mode 100755 index abbbbe563..000000000 --- a/abci/tests/test_cover.sh +++ /dev/null @@ 
diff --git a/abci/tests/benchmarks/parallel/parallel.go b/abci/tests/benchmarks/parallel/parallel.go
index 0b4634492..78b69ed12 100644
--- a/abci/tests/benchmarks/parallel/parallel.go
+++ b/abci/tests/benchmarks/parallel/parallel.go
@@ -6,7 +6,7 @@ import (
 	"log"
 
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func main() {
diff --git a/abci/tests/benchmarks/simple/simple.go b/abci/tests/benchmarks/simple/simple.go
index 77b98d57d..b0819799b 100644
--- a/abci/tests/benchmarks/simple/simple.go
+++ b/abci/tests/benchmarks/simple/simple.go
@@ -8,7 +8,7 @@ import (
 	"reflect"
 
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func main() {
diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go
index 06db13d9a..f67297cd7 100644
--- a/abci/tests/server/client.go
+++ b/abci/tests/server/client.go
@@ -7,7 +7,7 @@ import (
 
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func InitChain(client abcicli.Client) error {
diff --git a/abci/tests/test_app/app.go b/abci/tests/test_app/app.go
index 42092345a..a33f4ee9e 100644
--- a/abci/tests/test_app/app.go
+++ b/abci/tests/test_app/app.go
@@ -7,7 +7,7 @@ import (
 
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 func startClient(abciType string) abcicli.Client {
diff --git a/abci/tests/test_cover.sh b/abci/tests/test_cover.sh
deleted file mode 100755
index abbbbe563..000000000
--- a/abci/tests/test_cover.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-echo "==> Running unit tests"
-for d in $(go list ./... | grep -v vendor); do
-  go test -race -coverprofile=profile.out -covermode=atomic "$d"
-  if [ -f profile.out ]; then
-    cat profile.out >> coverage.txt
-    rm profile.out
-  fi
-done
diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go
index 21d3595f0..da6595a46 100644
--- a/abci/types/messages_test.go
+++ b/abci/types/messages_test.go
@@ -8,7 +8,7 @@ import (
 	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func TestMarshalJSON(t *testing.T) {
diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go
index a6b806fe6..57dd14393 100644
--- a/abci/types/types.pb.go
+++ b/abci/types/types.pb.go
@@ -1,62 +1,68 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: types/types.proto
+// source: abci/types/types.proto
 
 /*
-Package types is a generated protocol buffer package.
-
-It is generated from these files:
-	types/types.proto
-
-It has these top-level messages:
-	Request
-	RequestEcho
-	RequestFlush
-	RequestInfo
-	RequestSetOption
-	RequestInitChain
-	RequestQuery
-	RequestBeginBlock
-	RequestCheckTx
-	RequestDeliverTx
-	RequestEndBlock
-	RequestCommit
-	Response
-	ResponseException
-	ResponseEcho
-	ResponseFlush
-	ResponseInfo
-	ResponseSetOption
-	ResponseInitChain
-	ResponseQuery
-	ResponseBeginBlock
-	ResponseCheckTx
-	ResponseDeliverTx
-	ResponseEndBlock
-	ResponseCommit
-	ConsensusParams
-	BlockSize
-	TxSize
-	BlockGossip
-	Header
-	Validator
-	SigningValidator
-	PubKey
-	Evidence
+	Package types is a generated protocol buffer package.
+
+	It is generated from these files:
+		abci/types/types.proto
+
+	It has these top-level messages:
+		Request
+		RequestEcho
+		RequestFlush
+		RequestInfo
+		RequestSetOption
+		RequestInitChain
+		RequestQuery
+		RequestBeginBlock
+		RequestCheckTx
+		RequestDeliverTx
+		RequestEndBlock
+		RequestCommit
+		Response
+		ResponseException
+		ResponseEcho
+		ResponseFlush
+		ResponseInfo
+		ResponseSetOption
+		ResponseInitChain
+		ResponseQuery
+		ResponseBeginBlock
+		ResponseCheckTx
+		ResponseDeliverTx
+		ResponseEndBlock
+		ResponseCommit
+		ConsensusParams
+		BlockSize
+		TxSize
+		BlockGossip
+		Header
+		Validator
+		SigningValidator
+		PubKey
+		Evidence
 */
-//nolint: gas
+//nolint
 package types
 
 import proto "github.com/gogo/protobuf/proto"
+import golang_proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
-import common "github.com/tendermint/tmlibs/common"
+import common "github.com/tendermint/tendermint/libs/common"
+
+import bytes "bytes"
 
 import context "golang.org/x/net/context"
 import grpc "google.golang.org/grpc"
 
+import io "io"
+
 // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal +var _ = golang_proto.Marshal var _ = fmt.Errorf var _ = math.Inf @@ -89,6 +95,9 @@ func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int type isRequest_Value interface { isRequest_Value() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int } type Request_Echo struct { @@ -739,6 +748,9 @@ func (*Response) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []in type isResponse_Value interface { isResponse_Value() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int } type Response_Exception struct { @@ -1895,280 +1907,1913 @@ func (m *Evidence) GetTotalVotingPower() int64 { func init() { proto.RegisterType((*Request)(nil), "types.Request") + golang_proto.RegisterType((*Request)(nil), "types.Request") proto.RegisterType((*RequestEcho)(nil), "types.RequestEcho") + golang_proto.RegisterType((*RequestEcho)(nil), "types.RequestEcho") proto.RegisterType((*RequestFlush)(nil), "types.RequestFlush") + golang_proto.RegisterType((*RequestFlush)(nil), "types.RequestFlush") proto.RegisterType((*RequestInfo)(nil), "types.RequestInfo") + golang_proto.RegisterType((*RequestInfo)(nil), "types.RequestInfo") proto.RegisterType((*RequestSetOption)(nil), "types.RequestSetOption") + golang_proto.RegisterType((*RequestSetOption)(nil), "types.RequestSetOption") proto.RegisterType((*RequestInitChain)(nil), "types.RequestInitChain") + golang_proto.RegisterType((*RequestInitChain)(nil), "types.RequestInitChain") proto.RegisterType((*RequestQuery)(nil), "types.RequestQuery") + golang_proto.RegisterType((*RequestQuery)(nil), "types.RequestQuery") proto.RegisterType((*RequestBeginBlock)(nil), "types.RequestBeginBlock") + golang_proto.RegisterType((*RequestBeginBlock)(nil), "types.RequestBeginBlock") proto.RegisterType((*RequestCheckTx)(nil), "types.RequestCheckTx") + golang_proto.RegisterType((*RequestCheckTx)(nil), "types.RequestCheckTx") proto.RegisterType((*RequestDeliverTx)(nil), "types.RequestDeliverTx") + golang_proto.RegisterType((*RequestDeliverTx)(nil), "types.RequestDeliverTx") proto.RegisterType((*RequestEndBlock)(nil), "types.RequestEndBlock") + golang_proto.RegisterType((*RequestEndBlock)(nil), "types.RequestEndBlock") proto.RegisterType((*RequestCommit)(nil), "types.RequestCommit") + golang_proto.RegisterType((*RequestCommit)(nil), "types.RequestCommit") proto.RegisterType((*Response)(nil), "types.Response") + golang_proto.RegisterType((*Response)(nil), "types.Response") proto.RegisterType((*ResponseException)(nil), "types.ResponseException") + golang_proto.RegisterType((*ResponseException)(nil), "types.ResponseException") proto.RegisterType((*ResponseEcho)(nil), "types.ResponseEcho") + golang_proto.RegisterType((*ResponseEcho)(nil), "types.ResponseEcho") proto.RegisterType((*ResponseFlush)(nil), "types.ResponseFlush") + golang_proto.RegisterType((*ResponseFlush)(nil), "types.ResponseFlush") proto.RegisterType((*ResponseInfo)(nil), "types.ResponseInfo") + golang_proto.RegisterType((*ResponseInfo)(nil), "types.ResponseInfo") proto.RegisterType((*ResponseSetOption)(nil), "types.ResponseSetOption") + golang_proto.RegisterType((*ResponseSetOption)(nil), "types.ResponseSetOption") proto.RegisterType((*ResponseInitChain)(nil), "types.ResponseInitChain") + golang_proto.RegisterType((*ResponseInitChain)(nil), "types.ResponseInitChain") proto.RegisterType((*ResponseQuery)(nil), "types.ResponseQuery") + golang_proto.RegisterType((*ResponseQuery)(nil), "types.ResponseQuery") proto.RegisterType((*ResponseBeginBlock)(nil), 
"types.ResponseBeginBlock") + golang_proto.RegisterType((*ResponseBeginBlock)(nil), "types.ResponseBeginBlock") proto.RegisterType((*ResponseCheckTx)(nil), "types.ResponseCheckTx") + golang_proto.RegisterType((*ResponseCheckTx)(nil), "types.ResponseCheckTx") proto.RegisterType((*ResponseDeliverTx)(nil), "types.ResponseDeliverTx") + golang_proto.RegisterType((*ResponseDeliverTx)(nil), "types.ResponseDeliverTx") proto.RegisterType((*ResponseEndBlock)(nil), "types.ResponseEndBlock") + golang_proto.RegisterType((*ResponseEndBlock)(nil), "types.ResponseEndBlock") proto.RegisterType((*ResponseCommit)(nil), "types.ResponseCommit") + golang_proto.RegisterType((*ResponseCommit)(nil), "types.ResponseCommit") proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") + golang_proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") proto.RegisterType((*BlockSize)(nil), "types.BlockSize") + golang_proto.RegisterType((*BlockSize)(nil), "types.BlockSize") proto.RegisterType((*TxSize)(nil), "types.TxSize") + golang_proto.RegisterType((*TxSize)(nil), "types.TxSize") proto.RegisterType((*BlockGossip)(nil), "types.BlockGossip") + golang_proto.RegisterType((*BlockGossip)(nil), "types.BlockGossip") proto.RegisterType((*Header)(nil), "types.Header") + golang_proto.RegisterType((*Header)(nil), "types.Header") proto.RegisterType((*Validator)(nil), "types.Validator") + golang_proto.RegisterType((*Validator)(nil), "types.Validator") proto.RegisterType((*SigningValidator)(nil), "types.SigningValidator") + golang_proto.RegisterType((*SigningValidator)(nil), "types.SigningValidator") proto.RegisterType((*PubKey)(nil), "types.PubKey") + golang_proto.RegisterType((*PubKey)(nil), "types.PubKey") proto.RegisterType((*Evidence)(nil), "types.Evidence") + golang_proto.RegisterType((*Evidence)(nil), "types.Evidence") } +func (this *Request) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// Client API for ABCIApplication service - -type ABCIApplicationClient interface { - Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) - Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) - Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) - SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) - DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) - CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) - Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) - Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) - InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) - BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) - EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) -} - -type aBCIApplicationClient struct { - cc *grpc.ClientConn + that1, ok := that.(*Request) + if !ok { + that2, ok := that.(Request) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Value == nil { + if this.Value != nil { + return false + } + } else if this.Value == nil { + return false + } else if !this.Value.Equal(that1.Value) { + return false + } + return true } +func (this *Request_Echo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func NewABCIApplicationClient(cc *grpc.ClientConn) ABCIApplicationClient { - return &aBCIApplicationClient{cc} + that1, ok := that.(*Request_Echo) + if !ok { + that2, ok := that.(Request_Echo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Echo.Equal(that1.Echo) { + return false + } + return true } +func (this *Request_Flush) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { - out := new(ResponseEcho) - err := grpc.Invoke(ctx, "/types.ABCIApplication/Echo", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*Request_Flush) + if !ok { + that2, ok := that.(Request_Flush) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Flush.Equal(that1.Flush) { + return false + } + return true } +func (this *Request_Info) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { - out := new(ResponseFlush) - err := grpc.Invoke(ctx, "/types.ABCIApplication/Flush", in, out, c.cc, opts...) 
- if err != nil { - return nil, err + that1, ok := that.(*Request_Info) + if !ok { + that2, ok := that.(Request_Info) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Info.Equal(that1.Info) { + return false + } + return true } +func (this *Request_SetOption) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { - out := new(ResponseInfo) - err := grpc.Invoke(ctx, "/types.ABCIApplication/Info", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*Request_SetOption) + if !ok { + that2, ok := that.(Request_SetOption) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.SetOption.Equal(that1.SetOption) { + return false + } + return true } +func (this *Request_InitChain) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) { - out := new(ResponseSetOption) - err := grpc.Invoke(ctx, "/types.ABCIApplication/SetOption", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*Request_InitChain) + if !ok { + that2, ok := that.(Request_InitChain) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.InitChain.Equal(that1.InitChain) { + return false + } + return true } +func (this *Request_Query) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { - out := new(ResponseDeliverTx) - err := grpc.Invoke(ctx, "/types.ABCIApplication/DeliverTx", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*Request_Query) + if !ok { + that2, ok := that.(Request_Query) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Query.Equal(that1.Query) { + return false + } + return true } +func (this *Request_BeginBlock) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { - out := new(ResponseCheckTx) - err := grpc.Invoke(ctx, "/types.ABCIApplication/CheckTx", in, out, c.cc, opts...) 
- if err != nil { - return nil, err + that1, ok := that.(*Request_BeginBlock) + if !ok { + that2, ok := that.(Request_BeginBlock) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.BeginBlock.Equal(that1.BeginBlock) { + return false + } + return true } +func (this *Request_CheckTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { - out := new(ResponseQuery) - err := grpc.Invoke(ctx, "/types.ABCIApplication/Query", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*Request_CheckTx) + if !ok { + that2, ok := that.(Request_CheckTx) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.CheckTx.Equal(that1.CheckTx) { + return false + } + return true } +func (this *Request_DeliverTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { - out := new(ResponseCommit) - err := grpc.Invoke(ctx, "/types.ABCIApplication/Commit", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*Request_DeliverTx) + if !ok { + that2, ok := that.(Request_DeliverTx) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DeliverTx.Equal(that1.DeliverTx) { + return false + } + return true } +func (this *Request_EndBlock) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { - out := new(ResponseInitChain) - err := grpc.Invoke(ctx, "/types.ABCIApplication/InitChain", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*Request_EndBlock) + if !ok { + that2, ok := that.(Request_EndBlock) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.EndBlock.Equal(that1.EndBlock) { + return false + } + return true } +func (this *Request_Commit) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { - out := new(ResponseBeginBlock) - err := grpc.Invoke(ctx, "/types.ABCIApplication/BeginBlock", in, out, c.cc, opts...) 
- if err != nil { - return nil, err + that1, ok := that.(*Request_Commit) + if !ok { + that2, ok := that.(Request_Commit) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Commit.Equal(that1.Commit) { + return false + } + return true } +func (this *RequestEcho) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { - out := new(ResponseEndBlock) - err := grpc.Invoke(ctx, "/types.ABCIApplication/EndBlock", in, out, c.cc, opts...) - if err != nil { - return nil, err + that1, ok := that.(*RequestEcho) + if !ok { + that2, ok := that.(RequestEcho) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Message != that1.Message { + return false + } + return true } +func (this *RequestFlush) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -// Server API for ABCIApplication service - -type ABCIApplicationServer interface { - Echo(context.Context, *RequestEcho) (*ResponseEcho, error) - Flush(context.Context, *RequestFlush) (*ResponseFlush, error) - Info(context.Context, *RequestInfo) (*ResponseInfo, error) - SetOption(context.Context, *RequestSetOption) (*ResponseSetOption, error) - DeliverTx(context.Context, *RequestDeliverTx) (*ResponseDeliverTx, error) - CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) - Query(context.Context, *RequestQuery) (*ResponseQuery, error) - Commit(context.Context, *RequestCommit) (*ResponseCommit, error) - InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) - BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) - EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) + that1, ok := that.(*RequestFlush) + if !ok { + that2, ok := that.(RequestFlush) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true } +func (this *RequestInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { - s.RegisterService(&_ABCIApplication_serviceDesc, srv) + that1, ok := that.(*RequestInfo) + if !ok { + that2, ok := that.(RequestInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Version != that1.Version { + return false + } + return true } +func (this *RequestSetOption) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestEcho) - if err := dec(in); err != nil { - return nil, err + that1, ok := that.(*RequestSetOption) + if !ok { + that2, ok := that.(RequestSetOption) + if ok { + that1 = &that2 + } else { + return false + } } - if interceptor == nil { - return srv.(ABCIApplicationServer).Echo(ctx, in) + if that1 == nil { + return this == nil + } else if this == nil { + return false } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/Echo", 
+ if this.Key != that1.Key { + return false } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Echo(ctx, req.(*RequestEcho)) + if this.Value != that1.Value { + return false } - return interceptor(ctx, in, info, handler) + return true } +func (this *RequestInitChain) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestFlush) - if err := dec(in); err != nil { - return nil, err + that1, ok := that.(*RequestInitChain) + if !ok { + that2, ok := that.(RequestInitChain) + if ok { + that1 = &that2 + } else { + return false + } } - if interceptor == nil { - return srv.(ABCIApplicationServer).Flush(ctx, in) + if that1 == nil { + return this == nil + } else if this == nil { + return false } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/Flush", + if this.Time != that1.Time { + return false } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Flush(ctx, req.(*RequestFlush)) + if this.ChainId != that1.ChainId { + return false } - return interceptor(ctx, in, info, handler) -} - -func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestInfo) - if err := dec(in); err != nil { - return nil, err + if !this.ConsensusParams.Equal(that1.ConsensusParams) { + return false } - if interceptor == nil { - return srv.(ABCIApplicationServer).Info(ctx, in) + if len(this.Validators) != len(that1.Validators) { + return false } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/Info", + for i := range this.Validators { + if !this.Validators[i].Equal(&that1.Validators[i]) { + return false + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) + if !bytes.Equal(this.AppStateBytes, that1.AppStateBytes) { + return false } - return interceptor(ctx, in, info, handler) + return true } +func (this *RequestQuery) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func _ABCIApplication_SetOption_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestSetOption) - if err := dec(in); err != nil { - return nil, err + that1, ok := that.(*RequestQuery) + if !ok { + that2, ok := that.(RequestQuery) + if ok { + that1 = &that2 + } else { + return false + } } - if interceptor == nil { - return srv.(ABCIApplicationServer).SetOption(ctx, in) + if that1 == nil { + return this == nil + } else if this == nil { + return false } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/SetOption", + if !bytes.Equal(this.Data, that1.Data) { + return false } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).SetOption(ctx, req.(*RequestSetOption)) + if this.Path != that1.Path { + return false } - return interceptor(ctx, in, info, handler) + if this.Height != that1.Height { + return false + } + if this.Prove != that1.Prove { + return false + } + return true } +func (this *RequestBeginBlock) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } -func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestDeliverTx) - if err := dec(in); err != nil { - return nil, err + that1, ok := that.(*RequestBeginBlock) + if !ok { + that2, ok := that.(RequestBeginBlock) + if ok { + that1 = &that2 + } else { + return false + } } - if interceptor == nil { - return srv.(ABCIApplicationServer).DeliverTx(ctx, in) + if that1 == nil { + return this == nil + } else if this == nil { + return false } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/DeliverTx", + if !bytes.Equal(this.Hash, that1.Hash) { + return false } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*RequestDeliverTx)) + if !this.Header.Equal(&that1.Header) { + return false } - return interceptor(ctx, in, info, handler) + if len(this.Validators) != len(that1.Validators) { + return false + } + for i := range this.Validators { + if !this.Validators[i].Equal(&that1.Validators[i]) { + return false + } + } + if len(this.ByzantineValidators) != len(that1.ByzantineValidators) { + return false + } + for i := range this.ByzantineValidators { + if !this.ByzantineValidators[i].Equal(&that1.ByzantineValidators[i]) { + return false + } + } + return true +} +func (this *RequestCheckTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestCheckTx) + if !ok { + that2, ok := that.(RequestCheckTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Tx, that1.Tx) { + return false + } + return true +} +func (this *RequestDeliverTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestDeliverTx) + if !ok { + that2, ok := that.(RequestDeliverTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Tx, that1.Tx) { + return false + } + return true +} +func (this *RequestEndBlock) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestEndBlock) + if !ok { + that2, ok := that.(RequestEndBlock) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Height != that1.Height { + return false + } + return true +} +func (this *RequestCommit) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestCommit) + if !ok { + that2, ok := that.(RequestCommit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *Response) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response) + if !ok { + that2, ok := that.(Response) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Value == nil { + if this.Value != nil { + return false + } + } else if this.Value == nil { + return false + } else if 
!this.Value.Equal(that1.Value) { + return false + } + return true +} +func (this *Response_Exception) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_Exception) + if !ok { + that2, ok := that.(Response_Exception) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Exception.Equal(that1.Exception) { + return false + } + return true +} +func (this *Response_Echo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_Echo) + if !ok { + that2, ok := that.(Response_Echo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Echo.Equal(that1.Echo) { + return false + } + return true +} +func (this *Response_Flush) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_Flush) + if !ok { + that2, ok := that.(Response_Flush) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Flush.Equal(that1.Flush) { + return false + } + return true +} +func (this *Response_Info) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_Info) + if !ok { + that2, ok := that.(Response_Info) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Info.Equal(that1.Info) { + return false + } + return true +} +func (this *Response_SetOption) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_SetOption) + if !ok { + that2, ok := that.(Response_SetOption) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.SetOption.Equal(that1.SetOption) { + return false + } + return true +} +func (this *Response_InitChain) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_InitChain) + if !ok { + that2, ok := that.(Response_InitChain) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.InitChain.Equal(that1.InitChain) { + return false + } + return true +} +func (this *Response_Query) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_Query) + if !ok { + that2, ok := that.(Response_Query) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Query.Equal(that1.Query) { + return false + } + return true +} +func (this *Response_BeginBlock) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_BeginBlock) + if !ok { + that2, ok := that.(Response_BeginBlock) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.BeginBlock.Equal(that1.BeginBlock) { + return false + } + return true +} +func (this *Response_CheckTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } 
+ + that1, ok := that.(*Response_CheckTx) + if !ok { + that2, ok := that.(Response_CheckTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.CheckTx.Equal(that1.CheckTx) { + return false + } + return true +} +func (this *Response_DeliverTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_DeliverTx) + if !ok { + that2, ok := that.(Response_DeliverTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DeliverTx.Equal(that1.DeliverTx) { + return false + } + return true +} +func (this *Response_EndBlock) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_EndBlock) + if !ok { + that2, ok := that.(Response_EndBlock) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.EndBlock.Equal(that1.EndBlock) { + return false + } + return true +} +func (this *Response_Commit) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_Commit) + if !ok { + that2, ok := that.(Response_Commit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Commit.Equal(that1.Commit) { + return false + } + return true +} +func (this *ResponseException) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseException) + if !ok { + that2, ok := that.(ResponseException) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Error != that1.Error { + return false + } + return true +} +func (this *ResponseEcho) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseEcho) + if !ok { + that2, ok := that.(ResponseEcho) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Message != that1.Message { + return false + } + return true +} +func (this *ResponseFlush) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseFlush) + if !ok { + that2, ok := that.(ResponseFlush) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *ResponseInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseInfo) + if !ok { + that2, ok := that.(ResponseInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Data != that1.Data { + return false + } + if this.Version != that1.Version { + return false + } + if this.LastBlockHeight != that1.LastBlockHeight { + return false + } + if !bytes.Equal(this.LastBlockAppHash, that1.LastBlockAppHash) { + return false + } + return true +} +func (this *ResponseSetOption) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseSetOption) + if !ok { + 
that2, ok := that.(ResponseSetOption) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != that1.Code { + return false + } + if this.Log != that1.Log { + return false + } + if this.Info != that1.Info { + return false + } + return true +} +func (this *ResponseInitChain) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseInitChain) + if !ok { + that2, ok := that.(ResponseInitChain) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ConsensusParams.Equal(that1.ConsensusParams) { + return false + } + if len(this.Validators) != len(that1.Validators) { + return false + } + for i := range this.Validators { + if !this.Validators[i].Equal(&that1.Validators[i]) { + return false + } + } + return true +} +func (this *ResponseQuery) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseQuery) + if !ok { + that2, ok := that.(ResponseQuery) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != that1.Code { + return false + } + if this.Log != that1.Log { + return false + } + if this.Info != that1.Info { + return false + } + if this.Index != that1.Index { + return false + } + if !bytes.Equal(this.Key, that1.Key) { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.Proof, that1.Proof) { + return false + } + if this.Height != that1.Height { + return false + } + return true +} +func (this *ResponseBeginBlock) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseBeginBlock) + if !ok { + that2, ok := that.(ResponseBeginBlock) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Tags) != len(that1.Tags) { + return false + } + for i := range this.Tags { + if !this.Tags[i].Equal(&that1.Tags[i]) { + return false + } + } + return true +} +func (this *ResponseCheckTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseCheckTx) + if !ok { + that2, ok := that.(ResponseCheckTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != that1.Code { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if this.Log != that1.Log { + return false + } + if this.Info != that1.Info { + return false + } + if this.GasWanted != that1.GasWanted { + return false + } + if this.GasUsed != that1.GasUsed { + return false + } + if len(this.Tags) != len(that1.Tags) { + return false + } + for i := range this.Tags { + if !this.Tags[i].Equal(&that1.Tags[i]) { + return false + } + } + if !this.Fee.Equal(&that1.Fee) { + return false + } + return true +} +func (this *ResponseDeliverTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseDeliverTx) + if !ok { + that2, ok := that.(ResponseDeliverTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != 
that1.Code { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if this.Log != that1.Log { + return false + } + if this.Info != that1.Info { + return false + } + if this.GasWanted != that1.GasWanted { + return false + } + if this.GasUsed != that1.GasUsed { + return false + } + if len(this.Tags) != len(that1.Tags) { + return false + } + for i := range this.Tags { + if !this.Tags[i].Equal(&that1.Tags[i]) { + return false + } + } + if !this.Fee.Equal(&that1.Fee) { + return false + } + return true +} +func (this *ResponseEndBlock) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseEndBlock) + if !ok { + that2, ok := that.(ResponseEndBlock) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.ValidatorUpdates) != len(that1.ValidatorUpdates) { + return false + } + for i := range this.ValidatorUpdates { + if !this.ValidatorUpdates[i].Equal(&that1.ValidatorUpdates[i]) { + return false + } + } + if !this.ConsensusParamUpdates.Equal(that1.ConsensusParamUpdates) { + return false + } + if len(this.Tags) != len(that1.Tags) { + return false + } + for i := range this.Tags { + if !this.Tags[i].Equal(&that1.Tags[i]) { + return false + } + } + return true +} +func (this *ResponseCommit) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseCommit) + if !ok { + that2, ok := that.(ResponseCommit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *ConsensusParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ConsensusParams) + if !ok { + that2, ok := that.(ConsensusParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.BlockSize.Equal(that1.BlockSize) { + return false + } + if !this.TxSize.Equal(that1.TxSize) { + return false + } + if !this.BlockGossip.Equal(that1.BlockGossip) { + return false + } + return true +} +func (this *BlockSize) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockSize) + if !ok { + that2, ok := that.(BlockSize) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MaxBytes != that1.MaxBytes { + return false + } + if this.MaxTxs != that1.MaxTxs { + return false + } + if this.MaxGas != that1.MaxGas { + return false + } + return true +} +func (this *TxSize) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TxSize) + if !ok { + that2, ok := that.(TxSize) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MaxBytes != that1.MaxBytes { + return false + } + if this.MaxGas != that1.MaxGas { + return false + } + return true +} +func (this *BlockGossip) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockGossip) + if !ok { + that2, ok := that.(BlockGossip) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this 
== nil + } else if this == nil { + return false + } + if this.BlockPartSizeBytes != that1.BlockPartSizeBytes { + return false + } + return true +} +func (this *Header) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Header) + if !ok { + that2, ok := that.(Header) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ChainID != that1.ChainID { + return false + } + if this.Height != that1.Height { + return false + } + if this.Time != that1.Time { + return false + } + if this.NumTxs != that1.NumTxs { + return false + } + if this.TotalTxs != that1.TotalTxs { + return false + } + if !bytes.Equal(this.LastBlockHash, that1.LastBlockHash) { + return false + } + if !bytes.Equal(this.ValidatorsHash, that1.ValidatorsHash) { + return false + } + if !bytes.Equal(this.AppHash, that1.AppHash) { + return false + } + if !this.Proposer.Equal(&that1.Proposer) { + return false + } + return true +} +func (this *Validator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Validator) + if !ok { + that2, ok := that.(Validator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Address, that1.Address) { + return false + } + if !this.PubKey.Equal(&that1.PubKey) { + return false + } + if this.Power != that1.Power { + return false + } + return true +} +func (this *SigningValidator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SigningValidator) + if !ok { + that2, ok := that.(SigningValidator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Validator.Equal(&that1.Validator) { + return false + } + if this.SignedLastBlock != that1.SignedLastBlock { + return false + } + return true +} +func (this *PubKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PubKey) + if !ok { + that2, ok := that.(PubKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *Evidence) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Evidence) + if !ok { + that2, ok := that.(Evidence) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Validator.Equal(&that1.Validator) { + return false + } + if this.Height != that1.Height { + return false + } + if this.Time != that1.Time { + return false + } + if this.TotalVotingPower != that1.TotalVotingPower { + return false + } + return true +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for ABCIApplication service + +type ABCIApplicationClient interface { + Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) + Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) + Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) + SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) + DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) + CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) + Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) + Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) + InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) + BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) + EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) +} + +type aBCIApplicationClient struct { + cc *grpc.ClientConn +} + +func NewABCIApplicationClient(cc *grpc.ClientConn) ABCIApplicationClient { + return &aBCIApplicationClient{cc} +} + +func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { + out := new(ResponseEcho) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Echo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { + out := new(ResponseFlush) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Flush", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { + out := new(ResponseInfo) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Info", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) { + out := new(ResponseSetOption) + err := grpc.Invoke(ctx, "/types.ABCIApplication/SetOption", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { + out := new(ResponseDeliverTx) + err := grpc.Invoke(ctx, "/types.ABCIApplication/DeliverTx", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { + out := new(ResponseCheckTx) + err := grpc.Invoke(ctx, "/types.ABCIApplication/CheckTx", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { + out := new(ResponseQuery) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Query", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { + out := new(ResponseCommit) + err := grpc.Invoke(ctx, "/types.ABCIApplication/Commit", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { + out := new(ResponseInitChain) + err := grpc.Invoke(ctx, "/types.ABCIApplication/InitChain", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { + out := new(ResponseBeginBlock) + err := grpc.Invoke(ctx, "/types.ABCIApplication/BeginBlock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { + out := new(ResponseEndBlock) + err := grpc.Invoke(ctx, "/types.ABCIApplication/EndBlock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ABCIApplication service + +type ABCIApplicationServer interface { + Echo(context.Context, *RequestEcho) (*ResponseEcho, error) + Flush(context.Context, *RequestFlush) (*ResponseFlush, error) + Info(context.Context, *RequestInfo) (*ResponseInfo, error) + SetOption(context.Context, *RequestSetOption) (*ResponseSetOption, error) + DeliverTx(context.Context, *RequestDeliverTx) (*ResponseDeliverTx, error) + CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) + Query(context.Context, *RequestQuery) (*ResponseQuery, error) + Commit(context.Context, *RequestCommit) (*ResponseCommit, error) + InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) + BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) + EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) +} + +func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { + s.RegisterService(&_ABCIApplication_serviceDesc, srv) +} + +func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestEcho) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Echo(ctx, req.(*RequestEcho)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFlush) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Flush(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Flush", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Flush(ctx, req.(*RequestFlush)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_SetOption_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestSetOption) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).SetOption(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/SetOption", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).SetOption(ctx, req.(*RequestSetOption)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestDeliverTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).DeliverTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/DeliverTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*RequestDeliverTx)) + } + return interceptor(ctx, in, info, handler) } func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -2176,280 +3821,8344 @@ func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec if err := dec(in); err != nil { return nil, err } - if interceptor == nil { - return srv.(ABCIApplicationServer).CheckTx(ctx, in) + if interceptor == nil { + return srv.(ABCIApplicationServer).CheckTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/CheckTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Query(ctx, req.(*RequestQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(RequestCommit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Commit(ctx, req.(*RequestCommit)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInitChain) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).InitChain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/InitChain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBeginBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).BeginBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/BeginBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestEndBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).EndBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.ABCIApplication/EndBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*RequestEndBlock)) + } + return interceptor(ctx, in, info, handler) +} + +var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ + ServiceName: "types.ABCIApplication", + HandlerType: (*ABCIApplicationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _ABCIApplication_Echo_Handler, + }, + { + MethodName: "Flush", + Handler: _ABCIApplication_Flush_Handler, + }, + { + MethodName: "Info", + Handler: _ABCIApplication_Info_Handler, + }, + { + MethodName: "SetOption", + Handler: _ABCIApplication_SetOption_Handler, + }, + { + MethodName: "DeliverTx", + Handler: _ABCIApplication_DeliverTx_Handler, + }, + { + MethodName: "CheckTx", + Handler: _ABCIApplication_CheckTx_Handler, + }, + { + MethodName: "Query", + Handler: _ABCIApplication_Query_Handler, + }, + { + MethodName: "Commit", + Handler: _ABCIApplication_Commit_Handler, + }, + { + MethodName: "InitChain", + Handler: _ABCIApplication_InitChain_Handler, + }, + { + MethodName: "BeginBlock", + Handler: _ABCIApplication_BeginBlock_Handler, + }, + { + MethodName: "EndBlock", + Handler: _ABCIApplication_EndBlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"abci/types/types.proto", +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != nil { + nn1, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *Request_Echo) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Echo != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Echo.Size())) + n2, err := m.Echo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} +func (m *Request_Flush) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Flush != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Flush.Size())) + n3, err := m.Flush.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Request_Info) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Info != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Info.Size())) + n4, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *Request_SetOption) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SetOption != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SetOption.Size())) + n5, err := m.SetOption.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.InitChain != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.InitChain.Size())) + n6, err := m.InitChain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Query != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Query.Size())) + n7, err := m.Query.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.BeginBlock != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BeginBlock.Size())) + n8, err := m.BeginBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CheckTx != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CheckTx.Size())) + n9, err := m.CheckTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.EndBlock != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.EndBlock.Size())) + n10, err := m.EndBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Commit != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Commit.Size())) + n11, err := m.Commit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.DeliverTx != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = 
encodeVarintTypes(dAtA, i, uint64(m.DeliverTx.Size())) + n12, err := m.DeliverTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *RequestEcho) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Message) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *RequestFlush) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestFlush) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *RequestInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Version) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + return i, nil +} + +func (m *RequestSetOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestSetOption) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Time != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Time)) + } + if len(m.ChainId) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i += copy(dAtA[i:], m.ChainId) + } + if m.ConsensusParams != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParams.Size())) + n13, err := m.ConsensusParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if len(m.Validators) > 0 { + for _, msg := range m.Validators { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.AppStateBytes) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) + i += copy(dAtA[i:], m.AppStateBytes) + } + return i, nil +} + +func (m *RequestQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestQuery) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Path) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Height != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + } + if m.Prove { + dAtA[i] = 0x20 + i++ + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Header.Size())) + n14, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + if len(m.Validators) > 0 { + for _, msg := range m.Validators { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ByzantineValidators) > 0 { + for _, msg := range m.ByzantineValidators { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i += copy(dAtA[i:], m.Tx) + } + return i, nil +} + +func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i += copy(dAtA[i:], m.Tx) + } + return i, nil +} + +func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Height != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + } + return i, nil +} + +func (m *RequestCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != nil { + nn15, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn15 + } + return i, nil +} + +func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Exception != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Exception.Size())) + n16, err := m.Exception.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} +func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Echo != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Echo.Size())) + n17, err := m.Echo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} +func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Flush != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Flush.Size())) + n18, err := m.Flush.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} +func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Info != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Info.Size())) + n19, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} +func (m *Response_SetOption) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SetOption != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SetOption.Size())) + n20, err := m.SetOption.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} +func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.InitChain != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.InitChain.Size())) + n21, err := m.InitChain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + return i, nil +} +func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Query != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Query.Size())) + n22, err := m.Query.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} +func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.BeginBlock != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BeginBlock.Size())) + n23, err := m.BeginBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} +func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CheckTx != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CheckTx.Size())) + n24, err := m.CheckTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} +func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.DeliverTx != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DeliverTx.Size())) + n25, err := m.DeliverTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} +func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.EndBlock != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.EndBlock.Size())) + n26, err := m.EndBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + return i, nil +} +func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if 
m.Commit != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Commit.Size())) + n27, err := m.Commit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} +func (m *ResponseException) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseException) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Error) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEcho) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Message) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlush) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if m.LastBlockHeight != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) + } + if len(m.LastBlockAppHash) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) + i += copy(dAtA[i:], m.LastBlockAppHash) + } + return i, nil +} + +func (m *ResponseSetOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseSetOption) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) + } + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) + } + return i, nil +} + +func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ConsensusParams != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParams.Size())) + n28, err := 
m.ConsensusParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if len(m.Validators) > 0 { + for _, msg := range m.Validators { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) + } + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) + } + if m.Index != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if len(m.Proof) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Proof))) + i += copy(dAtA[i:], m.Proof) + } + if m.Height != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + } + return i, nil +} + +func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tags) > 0 { + for _, msg := range m.Tags { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) + } + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) + } + if m.GasWanted != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + } + if len(m.Tags) > 0 { + for _, msg := range m.Tags { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Fee.Size())) + n29, err := m.Fee.MarshalTo(dAtA[i:]) + if 
err != nil { + return 0, err + } + i += n29 + return i, nil +} + +func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) + } + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) + } + if m.GasWanted != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + } + if len(m.Tags) > 0 { + for _, msg := range m.Tags { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Fee.Size())) + n30, err := m.Fee.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + return i, nil +} + +func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, msg := range m.ValidatorUpdates { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ConsensusParamUpdates != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParamUpdates.Size())) + n31, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if len(m.Tags) > 0 { + for _, msg := range m.Tags { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.BlockSize != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize.Size())) + n32, err := m.BlockSize.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if m.TxSize != nil { + 
dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TxSize.Size())) + n33, err := m.TxSize.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.BlockGossip != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BlockGossip.Size())) + n34, err := m.BlockGossip.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil +} + +func (m *BlockSize) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockSize) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxBytes != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) + } + if m.MaxTxs != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTxs)) + } + if m.MaxGas != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) + } + return i, nil +} + +func (m *TxSize) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxSize) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxBytes != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) + } + if m.MaxGas != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) + } + return i, nil +} + +func (m *BlockGossip) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockGossip) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.BlockPartSizeBytes != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BlockPartSizeBytes)) + } + return i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ChainID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i += copy(dAtA[i:], m.ChainID) + } + if m.Height != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + } + if m.Time != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Time)) + } + if m.NumTxs != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) + } + if m.TotalTxs != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TotalTxs)) + } + if len(m.LastBlockHash) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockHash))) + i += copy(dAtA[i:], m.LastBlockHash) + } + if len(m.ValidatorsHash) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) + i += copy(dAtA[i:], m.ValidatorsHash) + } + if len(m.AppHash) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i += copy(dAtA[i:], m.AppHash) + } + dAtA[i] = 0x4a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Proposer.Size())) + n35, err := m.Proposer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *Validator) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validator) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Address) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) + i += copy(dAtA[i:], m.Address) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) + n36, err := m.PubKey.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + if m.Power != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + } + return i, nil +} + +func (m *SigningValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SigningValidator) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n37, err := m.Validator.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if m.SignedLastBlock { + dAtA[i] = 0x10 + i++ + if m.SignedLastBlock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PubKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *Evidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n38, err := m.Validator.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + if m.Height != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + } + if m.Time != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Time)) + } + if m.TotalVotingPower != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) + } + return i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedRequest(r randyTypes, easy bool) *Request { + this := &Request{} + oneofNumber_Value := []int32{2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 19}[r.Intn(11)] + switch oneofNumber_Value { + case 2: + this.Value = NewPopulatedRequest_Echo(r, easy) + case 3: + this.Value = NewPopulatedRequest_Flush(r, easy) + case 4: + this.Value = NewPopulatedRequest_Info(r, easy) + case 5: + this.Value = NewPopulatedRequest_SetOption(r, easy) + case 6: + this.Value = NewPopulatedRequest_InitChain(r, easy) + 
case 7: + this.Value = NewPopulatedRequest_Query(r, easy) + case 8: + this.Value = NewPopulatedRequest_BeginBlock(r, easy) + case 9: + this.Value = NewPopulatedRequest_CheckTx(r, easy) + case 11: + this.Value = NewPopulatedRequest_EndBlock(r, easy) + case 12: + this.Value = NewPopulatedRequest_Commit(r, easy) + case 19: + this.Value = NewPopulatedRequest_DeliverTx(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequest_Echo(r randyTypes, easy bool) *Request_Echo { + this := &Request_Echo{} + this.Echo = NewPopulatedRequestEcho(r, easy) + return this +} +func NewPopulatedRequest_Flush(r randyTypes, easy bool) *Request_Flush { + this := &Request_Flush{} + this.Flush = NewPopulatedRequestFlush(r, easy) + return this +} +func NewPopulatedRequest_Info(r randyTypes, easy bool) *Request_Info { + this := &Request_Info{} + this.Info = NewPopulatedRequestInfo(r, easy) + return this +} +func NewPopulatedRequest_SetOption(r randyTypes, easy bool) *Request_SetOption { + this := &Request_SetOption{} + this.SetOption = NewPopulatedRequestSetOption(r, easy) + return this +} +func NewPopulatedRequest_InitChain(r randyTypes, easy bool) *Request_InitChain { + this := &Request_InitChain{} + this.InitChain = NewPopulatedRequestInitChain(r, easy) + return this +} +func NewPopulatedRequest_Query(r randyTypes, easy bool) *Request_Query { + this := &Request_Query{} + this.Query = NewPopulatedRequestQuery(r, easy) + return this +} +func NewPopulatedRequest_BeginBlock(r randyTypes, easy bool) *Request_BeginBlock { + this := &Request_BeginBlock{} + this.BeginBlock = NewPopulatedRequestBeginBlock(r, easy) + return this +} +func NewPopulatedRequest_CheckTx(r randyTypes, easy bool) *Request_CheckTx { + this := &Request_CheckTx{} + this.CheckTx = NewPopulatedRequestCheckTx(r, easy) + return this +} +func NewPopulatedRequest_EndBlock(r randyTypes, easy bool) *Request_EndBlock { + this := &Request_EndBlock{} + this.EndBlock = NewPopulatedRequestEndBlock(r, easy) + return this +} +func NewPopulatedRequest_Commit(r randyTypes, easy bool) *Request_Commit { + this := &Request_Commit{} + this.Commit = NewPopulatedRequestCommit(r, easy) + return this +} +func NewPopulatedRequest_DeliverTx(r randyTypes, easy bool) *Request_DeliverTx { + this := &Request_DeliverTx{} + this.DeliverTx = NewPopulatedRequestDeliverTx(r, easy) + return this +} +func NewPopulatedRequestEcho(r randyTypes, easy bool) *RequestEcho { + this := &RequestEcho{} + this.Message = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestFlush(r randyTypes, easy bool) *RequestFlush { + this := &RequestFlush{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestInfo(r randyTypes, easy bool) *RequestInfo { + this := &RequestInfo{} + this.Version = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestSetOption(r randyTypes, easy bool) *RequestSetOption { + this := &RequestSetOption{} + this.Key = string(randStringTypes(r)) + this.Value = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestInitChain(r randyTypes, easy bool) *RequestInitChain { + this := &RequestInitChain{} + this.Time = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Time *= -1 + } + this.ChainId = string(randStringTypes(r)) + if r.Intn(10) != 0 { + this.ConsensusParams = NewPopulatedConsensusParams(r, easy) + } + if r.Intn(10) != 0 { + v1 := r.Intn(5) + this.Validators = 
make([]Validator, v1) + for i := 0; i < v1; i++ { + v2 := NewPopulatedValidator(r, easy) + this.Validators[i] = *v2 + } + } + v3 := r.Intn(100) + this.AppStateBytes = make([]byte, v3) + for i := 0; i < v3; i++ { + this.AppStateBytes[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestQuery(r randyTypes, easy bool) *RequestQuery { + this := &RequestQuery{} + v4 := r.Intn(100) + this.Data = make([]byte, v4) + for i := 0; i < v4; i++ { + this.Data[i] = byte(r.Intn(256)) + } + this.Path = string(randStringTypes(r)) + this.Height = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Height *= -1 + } + this.Prove = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestBeginBlock(r randyTypes, easy bool) *RequestBeginBlock { + this := &RequestBeginBlock{} + v5 := r.Intn(100) + this.Hash = make([]byte, v5) + for i := 0; i < v5; i++ { + this.Hash[i] = byte(r.Intn(256)) + } + v6 := NewPopulatedHeader(r, easy) + this.Header = *v6 + if r.Intn(10) != 0 { + v7 := r.Intn(5) + this.Validators = make([]SigningValidator, v7) + for i := 0; i < v7; i++ { + v8 := NewPopulatedSigningValidator(r, easy) + this.Validators[i] = *v8 + } + } + if r.Intn(10) != 0 { + v9 := r.Intn(5) + this.ByzantineValidators = make([]Evidence, v9) + for i := 0; i < v9; i++ { + v10 := NewPopulatedEvidence(r, easy) + this.ByzantineValidators[i] = *v10 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestCheckTx(r randyTypes, easy bool) *RequestCheckTx { + this := &RequestCheckTx{} + v11 := r.Intn(100) + this.Tx = make([]byte, v11) + for i := 0; i < v11; i++ { + this.Tx[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestDeliverTx(r randyTypes, easy bool) *RequestDeliverTx { + this := &RequestDeliverTx{} + v12 := r.Intn(100) + this.Tx = make([]byte, v12) + for i := 0; i < v12; i++ { + this.Tx[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestEndBlock(r randyTypes, easy bool) *RequestEndBlock { + this := &RequestEndBlock{} + this.Height = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Height *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestCommit(r randyTypes, easy bool) *RequestCommit { + this := &RequestCommit{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse(r randyTypes, easy bool) *Response { + this := &Response{} + oneofNumber_Value := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}[r.Intn(12)] + switch oneofNumber_Value { + case 1: + this.Value = NewPopulatedResponse_Exception(r, easy) + case 2: + this.Value = NewPopulatedResponse_Echo(r, easy) + case 3: + this.Value = NewPopulatedResponse_Flush(r, easy) + case 4: + this.Value = NewPopulatedResponse_Info(r, easy) + case 5: + this.Value = NewPopulatedResponse_SetOption(r, easy) + case 6: + this.Value = NewPopulatedResponse_InitChain(r, easy) + case 7: + this.Value = NewPopulatedResponse_Query(r, easy) + case 8: + this.Value = NewPopulatedResponse_BeginBlock(r, easy) + case 9: + this.Value = NewPopulatedResponse_CheckTx(r, easy) + case 10: + this.Value = NewPopulatedResponse_DeliverTx(r, easy) + case 11: + this.Value = NewPopulatedResponse_EndBlock(r, easy) + case 12: + this.Value = NewPopulatedResponse_Commit(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_Exception(r randyTypes, easy bool) *Response_Exception { + this 
:= &Response_Exception{} + this.Exception = NewPopulatedResponseException(r, easy) + return this +} +func NewPopulatedResponse_Echo(r randyTypes, easy bool) *Response_Echo { + this := &Response_Echo{} + this.Echo = NewPopulatedResponseEcho(r, easy) + return this +} +func NewPopulatedResponse_Flush(r randyTypes, easy bool) *Response_Flush { + this := &Response_Flush{} + this.Flush = NewPopulatedResponseFlush(r, easy) + return this +} +func NewPopulatedResponse_Info(r randyTypes, easy bool) *Response_Info { + this := &Response_Info{} + this.Info = NewPopulatedResponseInfo(r, easy) + return this +} +func NewPopulatedResponse_SetOption(r randyTypes, easy bool) *Response_SetOption { + this := &Response_SetOption{} + this.SetOption = NewPopulatedResponseSetOption(r, easy) + return this +} +func NewPopulatedResponse_InitChain(r randyTypes, easy bool) *Response_InitChain { + this := &Response_InitChain{} + this.InitChain = NewPopulatedResponseInitChain(r, easy) + return this +} +func NewPopulatedResponse_Query(r randyTypes, easy bool) *Response_Query { + this := &Response_Query{} + this.Query = NewPopulatedResponseQuery(r, easy) + return this +} +func NewPopulatedResponse_BeginBlock(r randyTypes, easy bool) *Response_BeginBlock { + this := &Response_BeginBlock{} + this.BeginBlock = NewPopulatedResponseBeginBlock(r, easy) + return this +} +func NewPopulatedResponse_CheckTx(r randyTypes, easy bool) *Response_CheckTx { + this := &Response_CheckTx{} + this.CheckTx = NewPopulatedResponseCheckTx(r, easy) + return this +} +func NewPopulatedResponse_DeliverTx(r randyTypes, easy bool) *Response_DeliverTx { + this := &Response_DeliverTx{} + this.DeliverTx = NewPopulatedResponseDeliverTx(r, easy) + return this +} +func NewPopulatedResponse_EndBlock(r randyTypes, easy bool) *Response_EndBlock { + this := &Response_EndBlock{} + this.EndBlock = NewPopulatedResponseEndBlock(r, easy) + return this +} +func NewPopulatedResponse_Commit(r randyTypes, easy bool) *Response_Commit { + this := &Response_Commit{} + this.Commit = NewPopulatedResponseCommit(r, easy) + return this +} +func NewPopulatedResponseException(r randyTypes, easy bool) *ResponseException { + this := &ResponseException{} + this.Error = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseEcho(r randyTypes, easy bool) *ResponseEcho { + this := &ResponseEcho{} + this.Message = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseFlush(r randyTypes, easy bool) *ResponseFlush { + this := &ResponseFlush{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseInfo(r randyTypes, easy bool) *ResponseInfo { + this := &ResponseInfo{} + this.Data = string(randStringTypes(r)) + this.Version = string(randStringTypes(r)) + this.LastBlockHeight = int64(r.Int63()) + if r.Intn(2) == 0 { + this.LastBlockHeight *= -1 + } + v13 := r.Intn(100) + this.LastBlockAppHash = make([]byte, v13) + for i := 0; i < v13; i++ { + this.LastBlockAppHash[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseSetOption(r randyTypes, easy bool) *ResponseSetOption { + this := &ResponseSetOption{} + this.Code = uint32(r.Uint32()) + this.Log = string(randStringTypes(r)) + this.Info = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseInitChain(r randyTypes, easy bool) *ResponseInitChain { + this := &ResponseInitChain{} + if r.Intn(10) != 0 { + 
this.ConsensusParams = NewPopulatedConsensusParams(r, easy) + } + if r.Intn(10) != 0 { + v14 := r.Intn(5) + this.Validators = make([]Validator, v14) + for i := 0; i < v14; i++ { + v15 := NewPopulatedValidator(r, easy) + this.Validators[i] = *v15 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { + this := &ResponseQuery{} + this.Code = uint32(r.Uint32()) + this.Log = string(randStringTypes(r)) + this.Info = string(randStringTypes(r)) + this.Index = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Index *= -1 + } + v16 := r.Intn(100) + this.Key = make([]byte, v16) + for i := 0; i < v16; i++ { + this.Key[i] = byte(r.Intn(256)) + } + v17 := r.Intn(100) + this.Value = make([]byte, v17) + for i := 0; i < v17; i++ { + this.Value[i] = byte(r.Intn(256)) + } + v18 := r.Intn(100) + this.Proof = make([]byte, v18) + for i := 0; i < v18; i++ { + this.Proof[i] = byte(r.Intn(256)) + } + this.Height = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Height *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock { + this := &ResponseBeginBlock{} + if r.Intn(10) != 0 { + v19 := r.Intn(5) + this.Tags = make([]common.KVPair, v19) + for i := 0; i < v19; i++ { + v20 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v20 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { + this := &ResponseCheckTx{} + this.Code = uint32(r.Uint32()) + v21 := r.Intn(100) + this.Data = make([]byte, v21) + for i := 0; i < v21; i++ { + this.Data[i] = byte(r.Intn(256)) + } + this.Log = string(randStringTypes(r)) + this.Info = string(randStringTypes(r)) + this.GasWanted = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasWanted *= -1 + } + this.GasUsed = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasUsed *= -1 + } + if r.Intn(10) != 0 { + v22 := r.Intn(5) + this.Tags = make([]common.KVPair, v22) + for i := 0; i < v22; i++ { + v23 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v23 + } + } + v24 := common.NewPopulatedKI64Pair(r, easy) + this.Fee = *v24 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { + this := &ResponseDeliverTx{} + this.Code = uint32(r.Uint32()) + v25 := r.Intn(100) + this.Data = make([]byte, v25) + for i := 0; i < v25; i++ { + this.Data[i] = byte(r.Intn(256)) + } + this.Log = string(randStringTypes(r)) + this.Info = string(randStringTypes(r)) + this.GasWanted = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasWanted *= -1 + } + this.GasUsed = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasUsed *= -1 + } + if r.Intn(10) != 0 { + v26 := r.Intn(5) + this.Tags = make([]common.KVPair, v26) + for i := 0; i < v26; i++ { + v27 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v27 + } + } + v28 := common.NewPopulatedKI64Pair(r, easy) + this.Fee = *v28 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { + this := &ResponseEndBlock{} + if r.Intn(10) != 0 { + v29 := r.Intn(5) + this.ValidatorUpdates = make([]Validator, v29) + for i := 0; i < v29; i++ { + v30 := NewPopulatedValidator(r, easy) + this.ValidatorUpdates[i] = *v30 + } + } + if r.Intn(10) != 0 { + this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) + } + if r.Intn(10) != 0 { + v31 := r.Intn(5) + this.Tags = 
make([]common.KVPair, v31) + for i := 0; i < v31; i++ { + v32 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v32 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { + this := &ResponseCommit{} + v33 := r.Intn(100) + this.Data = make([]byte, v33) + for i := 0; i < v33; i++ { + this.Data[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedConsensusParams(r randyTypes, easy bool) *ConsensusParams { + this := &ConsensusParams{} + if r.Intn(10) != 0 { + this.BlockSize = NewPopulatedBlockSize(r, easy) + } + if r.Intn(10) != 0 { + this.TxSize = NewPopulatedTxSize(r, easy) + } + if r.Intn(10) != 0 { + this.BlockGossip = NewPopulatedBlockGossip(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBlockSize(r randyTypes, easy bool) *BlockSize { + this := &BlockSize{} + this.MaxBytes = int32(r.Int31()) + if r.Intn(2) == 0 { + this.MaxBytes *= -1 + } + this.MaxTxs = int32(r.Int31()) + if r.Intn(2) == 0 { + this.MaxTxs *= -1 + } + this.MaxGas = int64(r.Int63()) + if r.Intn(2) == 0 { + this.MaxGas *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTxSize(r randyTypes, easy bool) *TxSize { + this := &TxSize{} + this.MaxBytes = int32(r.Int31()) + if r.Intn(2) == 0 { + this.MaxBytes *= -1 + } + this.MaxGas = int64(r.Int63()) + if r.Intn(2) == 0 { + this.MaxGas *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBlockGossip(r randyTypes, easy bool) *BlockGossip { + this := &BlockGossip{} + this.BlockPartSizeBytes = int32(r.Int31()) + if r.Intn(2) == 0 { + this.BlockPartSizeBytes *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedHeader(r randyTypes, easy bool) *Header { + this := &Header{} + this.ChainID = string(randStringTypes(r)) + this.Height = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Height *= -1 + } + this.Time = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Time *= -1 + } + this.NumTxs = int32(r.Int31()) + if r.Intn(2) == 0 { + this.NumTxs *= -1 + } + this.TotalTxs = int64(r.Int63()) + if r.Intn(2) == 0 { + this.TotalTxs *= -1 + } + v34 := r.Intn(100) + this.LastBlockHash = make([]byte, v34) + for i := 0; i < v34; i++ { + this.LastBlockHash[i] = byte(r.Intn(256)) + } + v35 := r.Intn(100) + this.ValidatorsHash = make([]byte, v35) + for i := 0; i < v35; i++ { + this.ValidatorsHash[i] = byte(r.Intn(256)) + } + v36 := r.Intn(100) + this.AppHash = make([]byte, v36) + for i := 0; i < v36; i++ { + this.AppHash[i] = byte(r.Intn(256)) + } + v37 := NewPopulatedValidator(r, easy) + this.Proposer = *v37 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValidator(r randyTypes, easy bool) *Validator { + this := &Validator{} + v38 := r.Intn(100) + this.Address = make([]byte, v38) + for i := 0; i < v38; i++ { + this.Address[i] = byte(r.Intn(256)) + } + v39 := NewPopulatedPubKey(r, easy) + this.PubKey = *v39 + this.Power = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Power *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedSigningValidator(r randyTypes, easy bool) *SigningValidator { + this := &SigningValidator{} + v40 := NewPopulatedValidator(r, easy) + this.Validator = *v40 + this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { + this := &PubKey{} + this.Type = 
string(randStringTypes(r)) + v41 := r.Intn(100) + this.Data = make([]byte, v41) + for i := 0; i < v41; i++ { + this.Data[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { + this := &Evidence{} + this.Type = string(randStringTypes(r)) + v42 := NewPopulatedValidator(r, easy) + this.Validator = *v42 + this.Height = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Height *= -1 + } + this.Time = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Time *= -1 + } + this.TotalVotingPower = int64(r.Int63()) + if r.Intn(2) == 0 { + this.TotalVotingPower *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyTypes interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneTypes(r randyTypes) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringTypes(r randyTypes) string { + v43 := r.Intn(100) + tmps := make([]rune, v43) + for i := 0; i < v43; i++ { + tmps[i] = randUTF8RuneTypes(r) + } + return string(tmps) +} +func randUnrecognizedTypes(r randyTypes, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldTypes(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + v44 := r.Int63() + if r.Intn(2) == 0 { + v44 *= -1 + } + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v44)) + case 1: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Request) Size() (n int) { + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Request_Echo) Size() (n int) { + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Flush) Size() (n int) { + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Info) Size() (n int) { + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_SetOption) Size() (n int) { + var l int + _ = l + if m.SetOption != nil { + l = m.SetOption.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_InitChain) Size() (n int) { + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + 
sovTypes(uint64(l)) + } + return n +} +func (m *Request_Query) Size() (n int) { + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_BeginBlock) Size() (n int) { + var l int + _ = l + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_CheckTx) Size() (n int) { + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_EndBlock) Size() (n int) { + var l int + _ = l + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Commit) Size() (n int) { + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_DeliverTx) Size() (n int) { + var l int + _ = l + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *RequestEcho) Size() (n int) { + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestFlush) Size() (n int) { + var l int + _ = l + return n +} + +func (m *RequestInfo) Size() (n int) { + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestSetOption) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestInitChain) Size() (n int) { + var l int + _ = l + if m.Time != 0 { + n += 1 + sovTypes(uint64(m.Time)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppStateBytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestQuery) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *RequestBeginBlock) Size() (n int) { + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *RequestCheckTx) Size() (n int) { + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestDeliverTx) Size() (n int) { + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestEndBlock) Size() (n int) { + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *RequestCommit) Size() (n int) { + var l int + _ = l + return n +} + +func (m *Response) Size() (n int) { + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n 
+}
+
+func (m *Response_Exception) Size() (n int) {
+	var l int
+	_ = l
+	if m.Exception != nil {
+		l = m.Exception.Size()
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	return n
+}
[... eleven more one-field Size() methods of identical shape, one per Response oneof wrapper: Response_Echo, Response_Flush, Response_Info, Response_SetOption, Response_InitChain, Response_Query, Response_BeginBlock, Response_CheckTx, Response_DeliverTx, Response_EndBlock, Response_Commit ...]
+
+func (m *ResponseInfo) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	if m.LastBlockHeight != 0 {
+		n += 1 + sovTypes(uint64(m.LastBlockHeight))
+	}
+	l = len(m.LastBlockAppHash)
+	if l > 0 {
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	return n
+}
[... Size() methods of the same form follow for the remaining message types: ResponseException, ResponseEcho, ResponseFlush, ResponseSetOption, ResponseInitChain, ResponseQuery, ResponseBeginBlock, ResponseCheckTx, ResponseDeliverTx, ResponseEndBlock, ResponseCommit, ConsensusParams, BlockSize, TxSize, BlockGossip, Header, Validator, SigningValidator, PubKey and Evidence. Scalar fields add 1 + sovTypes(value) when non-zero, length-delimited fields add 1 + len + sovTypes(len) when non-empty, repeated fields sum that term per element, non-nullable sub-messages (ResponseCheckTx.Fee, ResponseDeliverTx.Fee, Header.Proposer, Validator.PubKey, SigningValidator.Validator, Evidence.Validator) are counted unconditionally, and the bool SigningValidator.SignedLastBlock costs a flat 2 bytes (tag plus one-byte value) ...]
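[Note: every generated Size() method above is the same wire-format arithmetic: one tag byte (all field numbers in this file are below 16), plus the varint-encoded payload length, plus the payload itself. A minimal standalone sketch of that arithmetic; sovSketch is a stand-in for the generated sovTypes and is not part of this diff:]

package main

import "fmt"

// sovSketch counts the bytes needed to encode x as a base-128 varint,
// mirroring the generated sovTypes helper defined below.
func sovSketch(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	payload := []byte("hello") // a 5-byte bytes field or embedded message
	l := len(payload)
	// The generated "n += 1 + l + sovTypes(uint64(l))" term:
	// 1 tag byte + varint-encoded length + payload bytes.
	fmt.Println(1 + sovSketch(uint64(l)) + l) // 7
}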
+
+func sovTypes(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozTypes(x uint64) (n int) {
+	return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Request: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RequestEcho{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Value = &Request_Echo{v}
+			iNdEx = postIndex
[... cases 3–9, 11, 12 and 19 repeat the same length-delimited decode for Flush, Info, SetOption, InitChain, Query, BeginBlock, CheckTx, EndBlock, Commit and DeliverTx, each wrapping its concrete type into m.Value ...]
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
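[Note: each Unmarshal above opens with the same hand-rolled varint loop and splits the result into a field number and a wire type. A self-contained sketch of those two steps, assuming nothing beyond the protobuf wire format; decodeVarint is an illustrative name, not one from this diff:]

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the generated loop: 7 payload bits per byte,
// least-significant group first, terminated by a byte with the high bit clear.
func decodeVarint(b []byte) (x uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		x |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return x, n, nil
		}
	}
	// The generated code returns ErrIntOverflowTypes in this case.
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// 0x12 is the key for Request.Echo: field 2, wire type 2 (length-delimited).
	wire, _, _ := decodeVarint([]byte{0x12})
	fmt.Println(wire>>3, wire&0x7) // 2 2
}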
+func (m *RequestEcho) Unmarshal(dAtA []byte) error {
[... the same tag-reading envelope as Request.Unmarshal, with "proto: RequestEcho: ..." in the error strings ...]
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
[... default/skip arm and trailing bounds check as in Request.Unmarshal ...]
+}
[... RequestFlush.Unmarshal carries no fields (only the default/skip arm); RequestInfo.Unmarshal decodes field 1 Version exactly as RequestEcho decodes Message; RequestSetOption.Unmarshal decodes field 1 Key and field 2 Value as strings ...]
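[Note: strings share wire type 2 with bytes and embedded messages — a varint length followed by that many payload bytes. A tiny worked example of the Message decode above, with invented input bytes:]

package main

import "fmt"

func main() {
	// Key 0x0A = field 1, wire type 2; then length 5; then the payload.
	dAtA := []byte{0x0A, 0x05, 'h', 'e', 'l', 'l', 'o'}
	stringLen := int(dAtA[1]) // single-byte varint (high bit clear)
	postIndex := 2 + stringLen
	fmt.Println(string(dAtA[2:postIndex])) // hello
}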
+func (m *RequestInitChain) Unmarshal(dAtA []byte) error {
[... same envelope; field 1 Time is a varint into an int64, field 2 ChainId a string, field 3 ConsensusParams allocates &ConsensusParams{} on first use and unmarshals into it, field 4 Validators appends a Validator{} per occurrence and unmarshals into the new element ...]
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...)
+			if m.AppStateBytes == nil {
+				m.AppStateBytes = []byte{}
+			}
+			iNdEx = postIndex
[... default/skip arm and trailing bounds check as above ...]
+}
[... RequestQuery.Unmarshal decodes field 1 Data (bytes, copied the same way), field 2 Path (string), field 3 Height (varint int64) and field 4 Prove (varint read into a bool via v != 0); RequestBeginBlock.Unmarshal then opens with the same envelope ...]
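[Note: bytes fields are copied out of the input with append(dst[:0], src...) rather than sliced from it, so a decoded message stays intact if the caller reuses dAtA afterwards. A sketch of the difference:]

package main

import "fmt"

func main() {
	dAtA := []byte{'a', 'b', 'c'}

	alias := dAtA[0:3] // shares dAtA's backing array
	var copied []byte
	copied = append(copied[:0], dAtA...) // what the generated code does

	dAtA[0] = 'X'                              // caller recycles the buffer
	fmt.Println(string(alias), string(copied)) // Xbc abc
}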
+func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error {
[... same envelope; field 1 Hash is bytes (copied as above), field 2 Header unmarshals in place into the embedded m.Header ...]
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Validators = append(m.Validators, SigningValidator{})
+			if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
[... field 4 ByzantineValidators appends an Evidence{} per occurrence the same way; then the default/skip arm and trailing bounds check ...]
+}
[... RequestCheckTx.Unmarshal and RequestDeliverTx.Unmarshal each decode a single field 1 Tx (bytes, copied); RequestEndBlock.Unmarshal decodes field 1 Height (varint int64); RequestCommit.Unmarshal has only the default/skip arm; Response.Unmarshal then begins with the same envelope as Request.Unmarshal ...]
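[Note: a repeated message field arrives as one independently tagged, length-delimited record per element; the decoder appends a zero value per occurrence and unmarshals into it, as the Validators case above does. A toy model with plain byte payloads standing in for SigningValidator; all names here are invented:]

package main

import "fmt"

type record struct{ payload []byte }

func main() {
	// Two occurrences of "field 3, wire type 2" back to back, as the
	// generated RequestBeginBlock decoder would see for two Validators.
	dAtA := []byte{0x1A, 0x01, 0xAA, 0x1A, 0x02, 0xBB, 0xCC}
	var out []record
	for i := 0; i < len(dAtA); {
		i++               // skip the one-byte key 0x1A (3<<3 | 2)
		n := int(dAtA[i]) // one-byte varint length
		i++
		out = append(out, record{}) // append, then fill — as the generated code does
		out[len(out)-1].payload = append([]byte(nil), dAtA[i:i+n]...)
		i += n
	}
	fmt.Println(len(out), out[0].payload, out[1].payload) // 2 [170] [187 204]
}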
+func (m *Response) Unmarshal(dAtA []byte) error {
[... same envelope, with "proto: Response: ..." error strings; each case reads a varint length, bounds-checks it, unmarshals the concrete type and stores the wrapper in m.Value — case 1 Exception, 2 Echo, 3 Flush, 4 Info, 5 SetOption, 6 InitChain, 7 Query, 8 BeginBlock, 9 CheckTx, 10 DeliverTx, 11 EndBlock, 12 Commit — e.g.: ...]
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &ResponseCommit{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Value = &Response_Commit{v}
+			iNdEx = postIndex
[... default/skip arm and trailing bounds check ...]
+}
[... ResponseException.Unmarshal decodes field 1 Error as a string; ResponseEcho.Unmarshal decodes field 1 Message as a string ...]
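[Note: the Request/Response oneof compiles to one interface-typed field, so decoding any alternative overwrites m.Value and the last alternative on the wire wins. A stripped-down model of that shape; the types below are invented for illustration:]

package main

import "fmt"

type isValue interface{ isValue() }

type ValueEcho struct{ Message string }
type ValueCommit struct{}

func (*ValueEcho) isValue()   {}
func (*ValueCommit) isValue() {}

type Envelope struct{ Value isValue }

func main() {
	var m Envelope
	m.Value = &ValueEcho{Message: "hi"} // case 2 decoded first
	m.Value = &ValueCommit{}            // case 12 decoded later: overwrites
	switch v := m.Value.(type) {
	case *ValueEcho:
		fmt.Println("echo:", v.Message)
	case *ValueCommit:
		fmt.Println("commit") // this branch runs
	}
}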
[... ResponseEcho's Message case and ResponseFlush.Unmarshal (fields-free, default/skip arm only) close out as above; interleaved with these additions, the hunk removes the tail of the old generated _ABCIApplication_CheckTx_Handler at this position in the file: ...]
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.ABCIApplication/CheckTx",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx))
-	}
-	return interceptor(ctx, in, info, handler)
[... while adding ResponseInfo.Unmarshal (field 1 Data string, field 2 Version string, field 3 LastBlockHeight varint int64, field 4 LastBlockAppHash bytes) and the start of ResponseSetOption.Unmarshal (field 1 Code varint uint32, field 3 Log string — field 2 is unused in the schema) ...]
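[Note: the removed fragments here and just below are the standard grpc-go generated unary handler shape, which the regenerated file emits elsewhere. For reference, the full CheckTx handler, reconstructed from the removed fragments in this and the following hunk; it assumes the generated file's context and grpc imports:]

func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RequestCheckTx)
	if err := dec(in); err != nil { // decode the wire request into in
		return nil, err
	}
	if interceptor == nil { // no interceptor installed: call the service directly
		return srv.(ABCIApplicationServer).CheckTx(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.ABCIApplication/CheckTx",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx))
	}
	return interceptor(ctx, in, info, handler) // interceptor wraps the real call
}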
wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestQuery) - if err := dec(in); err != nil { - return nil, err + if iNdEx > l { + return io.ErrUnexpectedEOF } - if interceptor == nil { - return srv.(ABCIApplicationServer).Query(ctx, in) + return nil +} +func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, Validator{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/Query", + + if iNdEx > l { + return io.ErrUnexpectedEOF } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Query(ctx, req.(*RequestQuery)) + return nil +} +func (m *ResponseQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
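
Every `string`, `bytes`, and embedded-message field in the generated decoders above follows the same wire-type-2 shape: read a varint length, bounds-check it against the remaining buffer, then copy out exactly that many bytes. A minimal standalone sketch of that pattern (`readLengthDelimited` is a hypothetical helper for illustration, not part of the generated file):

```go
package main

import (
	"errors"
	"fmt"
)

// readLengthDelimited mirrors the wire-type-2 handling in the generated
// Unmarshal methods: decode a varint length, bounds-check it, copy the bytes.
func readLengthDelimited(buf []byte, i int) (field []byte, next int, err error) {
	var length uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errors.New("varint overflow")
		}
		if i >= len(buf) {
			return nil, 0, errors.New("unexpected EOF")
		}
		b := buf[i]
		i++
		length |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	end := i + int(length)
	if end < i || end > len(buf) {
		return nil, 0, errors.New("invalid length")
	}
	return append([]byte(nil), buf[i:end]...), end, nil
}

func main() {
	// 0x03 is the varint length 3; "key" is the payload.
	field, next, err := readLengthDelimited([]byte{0x03, 'k', 'e', 'y'}, 0)
	fmt.Println(string(field), next, err) // key 4 <nil>
}
```
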
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) + if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - return interceptor(ctx, in, info, handler) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } +func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, common.KVPair{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestCommit) - if err := dec(in); err != nil { - return nil, err + if iNdEx > l { + return io.ErrUnexpectedEOF } - if interceptor == nil { - return srv.(ABCIApplicationServer).Commit(ctx, in) + return nil +} +func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCheckTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
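
The `fieldNum := int32(wire >> 3)` / `wireType := int(wire & 0x7)` split that opens each Unmarshal above is the protobuf key encoding: every field key is a varint packing `(field_number << 3) | wire_type`. A tiny illustration of one key byte:

```go
package main

import "fmt"

func main() {
	// A key byte of 0x1A packs (field 3 << 3) | wire type 2 — the tag the
	// decoders above match for length-delimited field 3 (e.g. Log).
	wire := uint64(0x1A)
	fieldNum := int32(wire >> 3)
	wireType := int(wire & 0x7)
	fmt.Println(fieldNum, wireType) // 3 2
}
```
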
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, common.KVPair{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fee", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Fee.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/Commit", + + if iNdEx > l { + return io.ErrUnexpectedEOF } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Commit(ctx, req.(*RequestCommit)) + return nil +} +func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseDeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
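
The wire-type-0 loops above (`Code`, `GasWanted`, `GasUsed`, `Height`, ...) all decode base-128 varints: accumulate 7 payload bits per byte, least-significant group first, until a byte with the continuation bit (0x80) clear ends the value. A self-contained sketch (`readVarint64` is a hypothetical helper named for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// readVarint64 mirrors the wire-type-0 loops in the generated code:
// accumulate 7 payload bits per byte until the continuation bit is clear.
func readVarint64(buf []byte, i int) (v int64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(buf) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := buf[i]
		i++
		v |= int64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// 0xAC 0x02 decodes to 300: 0x2C (44) from the first byte,
	// plus 0x02 << 7 (256) from the second.
	v, next, err := readVarint64([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}
```
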
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, common.KVPair{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fee", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Fee.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } } - return interceptor(ctx, in, info, handler) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } +func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, Validator{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, common.KVPair{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestInitChain) - if err := dec(in); err != nil { - return nil, err + if iNdEx > l { + return io.ErrUnexpectedEOF } - if interceptor == nil { - return 
srv.(ABCIApplicationServer).InitChain(ctx, in) + return nil +} +func (m *ResponseCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/InitChain", + + if iNdEx > l { + return io.ErrUnexpectedEOF } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) + return nil +} +func (m *ConsensusParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockSize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockSize == nil { + m.BlockSize = &BlockSize{} + } + if err := m.BlockSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxSize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxSize == nil { + m.TxSize = &TxSize{} + } + if err := m.TxSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockGossip", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockGossip == nil { + m.BlockGossip = &BlockGossip{} + } + if err := m.BlockGossip.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - return interceptor(ctx, in, info, handler) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } +func (m *BlockSize) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockSize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockSize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTxs", wireType) + } + m.MaxTxs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTxs |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + } + m.MaxGas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxGas |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBeginBlock) - if err := dec(in); err != nil { - return nil, err + if iNdEx > l { + return io.ErrUnexpectedEOF } - if interceptor == nil { - return srv.(ABCIApplicationServer).BeginBlock(ctx, in) + return nil +} +func (m *TxSize) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxSize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxSize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + } + m.MaxGas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxGas |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/BeginBlock", + + if iNdEx > l { + return io.ErrUnexpectedEOF } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) + return nil +} +func (m *BlockGossip) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockGossip: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockGossip: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPartSizeBytes", wireType) + } + m.BlockPartSizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockPartSizeBytes |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - return interceptor(ctx, in, info, handler) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) + } + m.NumTxs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumTxs |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalTxs", wireType) + } + m.TotalTxs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalTxs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastBlockHash = 
append(m.LastBlockHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastBlockHash == nil { + m.LastBlockHash = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorsHash == nil { + m.ValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proposer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestEndBlock) - if err := dec(in); err != nil { - return nil, err + if iNdEx > l { + return io.ErrUnexpectedEOF } - if interceptor == nil { - return srv.(ABCIApplicationServer).EndBlock(ctx, in) + return nil +} +func (m *Validator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) + if m.Address == nil { + m.Address = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + } + m.Power = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Power |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.ABCIApplication/EndBlock", + + if iNdEx > l { + return io.ErrUnexpectedEOF } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*RequestEndBlock)) + return nil +} +func (m *SigningValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SigningValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SigningValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedLastBlock", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SignedLastBlock = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - return interceptor(ctx, in, info, handler) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } +func (m *PubKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
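
Embedded messages such as `Validator.PubKey` above are length-delimited just like `bytes` and `string` fields: the outer decoder reads `msglen`, slices out exactly that many bytes, and recurses into the inner type's `Unmarshal`, so nested messages decode one level per recursion. A sketch under stated assumptions (`Inner` and `decodeEmbedded` are hypothetical stand-ins for the generated types):

```go
package main

import (
	"errors"
	"fmt"
)

// Inner stands in for a nested message such as PubKey; the real generated
// code decodes proper fields here, but the slice-and-recurse shape is the same.
type Inner struct{ Raw []byte }

func (m *Inner) Unmarshal(b []byte) error {
	m.Raw = append([]byte(nil), b...)
	return nil
}

// decodeEmbedded mirrors the embedded-message case above: read the varint
// length, bounds-check it, then hand exactly that slice to the child's
// Unmarshal.
func decodeEmbedded(child *Inner, buf []byte, i int) (next int, err error) {
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, errors.New("integer overflow")
		}
		if i >= len(buf) {
			return 0, errors.New("unexpected EOF")
		}
		b := buf[i]
		i++
		msglen |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	post := i + msglen
	if msglen < 0 || post > len(buf) {
		return 0, errors.New("invalid length")
	}
	if err := child.Unmarshal(buf[i:post]); err != nil {
		return 0, err
	}
	return post, nil
}

func main() {
	var pk Inner
	next, err := decodeEmbedded(&pk, []byte{0x02, 0xCA, 0xFE}, 0)
	fmt.Println(pk.Raw, next, err) // [202 254] 3 <nil>
}
```
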
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ - ServiceName: "types.ABCIApplication", - HandlerType: (*ABCIApplicationServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Echo", - Handler: _ABCIApplication_Echo_Handler, - }, - { - MethodName: "Flush", - Handler: _ABCIApplication_Flush_Handler, - }, - { - MethodName: "Info", - Handler: _ABCIApplication_Info_Handler, - }, - { - MethodName: "SetOption", - Handler: _ABCIApplication_SetOption_Handler, - }, - { - MethodName: "DeliverTx", - Handler: _ABCIApplication_DeliverTx_Handler, - }, - { - MethodName: "CheckTx", - Handler: _ABCIApplication_CheckTx_Handler, - }, - { - MethodName: "Query", - Handler: _ABCIApplication_Query_Handler, - }, - { - MethodName: "Commit", - Handler: _ABCIApplication_Commit_Handler, - }, - { - MethodName: "InitChain", - Handler: _ABCIApplication_InitChain_Handler, - }, - { - MethodName: "BeginBlock", - Handler: _ABCIApplication_BeginBlock_Handler, - }, - { - MethodName: "EndBlock", - Handler: _ABCIApplication_EndBlock_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "types/types.proto", + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Evidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Evidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") } -func init() { proto.RegisterFile("types/types.proto", fileDescriptorTypes) } +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptorTypes) } +func init() { golang_proto.RegisterFile("abci/types/types.proto", fileDescriptorTypes) } var fileDescriptorTypes = []byte{ - // 1846 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 
0x08, 0x00, /* ... remaining hex of the removed 1846-byte gzipped FileDescriptorProto elided ... */
+	// 1892 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, /* ... remaining hex of the new 1892-byte gzipped FileDescriptorProto elided ... */
0x9e, 0xb3, 0x88, 0xcb, 0x00, 0xd0, 0x8a, 0x9a, 0xb4, 0x3e, 0x80, 0x4e, + 0x31, 0x08, 0x48, 0x07, 0xaa, 0x2f, 0xd8, 0x85, 0x96, 0x94, 0x4b, 0x72, 0x53, 0x5f, 0x08, 0xa3, + 0xd8, 0xb4, 0xf5, 0xed, 0xfe, 0x61, 0x24, 0xca, 0x49, 0x1c, 0x10, 0x02, 0x35, 0xe1, 0xcd, 0xd4, + 0x05, 0xab, 0x36, 0xae, 0xc9, 0x6d, 0xf9, 0x23, 0x51, 0xcf, 0x1f, 0x7a, 0x8e, 0xde, 0xa1, 0x89, + 0xf4, 0x89, 0x43, 0x8e, 0xa0, 0x33, 0x0e, 0x7c, 0xce, 0x7c, 0x3e, 0xe7, 0xc3, 0x90, 0x46, 0x74, + 0xc6, 0x75, 0xfc, 0xc7, 0x3f, 0xc9, 0x71, 0xcc, 0x3e, 0x45, 0xae, 0xdd, 0x1e, 0xe7, 0x3f, 0x90, + 0x87, 0x00, 0xe7, 0x74, 0xea, 0x39, 0x54, 0x04, 0x11, 0xef, 0xd6, 0xee, 0x56, 0x77, 0x57, 0x0f, + 0x3b, 0x5a, 0xf9, 0x79, 0xcc, 0xe8, 0xd7, 0xbe, 0x7a, 0x75, 0x67, 0xc5, 0xce, 0x48, 0x92, 0x7b, + 0xd0, 0xa6, 0x61, 0x38, 0xe4, 0x82, 0x0a, 0x36, 0x1c, 0x5d, 0x08, 0xc6, 0x31, 0x3b, 0xd6, 0xec, + 0x75, 0x1a, 0x86, 0x4f, 0xe5, 0xd7, 0xbe, 0xfc, 0x68, 0x39, 0x89, 0x6f, 0x31, 0x70, 0xa5, 0x85, + 0x0e, 0x15, 0x14, 0x2d, 0x5c, 0xb3, 0x71, 0x2d, 0xbf, 0x85, 0x54, 0x4c, 0xb4, 0x75, 0xb8, 0x26, + 0x5b, 0xd0, 0x98, 0x30, 0xcf, 0x9d, 0x08, 0x34, 0xa8, 0x6a, 0x6b, 0x4a, 0x3a, 0x33, 0x8c, 0x82, + 0x73, 0x86, 0xb9, 0xdb, 0xb2, 0x15, 0x61, 0xfd, 0xd5, 0x80, 0x1b, 0x57, 0x82, 0x5d, 0xee, 0x3b, + 0xa1, 0x7c, 0x12, 0x9f, 0x25, 0xd7, 0xe4, 0x1d, 0xb9, 0x2f, 0x75, 0x58, 0xa4, 0x31, 0x65, 0x5d, + 0xdb, 0x3a, 0xc0, 0x8f, 0xda, 0x50, 0x2d, 0x42, 0xbe, 0x9f, 0x73, 0x4e, 0x15, 0x9d, 0x13, 0x07, + 0xe1, 0x53, 0xcf, 0xf5, 0x3d, 0xdf, 0x7d, 0x9d, 0x8f, 0x06, 0x70, 0x73, 0x74, 0xf1, 0x92, 0xfa, + 0xc2, 0xf3, 0xd9, 0xf0, 0x8a, 0x97, 0xdb, 0x7a, 0xa3, 0x27, 0xe7, 0x9e, 0xc3, 0xfc, 0x31, 0xd3, + 0x1b, 0x6c, 0x26, 0x2a, 0xc9, 0xd6, 0xdc, 0xba, 0x0b, 0x1b, 0xf9, 0x8c, 0x24, 0x1b, 0x50, 0x11, + 0x0b, 0x6d, 0x59, 0x45, 0x2c, 0x2c, 0x2b, 0x89, 0xa6, 0x24, 0x2d, 0xae, 0xc8, 0xdc, 0x87, 0x76, + 0x21, 0x45, 0x33, 0x6e, 0x36, 0xb2, 0x6e, 0xb6, 0xda, 0xb0, 0x9e, 0xcb, 0x4c, 0xeb, 0xcb, 0x3a, + 0xb4, 0x6c, 0xc6, 0x43, 0x19, 0x3e, 0xe4, 0x11, 0x98, 0x6c, 0x31, 0x66, 0x0a, 0x14, 0x8d, 0x02, + 0xe4, 0x28, 0x99, 0x27, 0x31, 0x5f, 0x26, 0x67, 0x22, 0x4c, 0xee, 0xe7, 0x00, 0x7d, 0xb3, 0xa8, + 0x94, 0x45, 0xf4, 0xbd, 0x3c, 0xa2, 0xdf, 0x2c, 0xc8, 0x16, 0x20, 0xfd, 0x7e, 0x0e, 0xd2, 0x8b, + 0x1b, 0xe7, 0x30, 0xfd, 0x71, 0x09, 0xa6, 0x17, 0xaf, 0xbf, 0x04, 0xd4, 0x1f, 0x97, 0x80, 0x7a, + 0xf7, 0xca, 0x59, 0xa5, 0xa8, 0xbe, 0x97, 0x47, 0xf5, 0xa2, 0x39, 0x05, 0x58, 0xff, 0x5e, 0x19, + 0xac, 0xdf, 0x2e, 0xe8, 0x2c, 0xc5, 0xf5, 0xf7, 0xae, 0xe0, 0xfa, 0x56, 0x41, 0xb5, 0x04, 0xd8, + 0x1f, 0xe7, 0x10, 0x17, 0x4a, 0x6d, 0x2b, 0x87, 0x5c, 0xf2, 0xf0, 0x6a, 0x4d, 0xd8, 0x2e, 0xfe, + 0xb4, 0x65, 0x45, 0xe1, 0xa0, 0x50, 0x14, 0x6e, 0x15, 0x6f, 0x59, 0xa8, 0x0a, 0x29, 0xb6, 0xdf, + 0x97, 0xf9, 0x5e, 0x88, 0x34, 0x89, 0x0d, 0x2c, 0x8a, 0x82, 0x48, 0x83, 0xaf, 0x22, 0xac, 0x5d, + 0x89, 0x40, 0x69, 0x7c, 0xbd, 0xa6, 0x0e, 0x60, 0xd0, 0x67, 0xa2, 0xcb, 0xfa, 0xb5, 0x91, 0xea, + 0x62, 0x29, 0xc8, 0xa2, 0x97, 0xa9, 0xd1, 0x2b, 0x53, 0x1e, 0x2a, 0xb9, 0xf2, 0x40, 0xbe, 0x0d, + 0x37, 0xa6, 0x94, 0x0b, 0xe5, 0x97, 0x61, 0x0e, 0xce, 0xda, 0x92, 0xa1, 0x1c, 0xa2, 0x70, 0xed, + 0x5d, 0xd8, 0xcc, 0xc8, 0x4a, 0x68, 0x45, 0xe8, 0xaa, 0x61, 0xf2, 0x76, 0x12, 0xe9, 0xa3, 0x30, + 0x1c, 0x50, 0x3e, 0xb1, 0x7e, 0x9c, 0xda, 0x9f, 0x96, 0x1e, 0x02, 0xb5, 0x71, 0xe0, 0x28, 0xb3, + 0xd6, 0x6d, 0x5c, 0xcb, 0x72, 0x34, 0x0d, 0x5c, 0x3c, 0xd5, 0xb4, 0xe5, 0x52, 0x4a, 0x25, 0x99, + 0x62, 0xaa, 0x94, 0xb0, 0x7e, 0x65, 0xa4, 0xfb, 0xa5, 0xd5, 0xa8, 0xac, 0xbc, 0x18, 0xff, 0x4b, + 0x79, 0xa9, 0x5c, 0xb7, 0xbc, 0x58, 0x7f, 0x30, 
0xd2, 0xdf, 0x22, 0x29, 0x1c, 0x6f, 0x66, 0x9c, + 0x0c, 0x0b, 0xcf, 0x77, 0xd8, 0x02, 0x53, 0xbd, 0x6a, 0x2b, 0x22, 0xae, 0xd3, 0x0d, 0x74, 0x70, + 0xbe, 0x4e, 0x37, 0xf1, 0x9b, 0x22, 0x74, 0xc1, 0x09, 0xce, 0x30, 0x07, 0xd7, 0x6c, 0x45, 0x64, + 0x70, 0xd3, 0xcc, 0xe1, 0xe6, 0x29, 0x90, 0xab, 0xd9, 0x49, 0x3e, 0x80, 0x9a, 0xa0, 0xae, 0x74, + 0x9e, 0xb4, 0x7f, 0x63, 0x5f, 0x75, 0xbd, 0xfb, 0x9f, 0x3e, 0x3f, 0xa5, 0x5e, 0xd4, 0xdf, 0x92, + 0xd6, 0xff, 0xf3, 0xd5, 0x9d, 0x0d, 0x29, 0xb3, 0x17, 0xcc, 0x3c, 0xc1, 0x66, 0xa1, 0xb8, 0xb0, + 0x51, 0xc7, 0xfa, 0xb7, 0x21, 0x51, 0x3b, 0x97, 0xb5, 0xa5, 0xbe, 0x88, 0x43, 0xb3, 0x92, 0x29, + 0xac, 0xd7, 0xf3, 0xcf, 0x37, 0x01, 0x5c, 0xca, 0x87, 0x5f, 0x50, 0x5f, 0x30, 0x47, 0x3b, 0xc9, + 0x74, 0x29, 0xff, 0x39, 0x7e, 0x90, 0xfd, 0x87, 0x64, 0xcf, 0x39, 0x73, 0xd0, 0x5b, 0x55, 0xbb, + 0xe9, 0x52, 0xfe, 0x19, 0x67, 0x4e, 0x62, 0x57, 0xf3, 0xbf, 0xb7, 0x8b, 0xec, 0x42, 0xf5, 0x8c, + 0x31, 0x8d, 0x6c, 0x9d, 0x44, 0xf5, 0xe4, 0xe1, 0xfb, 0xa8, 0xac, 0x42, 0x42, 0x8a, 0x58, 0xbf, + 0xac, 0xa4, 0xc1, 0x99, 0x16, 0xb7, 0xff, 0x2f, 0x1f, 0x7c, 0x8d, 0xdd, 0x62, 0x1e, 0x4a, 0xc9, + 0x31, 0xdc, 0x48, 0x52, 0x66, 0x38, 0x0f, 0x1d, 0x2a, 0xbb, 0x30, 0xe3, 0xb5, 0x39, 0xd6, 0x49, + 0x14, 0x3e, 0x53, 0xf2, 0xe4, 0x27, 0xb0, 0x5d, 0x48, 0xf2, 0x64, 0xab, 0xca, 0x6b, 0x73, 0xfd, + 0x56, 0x3e, 0xd7, 0xe3, 0xfd, 0x62, 0x7f, 0x54, 0xdf, 0x20, 0xd6, 0xbf, 0x25, 0xdb, 0x9c, 0x2c, + 0xf4, 0x97, 0xfd, 0xa2, 0xd6, 0x6f, 0x0c, 0x68, 0x17, 0x2e, 0x43, 0x0e, 0x00, 0x14, 0x72, 0x72, + 0xef, 0x25, 0xd3, 0x20, 0x15, 0xfb, 0x00, 0x9d, 0xf5, 0xd4, 0x7b, 0xc9, 0x6c, 0x73, 0x14, 0x2f, + 0xc9, 0x3d, 0x68, 0x8a, 0x85, 0x92, 0xce, 0x37, 0x82, 0xcf, 0x16, 0x28, 0xda, 0x10, 0xf8, 0x9f, + 0x3c, 0x80, 0x35, 0xb5, 0xb1, 0x1b, 0x70, 0xee, 0x85, 0xba, 0x19, 0x21, 0xd9, 0xad, 0x3f, 0x41, + 0x8e, 0xbd, 0x3a, 0x4a, 0x09, 0xeb, 0x17, 0x60, 0x26, 0xc7, 0x92, 0xb7, 0xc0, 0x9c, 0xd1, 0x85, + 0xee, 0x92, 0xe5, 0xdd, 0xea, 0x76, 0x6b, 0x46, 0x17, 0xd8, 0x20, 0x93, 0x6d, 0x68, 0x4a, 0xa6, + 0x58, 0x28, 0x7f, 0xd7, 0xed, 0xc6, 0x8c, 0x2e, 0x9e, 0x2d, 0x12, 0x86, 0x4b, 0x79, 0xdc, 0x02, + 0xcf, 0xe8, 0xe2, 0x13, 0xca, 0xad, 0x8f, 0xa0, 0xa1, 0x2e, 0x79, 0xad, 0x8d, 0xa5, 0x7e, 0x25, + 0xa7, 0xff, 0x03, 0x58, 0xcd, 0xdc, 0x9b, 0x7c, 0x17, 0x6e, 0x29, 0x0b, 0x43, 0x1a, 0x09, 0xf4, + 0x48, 0x6e, 0x43, 0x82, 0xcc, 0x53, 0x1a, 0x09, 0x79, 0xa4, 0x6a, 0xea, 0x7f, 0x5f, 0x81, 0x86, + 0x6a, 0x98, 0xc9, 0xbd, 0xcc, 0x74, 0x82, 0x55, 0xb1, 0xbf, 0x7a, 0xf9, 0xea, 0x4e, 0x13, 0x0b, + 0xc8, 0xc9, 0xc7, 0xe9, 0xa8, 0x92, 0x02, 0x66, 0x25, 0xd7, 0xcf, 0xc7, 0x13, 0x4f, 0x35, 0x33, + 0xf1, 0x6c, 0x43, 0xd3, 0x9f, 0xcf, 0xd0, 0x25, 0x35, 0xe5, 0x12, 0x7f, 0x3e, 0x93, 0x2e, 0x79, + 0x0b, 0x4c, 0x11, 0x08, 0x3a, 0x45, 0x96, 0x4a, 0xd2, 0x16, 0x7e, 0x90, 0xcc, 0x7b, 0xd0, 0xce, + 0x56, 0x5b, 0x59, 0x3d, 0x15, 0xb8, 0xaf, 0xa7, 0xb5, 0x56, 0x4e, 0x00, 0x6f, 0x43, 0x3b, 0x2d, + 0x34, 0x4a, 0x4e, 0x01, 0xfe, 0x46, 0xfa, 0x19, 0x05, 0x6f, 0x43, 0x2b, 0xa9, 0xc3, 0x0a, 0xfc, + 0x9b, 0x54, 0x95, 0x5f, 0x39, 0x38, 0x87, 0x51, 0x10, 0x06, 0x9c, 0x45, 0xba, 0xc1, 0x5a, 0x96, + 0x70, 0x89, 0x9c, 0xe5, 0x81, 0x99, 0x30, 0x65, 0xd3, 0x40, 0x1d, 0x27, 0x62, 0x9c, 0xeb, 0xfe, + 0x3c, 0x26, 0xc9, 0x1e, 0x34, 0xc3, 0xf9, 0x68, 0x28, 0x6b, 0x53, 0x3e, 0x30, 0x4f, 0xe7, 0xa3, + 0x4f, 0xd9, 0x45, 0x3c, 0xa1, 0x84, 0x48, 0x61, 0x75, 0x0a, 0xbe, 0x60, 0x91, 0xf6, 0x9f, 0x22, + 0x2c, 0x01, 0x9d, 0xe2, 0x78, 0x42, 0xde, 0x07, 0x33, 0xb1, 0xaf, 0x90, 0x20, 0xc5, 0x3b, 0xa7, + 0x82, 0xb2, 0x85, 0xe1, 0x9e, 0xeb, 0x33, 0x67, 0x98, 0xfa, 0x16, 0xef, 
0xd5, 0xb2, 0xdb, 0x8a, + 0xf1, 0xa3, 0xd8, 0xb9, 0xd6, 0x77, 0xa0, 0xa1, 0xee, 0x88, 0x3f, 0xea, 0x45, 0x18, 0xf7, 0x57, + 0xb8, 0x2e, 0xcd, 0xe4, 0xdf, 0x19, 0xd0, 0x8a, 0xc7, 0x9f, 0x52, 0xa5, 0xdc, 0xa5, 0x2b, 0xd7, + 0xbd, 0xf4, 0xb2, 0xd9, 0x31, 0x8e, 0xb5, 0x5a, 0x26, 0xd6, 0xf6, 0x80, 0xa8, 0x90, 0x3a, 0x0f, + 0x84, 0xe7, 0xbb, 0x43, 0xe5, 0x4d, 0x15, 0x5b, 0x1d, 0xe4, 0x3c, 0x47, 0xc6, 0xa9, 0xfc, 0x7e, + 0xf8, 0x65, 0x1d, 0xda, 0x47, 0xfd, 0xe3, 0x93, 0xa3, 0x30, 0x9c, 0x7a, 0x63, 0x8a, 0x5d, 0xd7, + 0x01, 0xd4, 0xb0, 0xaf, 0x2c, 0x79, 0x9d, 0xea, 0x95, 0x0d, 0x38, 0xe4, 0x10, 0xea, 0xd8, 0x5e, + 0x92, 0xb2, 0x47, 0xaa, 0x5e, 0xe9, 0x9c, 0x23, 0x0f, 0x51, 0x0d, 0xe8, 0xd5, 0xb7, 0xaa, 0x5e, + 0xd9, 0xb0, 0x43, 0x3e, 0x02, 0x33, 0x6d, 0x0c, 0x97, 0xbd, 0x58, 0xf5, 0x96, 0x8e, 0x3d, 0x52, + 0x3f, 0xad, 0xb5, 0xcb, 0x1e, 0x5e, 0x7a, 0x4b, 0xe7, 0x03, 0xf2, 0x08, 0x9a, 0x71, 0xb7, 0x52, + 0xfe, 0xa6, 0xd4, 0x5b, 0x32, 0x92, 0x48, 0xf7, 0xa8, 0x8e, 0xaf, 0xec, 0xe1, 0xab, 0x57, 0x3a, + 0x37, 0x91, 0x07, 0xd0, 0xd0, 0x05, 0xa3, 0xf4, 0x5d, 0xa9, 0x57, 0x3e, 0x58, 0x48, 0x23, 0xd3, + 0x6e, 0x77, 0xd9, 0xe3, 0x5c, 0x6f, 0xe9, 0x80, 0x47, 0x8e, 0x00, 0x32, 0x5d, 0xde, 0xd2, 0x57, + 0xb7, 0xde, 0xf2, 0xc1, 0x8d, 0x7c, 0x08, 0xad, 0x74, 0x18, 0x2f, 0x7f, 0x47, 0xeb, 0x2d, 0x9b, + 0xa5, 0xfa, 0xdf, 0xf8, 0xd7, 0xdf, 0x77, 0x8c, 0xdf, 0x5e, 0xee, 0x18, 0x7f, 0xbc, 0xdc, 0x31, + 0xbe, 0xba, 0xdc, 0x31, 0xfe, 0x72, 0xb9, 0x63, 0xfc, 0xed, 0x72, 0xc7, 0xf8, 0xd3, 0xd7, 0x3b, + 0xc6, 0xa8, 0x81, 0xef, 0xa9, 0xef, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x59, 0x8b, 0xa6, + 0xd9, 0x15, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index b4f4b2aa6..0e1c18430 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -4,12 +4,21 @@ package types; // For more information on gogo.proto, see: // https://github.com/gogo/protobuf/blob/master/extensions.md import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/tendermint/tmlibs/common/types.proto"; +import "github.com/tendermint/tendermint/libs/common/types.proto"; // This file is copied from http://github.com/tendermint/abci // NOTE: When using custom types, mind the warnings. // https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.goproto_registration) = true; +// Generate tests +option (gogoproto.populate_all) = true; +option (gogoproto.equal_all) = true; +option (gogoproto.testgen_all) = true; + //---------------------------------------- // Request types diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go new file mode 100644 index 000000000..d1316239e --- /dev/null +++ b/abci/types/typespb_test.go @@ -0,0 +1,4281 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: abci/types/types.proto + +/* +Package types is a generated protocol buffer package. 
+ +It is generated from these files: + abci/types/types.proto + +It has these top-level messages: + Request + RequestEcho + RequestFlush + RequestInfo + RequestSetOption + RequestInitChain + RequestQuery + RequestBeginBlock + RequestCheckTx + RequestDeliverTx + RequestEndBlock + RequestCommit + Response + ResponseException + ResponseEcho + ResponseFlush + ResponseInfo + ResponseSetOption + ResponseInitChain + ResponseQuery + ResponseBeginBlock + ResponseCheckTx + ResponseDeliverTx + ResponseEndBlock + ResponseCommit + ConsensusParams + BlockSize + TxSize + BlockGossip + Header + Validator + SigningValidator + PubKey + Evidence +*/ +package types + +import testing "testing" +import rand "math/rand" +import time "time" +import proto "github.com/gogo/protobuf/proto" +import jsonpb "github.com/gogo/protobuf/jsonpb" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/tendermint/tendermint/libs/common" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = golang_proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func TestRequestProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequest(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Request{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequest(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Request{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestEchoProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEcho(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestEcho{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func 
TestRequestEchoMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEcho(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestEcho{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestFlushProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestFlush(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestFlush{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestFlushMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestFlush(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestFlush{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestInfoProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInfo(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestInfo{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestInfoMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInfo(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestInfo{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = 
byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestSetOptionProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestSetOption(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestSetOption{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestSetOptionMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestSetOption(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestSetOption{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestInitChainProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInitChain(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestInitChain{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestInitChainMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInitChain(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestInitChain{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestQueryProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestQuery(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestQuery{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = 
%v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestQueryMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestQuery(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestQuery{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestBeginBlockProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBeginBlock(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestBeginBlock{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestBeginBlockMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBeginBlock(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestBeginBlock{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestCheckTxProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCheckTx(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestCheckTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) 
+ } +} + +func TestRequestCheckTxMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCheckTx(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestCheckTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestDeliverTxProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestDeliverTx(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestDeliverTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestDeliverTxMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestDeliverTx(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestDeliverTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestEndBlockProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEndBlock(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestEndBlock{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestEndBlockMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEndBlock(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestEndBlock{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = 
%d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestCommitProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCommit(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestCommit{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestCommitMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCommit(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestCommit{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponse(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Response{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponse(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Response{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseExceptionProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseException(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseException{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed 
= %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseExceptionMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseException(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseException{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseEchoProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEcho(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEcho{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseEchoMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEcho(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEcho{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseFlushProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseFlush(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseFlush{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} 
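Every message in types.proto gets the same generated pair of checks seen above. The *Proto variant builds a randomly populated value with the generated NewPopulatedX constructor, round-trips it through proto.Marshal/proto.Unmarshal, then scribbles over the original buffer before comparing with the generated Equal — which proves the decoded message holds its own copies rather than aliasing the input bytes. The trailing "littlefuzz" loop mutates and grows the encoding and feeds it back to Unmarshal only to confirm that corrupt input produces an error, never a panic. The *MarshalTo variant covers the pre-sized-buffer path (Size followed by MarshalTo) instead of the allocating Marshal. A single instance can be run in isolation while debugging, for example:

    go test -run 'TestRequestProto$' github.com/tendermint/tendermint/abci/types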
+ +func TestResponseFlushMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseFlush(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseFlush{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseInfoProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInfo(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseInfo{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseInfoMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInfo(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseInfo{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseSetOptionProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseSetOption(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseSetOption{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseSetOptionMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseSetOption(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseSetOption{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, 
err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseInitChainProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInitChain(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseInitChain{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseInitChainMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInitChain(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseInitChain{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseQueryProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseQuery(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseQuery{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseQueryMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseQuery(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseQuery{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseBeginBlockProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlock(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBeginBlock{} + if err := proto.Unmarshal(dAtA, 
msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseBeginBlockMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlock(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBeginBlock{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseCheckTxProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCheckTx(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCheckTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseCheckTxMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCheckTx(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCheckTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseDeliverTxProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTx(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseDeliverTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + 
} + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseDeliverTxMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTx(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseDeliverTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseEndBlockProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEndBlock(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEndBlock{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseEndBlockMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEndBlock(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEndBlock{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseCommitProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCommit(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCommit{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseCommitMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCommit(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCommit{} + if err 
:= proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestConsensusParamsProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedConsensusParams(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ConsensusParams{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestConsensusParamsMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedConsensusParams(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ConsensusParams{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockSizeProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockSize(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockSize{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestBlockSizeMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockSize(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockSize{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestTxSizeProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedTxSize(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &TxSize{} + if err := 
proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestTxSizeMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedTxSize(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &TxSize{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockGossipProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockGossip(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockGossip{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestBlockGossipMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockGossip(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockGossip{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestHeaderProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedHeader(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Header{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + 
+func TestHeaderMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedHeader(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Header{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestValidatorProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedValidator(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Validator{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestValidatorMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedValidator(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Validator{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestSigningValidatorProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedSigningValidator(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &SigningValidator{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestSigningValidatorMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedSigningValidator(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &SigningValidator{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = 
byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPubKeyProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedPubKey(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PubKey{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestPubKeyMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedPubKey(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PubKey{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestEvidenceProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedEvidence(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Evidence{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestEvidenceMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedEvidence(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Evidence{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequest(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Request{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v 
!Json Equal %#v", seed, msg, p) + } +} +func TestRequestEchoJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEcho(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestEcho{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestFlushJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestFlush(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestFlush{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestInfoJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInfo(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestInfo{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestSetOptionJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestSetOption(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestSetOption{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestInitChainJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInitChain(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestInitChain{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestQueryJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestQuery(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestQuery{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestBeginBlockJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBeginBlock(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, 
err = %v", seed, err) + } + msg := &RequestBeginBlock{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestCheckTxJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCheckTx(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestCheckTx{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestDeliverTxJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestDeliverTx(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestDeliverTx{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestEndBlockJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEndBlock(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestEndBlock{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestCommitJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCommit(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestCommit{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponse(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Response{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseExceptionJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseException(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseException{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseEchoJSON(t *testing.T) { + seed := 
time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEcho(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEcho{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseFlushJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseFlush(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseFlush{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseInfoJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInfo(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseInfo{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseSetOptionJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseSetOption(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseSetOption{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseInitChainJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInitChain(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseInitChain{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseQueryJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseQuery(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseQuery{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseBeginBlockJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlock(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBeginBlock{} + err = 
jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseCheckTxJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCheckTx(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCheckTx{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseDeliverTxJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTx(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseDeliverTx{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseEndBlockJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEndBlock(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEndBlock{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseCommitJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCommit(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCommit{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestConsensusParamsJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedConsensusParams(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ConsensusParams{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestBlockSizeJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockSize(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockSize{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestTxSizeJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := 
NewPopulatedTxSize(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &TxSize{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestBlockGossipJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockGossip(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockGossip{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestHeaderJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedHeader(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Header{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestValidatorJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedValidator(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Validator{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestSigningValidatorJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedSigningValidator(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &SigningValidator{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestPubKeyJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedPubKey(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PubKey{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestEvidenceJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedEvidence(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Evidence{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} 
+func TestRequestProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequest(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &Request{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequest(popr, true) + dAtA := proto.CompactTextString(p) + msg := &Request{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestEchoProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEcho(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestEcho{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestEchoProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEcho(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestEcho{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestFlushProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestFlush(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestFlush{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestFlushProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestFlush(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestFlush{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestInfoProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInfo(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestInfo{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestInfoProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInfo(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestInfo{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestSetOptionProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestSetOption(popr, true) + dAtA := proto.MarshalTextString(p) + msg := 
&RequestSetOption{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestSetOptionProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestSetOption(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestSetOption{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestInitChainProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInitChain(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestInitChain{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestInitChainProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestInitChain(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestInitChain{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestQueryProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestQuery(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestQuery{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestQueryProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestQuery(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestQuery{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestBeginBlockProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBeginBlock(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestBeginBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestBeginBlockProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBeginBlock(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestBeginBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestCheckTxProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCheckTx(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestCheckTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, 
err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestCheckTxProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCheckTx(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestCheckTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestDeliverTxProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestDeliverTx(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestDeliverTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestDeliverTxProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestDeliverTx(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestDeliverTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestEndBlockProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEndBlock(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestEndBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestEndBlockProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestEndBlock(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestEndBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestCommitProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCommit(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestCommit{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestCommitProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestCommit(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestCommit{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponse(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &Response{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseProtoCompactText(t *testing.T) { + 
seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponse(popr, true) + dAtA := proto.CompactTextString(p) + msg := &Response{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseExceptionProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseException(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseException{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseExceptionProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseException(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseException{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseEchoProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEcho(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseEcho{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseEchoProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEcho(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseEcho{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseFlushProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseFlush(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseFlush{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseFlushProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseFlush(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseFlush{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseInfoProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInfo(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseInfo{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseInfoProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInfo(popr, true) + dAtA := proto.CompactTextString(p) + msg := 
&ResponseInfo{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseSetOptionProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseSetOption(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseSetOption{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseSetOptionProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseSetOption(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseSetOption{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseInitChainProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInitChain(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseInitChain{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseInitChainProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseInitChain(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseInitChain{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseQueryProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseQuery(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseQuery{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseQueryProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseQuery(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseQuery{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseBeginBlockProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlock(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseBeginBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseBeginBlockProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlock(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseBeginBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + 
t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseCheckTxProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCheckTx(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseCheckTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseCheckTxProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCheckTx(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseCheckTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseDeliverTxProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTx(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseDeliverTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseDeliverTxProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTx(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseDeliverTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseEndBlockProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEndBlock(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseEndBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseEndBlockProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseEndBlock(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseEndBlock{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseCommitProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCommit(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseCommit{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseCommitProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseCommit(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseCommit{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto 
%#v", seed, msg, p) + } +} + +func TestConsensusParamsProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedConsensusParams(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ConsensusParams{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestConsensusParamsProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedConsensusParams(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ConsensusParams{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockSizeProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockSize(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &BlockSize{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockSizeProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockSize(popr, true) + dAtA := proto.CompactTextString(p) + msg := &BlockSize{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestTxSizeProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedTxSize(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &TxSize{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestTxSizeProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedTxSize(popr, true) + dAtA := proto.CompactTextString(p) + msg := &TxSize{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockGossipProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockGossip(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &BlockGossip{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockGossipProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBlockGossip(popr, true) + dAtA := proto.CompactTextString(p) + msg := &BlockGossip{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestHeaderProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedHeader(popr, true) + dAtA := proto.MarshalTextString(p) + msg := 
&Header{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestHeaderProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedHeader(popr, true)
+	dAtA := proto.CompactTextString(p)
+	msg := &Header{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestValidatorProtoText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedValidator(popr, true)
+	dAtA := proto.MarshalTextString(p)
+	msg := &Validator{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestValidatorProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedValidator(popr, true)
+	dAtA := proto.CompactTextString(p)
+	msg := &Validator{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestSigningValidatorProtoText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedSigningValidator(popr, true)
+	dAtA := proto.MarshalTextString(p)
+	msg := &SigningValidator{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestSigningValidatorProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedSigningValidator(popr, true)
+	dAtA := proto.CompactTextString(p)
+	msg := &SigningValidator{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestPubKeyProtoText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedPubKey(popr, true)
+	dAtA := proto.MarshalTextString(p)
+	msg := &PubKey{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestPubKeyProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedPubKey(popr, true)
+	dAtA := proto.CompactTextString(p)
+	msg := &PubKey{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestEvidenceProtoText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedEvidence(popr, true)
+	dAtA := proto.MarshalTextString(p)
+	msg := &Evidence{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestEvidenceProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedEvidence(popr, true)
+	dAtA := proto.CompactTextString(p)
+	msg := &Evidence{}
+	if err := proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestRequestSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequest(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestEchoSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestEcho(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestFlushSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestFlush(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestInfoSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestInfo(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestSetOptionSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestSetOption(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestInitChainSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestInitChain(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestQuerySize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestQuery(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestBeginBlockSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestBeginBlock(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestCheckTxSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestCheckTx(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestDeliverTxSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestDeliverTx(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestEndBlockSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestEndBlock(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestRequestCommitSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedRequestCommit(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponse(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseExceptionSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseException(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseEchoSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseEcho(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseFlushSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseFlush(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseInfoSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseInfo(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseSetOptionSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseSetOption(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseInitChainSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseInitChain(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseQuerySize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseQuery(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseBeginBlockSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseBeginBlock(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseCheckTxSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseCheckTx(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseDeliverTxSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseDeliverTx(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseEndBlockSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseEndBlock(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestResponseCommitSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedResponseCommit(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestConsensusParamsSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedConsensusParams(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestBlockSizeSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedBlockSize(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestTxSizeSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedTxSize(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestBlockGossipSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedBlockGossip(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestHeaderSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedHeader(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestValidatorSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedValidator(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestSigningValidatorSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedSigningValidator(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestPubKeySize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedPubKey(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestEvidenceSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := rand.New(rand.NewSource(seed))
+	p := NewPopulatedEvidence(popr, true)
+	size2 := proto.Size(p)
+	dAtA, err := proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+//These tests are generated by github.com/gogo/protobuf/plugin/testgen
diff --git a/abci/types/util.go b/abci/types/util.go
index 0924ab5ff..458024c58 100644
--- a/abci/types/util.go
+++ b/abci/types/util.go
@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"sort"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //------------------------------------------------------------------------------
diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go
index ee61cc9a4..09487563a 100644
--- a/benchmarks/codec_test.go
+++ b/benchmarks/codec_test.go
@@ -6,8 +6,8 @@ import (
 	"github.com/tendermint/go-amino"
 
-	"github.com/tendermint/tendermint/crypto"
 	proto "github.com/tendermint/tendermint/benchmarks/proto"
+	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/p2p"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 )
@@ -16,7 +16,7 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
 	b.StopTimer()
 	cdc := amino.NewCodec()
 	ctypes.RegisterAmino(cdc)
-	nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
+	nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
 	status := &ctypes.ResultStatus{
 		NodeInfo: p2p.NodeInfo{
 			ID: nodeKey.ID(),
@@ -52,7 +52,7 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
 	b.StopTimer()
 	cdc := amino.NewCodec()
 	ctypes.RegisterAmino(cdc)
-	nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
+	nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
 	nodeInfo := p2p.NodeInfo{
 		ID: nodeKey.ID(),
 		Moniker: "SOMENAME",
@@ -77,7 +77,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
 	b.StopTimer()
 	cdc := amino.NewCodec()
 	ctypes.RegisterAmino(cdc)
-	nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
+	nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
 	nodeInfo := p2p.NodeInfo{
 		ID: nodeKey.ID(),
 		Moniker: "SOMENAME",
@@ -98,7 +98,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
 
 func BenchmarkEncodeNodeInfoProto(b *testing.B) {
 	b.StopTimer()
-	nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
+	nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
 	nodeID := string(nodeKey.ID())
 	someName := "SOMENAME"
 	someAddr := "SOMEADDR"
diff --git a/benchmarks/map_test.go b/benchmarks/map_test.go
index 2d9789026..d13a19edf 100644
--- a/benchmarks/map_test.go
+++ b/benchmarks/map_test.go
@@ -3,7 +3,7 @@ package benchmarks
 import (
 	"testing"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func BenchmarkSomething(b *testing.B) {
diff --git a/benchmarks/os_test.go b/benchmarks/os_test.go
index dfadc3128..406038b9d 100644
--- a/benchmarks/os_test.go
+++ b/benchmarks/os_test.go
@@ -4,7 +4,7 @@ import (
 	"os"
 	"testing"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func BenchmarkFileWrite(b *testing.B) {
diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go
index dd00408cb..b7d2c4d63 100644
--- a/benchmarks/simu/counter.go
+++ b/benchmarks/simu/counter.go
@@ -7,7 +7,7 @@ import (
 	"time"
 
 	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func main() {
diff --git a/blockchain/pool.go b/blockchain/pool.go
index efd5c6a3a..a881c7cb7 100644
--- a/blockchain/pool.go
+++ b/blockchain/pool.go
@@ -8,9 +8,9 @@ import (
 	"sync/atomic"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	flow "github.com/tendermint/tmlibs/flowrate"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	flow "github.com/tendermint/tendermint/libs/flowrate"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go
index 82120eaef..01187bcfe 100644
--- a/blockchain/pool_test.go
+++ b/blockchain/pool_test.go
@@ -1,12 +1,11 @@
 package blockchain
 
 import (
-	"math/rand"
 	"testing"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
@@ -25,7 +24,7 @@ func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
 	peers := make(map[p2p.ID]testPeer, numPeers)
 	for i := 0; i < numPeers; i++ {
 		peerID := p2p.ID(cmn.RandStr(12))
-		height := minHeight + rand.Int63n(maxHeight-minHeight)
+		height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
 		peers[peerID] = testPeer{peerID, height}
 	}
 	return peers
@@ -80,7 +79,7 @@ func TestBasic(t *testing.T) {
 			}
 
 			// Request desired, pretend like we got the block immediately.
 			go func() {
-				block := &types.Block{Header: &types.Header{Height: request.Height}}
+				block := &types.Block{Header: types.Header{Height: request.Height}}
 				pool.AddBlock(request.PeerID, block, 123)
 				t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
 			}()
diff --git a/blockchain/reactor.go b/blockchain/reactor.go
index bf6214e01..eadeedc91 100644
--- a/blockchain/reactor.go
+++ b/blockchain/reactor.go
@@ -5,12 +5,13 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/tendermint/go-amino"
+	amino "github.com/tendermint/go-amino"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 const (
@@ -176,7 +177,7 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
 
 // Receive implements Reactor by handling 4 types of messages (look below).
 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		bcR.Switch.StopPeerForError(src, err)
@@ -366,17 +367,11 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
 }
 
-// DecodeMessage decodes BlockchainMessage.
-// TODO: ensure that bz is completely read.
-func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
+func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
-	if err != nil {
-		err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
-	}
 	return
 }
diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go
index c7f7e9afd..21eaae4ba 100644
--- a/blockchain/reactor_test.go
+++ b/blockchain/reactor_test.go
@@ -4,9 +4,9 @@ import (
 	"net"
 	"testing"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
@@ -206,3 +206,4 @@ func (tp *bcrTestPeer) IsPersistent() bool { return true }
 func (tp *bcrTestPeer) Get(s string) interface{} { return s }
 func (tp *bcrTestPeer) Set(string, interface{}) {}
 func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} }
+func (tp *bcrTestPeer) OriginalAddr() *p2p.NetAddress { return nil }
diff --git a/blockchain/store.go b/blockchain/store.go
index e7608b2cc..f02d4facb 100644
--- a/blockchain/store.go
+++ b/blockchain/store.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 
 	"github.com/tendermint/tendermint/types"
 )
diff --git a/blockchain/store_test.go b/blockchain/store_test.go
index 1e0c223ad..c81654005 100644
--- a/blockchain/store_test.go
+++ b/blockchain/store_test.go
@@ -10,8 +10,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/types"
 )
@@ -126,7 +126,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 		eraseSeenCommitInDB bool
 	}{
 		{
-			block: newBlock(&header1, commitAtH10),
+			block: newBlock(header1, commitAtH10),
 			parts: validPartSet,
 			seenCommit: seenCommit1,
 		},
@@ -137,35 +137,35 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 		},
 
 		{
-			block: newBlock(&header2, commitAtH10),
+			block: newBlock(header2, commitAtH10),
 			parts: uncontiguousPartSet,
 			wantPanic: "only save contiguous blocks", // and incomplete and uncontiguous parts
 		},
 
 		{
-			block: newBlock(&header1, commitAtH10),
+			block: newBlock(header1, commitAtH10),
 			parts: incompletePartSet,
 			wantPanic: "only save complete block", // incomplete parts
 		},
 
 		{
-			block: newBlock(&header1, commitAtH10),
+			block: newBlock(header1, commitAtH10),
 			parts: validPartSet,
 			seenCommit: seenCommit1,
 			corruptCommitInDB: true, // Corrupt the DB's commit entry
-			wantPanic: "Error reading block commit",
+			wantPanic: "unmarshal to types.Commit failed",
 		},
 
 		{
-			block: newBlock(&header1, commitAtH10),
+			block: newBlock(header1, commitAtH10),
 			parts: validPartSet,
 			seenCommit: seenCommit1,
-			wantPanic: "Error reading block",
+			wantPanic: "unmarshal to types.BlockMeta failed",
 			corruptBlockInDB: true, // Corrupt the DB's block entry
 		},
 
 		{
-			block: newBlock(&header1, commitAtH10),
+			block: newBlock(header1, commitAtH10),
 			parts: validPartSet,
 			seenCommit: seenCommit1,
@@ -174,16 +174,16 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 		},
 
 		{
-			block: newBlock(&header1, commitAtH10),
+			block: newBlock(header1, commitAtH10),
 			parts: validPartSet,
 			seenCommit: seenCommit1,
 			corruptSeenCommitInDB: true,
-			wantPanic: "Error reading block seen commit",
+			wantPanic: "unmarshal to types.Commit failed",
 		},
 
 		{
-			block: newBlock(&header1, commitAtH10),
+			block: newBlock(header1, commitAtH10),
 			parts: validPartSet,
 			seenCommit: seenCommit1,
@@ -287,7 +287,7 @@ func TestLoadBlockPart(t *testing.T) {
 	db.Set(calcBlockPartKey(height, index), []byte("Tendermint"))
 	res, _, panicErr = doFn(loadPart)
 	require.NotNil(t, panicErr, "expecting a non-nil panic")
-	require.Contains(t, panicErr.Error(), "Error reading block part")
+	require.Contains(t, panicErr.Error(), "unmarshal to types.Part failed")
 
 	// 3. A good block serialized and saved to the DB should be retrievable
 	db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1))
@@ -316,7 +316,7 @@ func TestLoadBlockMeta(t *testing.T) {
 	db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta"))
 	res, _, panicErr = doFn(loadMeta)
 	require.NotNil(t, panicErr, "expecting a non-nil panic")
-	require.Contains(t, panicErr.Error(), "Error reading block meta")
+	require.Contains(t, panicErr.Error(), "unmarshal to types.BlockMeta")
 
 	// 3. A good blockMeta serialized and saved to the DB should be retrievable
 	meta := &types.BlockMeta{}
@@ -375,7 +375,7 @@ func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr
 	return res, err, panicErr
 }
 
-func newBlock(hdr *types.Header, lastCommit *types.Commit) *types.Block {
+func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block {
 	return &types.Block{
 		Header: hdr,
 		LastCommit: lastCommit,
diff --git a/blockchain/wire.go b/blockchain/wire.go
index 70b50565d..ff02d58c1 100644
--- a/blockchain/wire.go
+++ b/blockchain/wire.go
@@ -2,12 +2,12 @@ package blockchain
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/tendermint/crypto"
+	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
 )
 
 var cdc = amino.NewCodec()
 
 func init() {
 	RegisterBlockchainMessages(cdc)
-	crypto.RegisterAmino(cdc)
+	cryptoAmino.RegisterAmino(cdc)
 }
diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go
index ac7dd91ba..03aa57f4e 100644
--- a/cmd/priv_val_server/main.go
+++ b/cmd/priv_val_server/main.go
@@ -4,9 +4,9 @@ import (
 	"flag"
 	"os"
 
-	crypto "github.com/tendermint/tendermint/crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/privval"
 )
@@ -37,7 +37,7 @@ func main() {
 		*chainID,
 		*addr,
 		pv,
-		crypto.GenPrivKeyEd25519(),
+		ed25519.GenPrivKey(),
 	)
 
 	err := rs.Start()
 	if err != nil {
diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go
index 4990be477..7aedcd0dc 100644
--- a/cmd/tendermint/commands/gen_node_key.go
+++ b/cmd/tendermint/commands/gen_node_key.go
@@ -6,7 +6,7 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/tendermint/tendermint/p2p"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go
index 45812b9ed..a44c73ebf 100644
--- a/cmd/tendermint/commands/init.go
+++ b/cmd/tendermint/commands/init.go
@@ -9,7 +9,7 @@ import (
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // InitFilesCmd initialises a fresh Tendermint Core instance.
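As context for the wire.go changes in this patch: after the crypto refactor, a package that serializes crypto types registers them on its own amino codec through the new crypto/encoding/amino package instead of the old crypto.RegisterAmino. A minimal sketch of that shared pattern follows (the package name mypkg is illustrative; the calls are the ones shown in the diff):

```go
package mypkg

import (
	amino "github.com/tendermint/go-amino"
	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)

// Package-local codec: each package owns one, as in blockchain/wire.go
// and cmd/tendermint/commands/wire.go above.
var cdc = amino.NewCodec()

func init() {
	// Register the PubKey/PrivKey/Signature implementations so this codec
	// can (un)marshal any crypto values embedded in the package's messages.
	cryptoAmino.RegisterAmino(cdc)
}
```

The design keeps codecs package-scoped: every consumer registers exactly the concrete types it needs, rather than sharing one global codec.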
@@ -52,8 +52,9 @@ func initFilesWithConfig(config *cfg.Config) error {
 		logger.Info("Found genesis file", "path", genFile)
 	} else {
 		genDoc := types.GenesisDoc{
-			ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
-			GenesisTime: time.Now(),
+			ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
+			GenesisTime: time.Now(),
+			ConsensusParams: types.DefaultConsensusParams(),
 		}
 		genDoc.Validators = []types.GenesisValidator{{
 			PubKey: pv.GetPubKey(),
diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go
index 6987b7f19..d57598816 100644
--- a/cmd/tendermint/commands/lite.go
+++ b/cmd/tendermint/commands/lite.go
@@ -6,7 +6,7 @@ import (
 	"github.com/spf13/cobra"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/lite/proxy"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go
index 32d7b1433..ef0ba3019 100644
--- a/cmd/tendermint/commands/reset_priv_validator.go
+++ b/cmd/tendermint/commands/reset_priv_validator.go
@@ -6,7 +6,7 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/tendermint/tendermint/privval"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 // ResetAllCmd removes the database of this Tendermint core
diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go
index f229a7889..3c67ddc14 100644
--- a/cmd/tendermint/commands/root.go
+++ b/cmd/tendermint/commands/root.go
@@ -7,9 +7,9 @@ import (
 	"github.com/spf13/viper"
 
 	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tmlibs/cli"
-	tmflags "github.com/tendermint/tmlibs/cli/flags"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/cli"
+	tmflags "github.com/tendermint/tendermint/libs/cli/flags"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 var (
diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go
index 59d258af7..e8095b387 100644
--- a/cmd/tendermint/commands/root_test.go
+++ b/cmd/tendermint/commands/root_test.go
@@ -14,8 +14,8 @@ import (
 	"github.com/stretchr/testify/require"
 
 	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tmlibs/cli"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/libs/cli"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go
index 0d50f9e4b..542e5c991 100644
--- a/cmd/tendermint/commands/run_node.go
+++ b/cmd/tendermint/commands/run_node.go
@@ -33,7 +33,7 @@ func AddNodeFlags(cmd *cobra.Command) {
 	cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
 	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes")
 	cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers")
-	cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
+	cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "Enable/disable UPNP port forwarding")
 	cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange")
 	cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode")
 	cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs")
diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go
index 29d29502e..f7639fb27 100644
--- a/cmd/tendermint/commands/testnet.go
+++ b/cmd/tendermint/commands/testnet.go
@@ -14,7 +14,7 @@ import (
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
diff --git a/cmd/tendermint/commands/wire.go b/cmd/tendermint/commands/wire.go
index a09019133..0f0b536df 100644
--- a/cmd/tendermint/commands/wire.go
+++ b/cmd/tendermint/commands/wire.go
@@ -2,11 +2,11 @@ package commands
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/tendermint/crypto"
+	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
 )
 
 var cdc = amino.NewCodec()
 
 func init() {
-	crypto.RegisterAmino(cdc)
+	cryptoAmino.RegisterAmino(cdc)
 }
diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go
index 8c7f0cd17..a5a8d2d80 100644
--- a/cmd/tendermint/main.go
+++ b/cmd/tendermint/main.go
@@ -4,7 +4,7 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/tendermint/tmlibs/cli"
+	"github.com/tendermint/tendermint/libs/cli"
 
 	cmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
 	cfg "github.com/tendermint/tendermint/config"
diff --git a/codecov.yml b/codecov.yml
index b190853de..4b3d527fa 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -21,3 +21,4 @@ ignore:
   - "docs"
   - "DOCKER"
   - "scripts"
+  - "**/*.pb.go"
diff --git a/config/config.go b/config/config.go
index e86b0e871..fb8e79086 100644
--- a/config/config.go
+++ b/config/config.go
@@ -276,16 +276,18 @@ type P2PConfig struct {
 	// Address to listen for incoming connections
 	ListenAddress string `mapstructure:"laddr"`
 
+	// Address to advertise to peers for them to dial
+	ExternalAddress string `mapstructure:"external_address"`
+
 	// Comma separated list of seed nodes to connect to
 	// We only use these if we can’t connect to peers in the addrbook
 	Seeds string `mapstructure:"seeds"`
 
 	// Comma separated list of nodes to keep persistent connections to
-	// Do not add private peers to this list if you don't want them advertised
 	PersistentPeers string `mapstructure:"persistent_peers"`
 
-	// Skip UPNP port forwarding
-	SkipUPNP bool `mapstructure:"skip_upnp"`
+	// UPNP port forwarding
+	UPNP bool `mapstructure:"upnp"`
 
 	// Path to address book
 	AddrBook string `mapstructure:"addr_book_file"`
@@ -299,9 +301,8 @@ type P2PConfig struct {
 	// Time to wait before flushing messages out on the connection, in ms
 	FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
 
-	// Maximum size of a message packet, in bytes
-	// Includes a header, which is ~13 bytes
-	MaxPacketMsgSize int `mapstructure:"max_packet_msg_size"`
+	// Maximum size of a message packet payload, in bytes
+	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
 
 	// Rate at which packets can be sent, in bytes/second
 	SendRate int64 `mapstructure:"send_rate"`
@@ -340,22 +341,24 @@ type P2PConfig struct {
 // DefaultP2PConfig returns a default configuration for the peer-to-peer layer
 func DefaultP2PConfig() *P2PConfig {
 	return &P2PConfig{
-		ListenAddress: "tcp://0.0.0.0:26656",
-		AddrBook: defaultAddrBookPath,
-		AddrBookStrict: true,
-		MaxNumPeers: 50,
-		FlushThrottleTimeout: 100,
-		MaxPacketMsgSize: 1024, // 1 kB
-		SendRate: 512000, // 500 kB/s
-		RecvRate: 512000, // 500 kB/s
-		PexReactor: true,
-		SeedMode: false,
-		AllowDuplicateIP: true, // so non-breaking yet
-		HandshakeTimeout: 20 * time.Second,
-		DialTimeout: 3 * time.Second,
-		TestDialFail: false,
-		TestFuzz: false,
-		TestFuzzConfig: DefaultFuzzConnConfig(),
+		ListenAddress: "tcp://0.0.0.0:26656",
+		ExternalAddress: "",
+		UPNP: false,
+		AddrBook: defaultAddrBookPath,
+		AddrBookStrict: true,
+		MaxNumPeers: 50,
+		FlushThrottleTimeout: 100,
+		MaxPacketMsgPayloadSize: 1024, // 1 kB
+		SendRate: 5120000, // 5 mB/s
+		RecvRate: 5120000, // 5 mB/s
+		PexReactor: true,
+		SeedMode: false,
+		AllowDuplicateIP: true, // so non-breaking yet
+		HandshakeTimeout: 20 * time.Second,
+		DialTimeout: 3 * time.Second,
+		TestDialFail: false,
+		TestFuzz: false,
+		TestFuzzConfig: DefaultFuzzConnConfig(),
 	}
 }
@@ -363,7 +366,6 @@ func DefaultP2PConfig() *P2PConfig {
 func TestP2PConfig() *P2PConfig {
 	cfg := DefaultP2PConfig()
 	cfg.ListenAddress = "tcp://0.0.0.0:36656"
-	cfg.SkipUPNP = true
 	cfg.FlushThrottleTimeout = 10
 	cfg.AllowDuplicateIP = true
 	return cfg
@@ -454,10 +456,6 @@ type ConsensusConfig struct {
 	// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 	SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
 
-	// BlockSize
-	MaxBlockSizeTxs int `mapstructure:"max_block_size_txs"`
-	MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"`
-
 	// EmptyBlocks mode and possible interval between empty blocks in seconds
 	CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"`
 	CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"`
@@ -479,8 +477,6 @@ func DefaultConsensusConfig() *ConsensusConfig {
 		TimeoutPrecommitDelta: 500,
 		TimeoutCommit: 1000,
 		SkipTimeoutCommit: false,
-		MaxBlockSizeTxs: 10000,
-		MaxBlockSizeBytes: 1, // TODO
 		CreateEmptyBlocks: true,
 		CreateEmptyBlocksInterval: 0,
 		PeerGossipSleepDuration: 100,
@@ -609,6 +605,12 @@ type InstrumentationConfig struct {
 	// Address to listen for Prometheus collector(s) connections.
 	PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`
+
+	// Maximum number of simultaneous connections.
+	// If you want to accept a larger number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	MaxOpenConnections int `mapstructure:"max_open_connections"`
 }
 
 // DefaultInstrumentationConfig returns a default configuration for metrics
@@ -617,6 +619,7 @@ func DefaultInstrumentationConfig() *InstrumentationConfig {
 	return &InstrumentationConfig{
 		Prometheus: false,
 		PrometheusListenAddr: ":26660",
+		MaxOpenConnections: 3,
 	}
 }
diff --git a/config/toml.go b/config/toml.go
index c0840e440..60ce15de8 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -6,7 +6,7 @@ import (
 	"path/filepath"
 	"text/template"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var configTemplate *template.Template
@@ -142,13 +142,21 @@ max_open_connections = {{ .RPC.MaxOpenConnections }}
 # Address to listen for incoming connections
 laddr = "{{ .P2P.ListenAddress }}"
 
+# Address to advertise to peers for them to dial
+# If empty, will use the same port as the laddr,
+# and will introspect on the listener or use UPnP
+# to figure out the address.
+external_address = "{{ .P2P.ExternalAddress }}"
+
 # Comma separated list of seed nodes to connect to
 seeds = "{{ .P2P.Seeds }}"
 
 # Comma separated list of nodes to keep persistent connections to
-# Do not add private peers to this list if you don't want them advertised
 persistent_peers = "{{ .P2P.PersistentPeers }}"
 
+# UPNP port forwarding
+upnp = {{ .P2P.UPNP }}
+
 # Path to address book
 addr_book_file = "{{ js .P2P.AddrBook }}"
 
@@ -161,9 +169,8 @@ flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
 # Maximum number of peers to connect to
 max_num_peers = {{ .P2P.MaxNumPeers }}
 
-# Maximum size of a message packet, in bytes
-# Includes a header, which is ~13 bytes
-max_packet_msg_size = {{ .P2P.MaxPacketMsgSize }}
+# Maximum size of a message packet payload, in bytes
+max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
 
 # Rate at which packets can be sent, in bytes/second
 send_rate = {{ .P2P.SendRate }}
@@ -214,10 +221,6 @@ timeout_commit = {{ .Consensus.TimeoutCommit }}
 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
 
-# BlockSize
-max_block_size_txs = {{ .Consensus.MaxBlockSizeTxs }}
-max_block_size_bytes = {{ .Consensus.MaxBlockSizeBytes }}
-
 # EmptyBlocks mode and possible interval between empty blocks in seconds
 create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
 create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
@@ -258,6 +261,12 @@ prometheus = {{ .Instrumentation.Prometheus }}
 # Address to listen for Prometheus collector(s) connections
 prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
 `
 
 /****** these are for test settings ***********/
diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go
index d3be8c358..5360a92c9 100644
--- a/consensus/byzantine_test.go
+++ b/consensus/byzantine_test.go
@@ -9,7 +9,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func init() {
diff --git a/consensus/common_test.go b/consensus/common_test.go
index b990f525c..2df226ba1 100644
--- a/consensus/common_test.go
+++ b/consensus/common_test.go
@@ -22,9 +22,9 @@ import (
 	"github.com/tendermint/tendermint/privval"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/abci/example/counter"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go
index 032cf2f32..a811de731 100644
--- a/consensus/mempool_test.go
+++ b/consensus/mempool_test.go
@@ -10,7 +10,7 @@ import (
 	"github.com/tendermint/tendermint/abci/example/code"
 	abci "github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/types"
 )
diff --git a/consensus/reactor.go b/consensus/reactor.go
index 2034ad344..3eb1d73aa 100644
--- a/consensus/reactor.go
+++ b/consensus/reactor.go
@@ -9,11 +9,11 @@ import (
 	"github.com/pkg/errors"
 
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 
 	cstypes "github.com/tendermint/tendermint/consensus/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	tmevents "github.com/tendermint/tendermint/libs/events"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
@@ -80,6 +80,9 @@ func (conR *ConsensusReactor) OnStop() {
 	conR.BaseReactor.OnStop()
 	conR.unsubscribeFromBroadcastEvents()
 	conR.conS.Stop()
+	if !conR.FastSync() {
+		conR.conS.Wait()
+	}
 }
 
 // SwitchToConsensus switches from fast_sync mode to consensus mode.
@@ -183,7 +186,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		return
 	}
 
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		conR.Switch.StopPeerForError(src, err)
@@ -1306,11 +1309,9 @@ func RegisterConsensusMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
 }
 
-// DecodeMessage decodes the given bytes into a ConsensusMessage.
-func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
+func decodeMsg(bz []byte) (msg ConsensusMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go
index 498a857b9..9e2aa0a0b 100644
--- a/consensus/reactor_test.go
+++ b/consensus/reactor_test.go
@@ -11,8 +11,8 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/abci/example/kvstore"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
diff --git a/consensus/replay.go b/consensus/replay.go
index f681828cf..dd940998f 100644
--- a/consensus/replay.go
+++ b/consensus/replay.go
@@ -11,10 +11,10 @@ import (
 	"time"
 
 	abci "github.com/tendermint/tendermint/abci/types"
-	//auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	//auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
@@ -273,7 +273,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 		ChainId: h.genDoc.ChainID,
 		ConsensusParams: csParams,
 		Validators: validators,
-		AppStateBytes: h.genDoc.AppStateJSON,
+		AppStateBytes: h.genDoc.AppState,
 	}
 	res, err := proxyApp.Consensus().InitChainSync(req)
 	if err != nil {
diff --git a/consensus/replay_file.go b/consensus/replay_file.go
index 57204b01a..0c0b0dcb1 100644
--- a/consensus/replay_file.go
+++ b/consensus/replay_file.go
@@ -16,9 +16,9 @@ import (
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
diff --git a/consensus/replay_test.go b/consensus/replay_test.go
index f76651d72..da526d249 100644
--- a/consensus/replay_test.go
+++ b/consensus/replay_test.go
@@ -19,16 +19,16 @@ import (
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
 	crypto "github.com/tendermint/tendermint/crypto"
-	auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 var consensusReplayConfig *cfg.Config
diff --git a/consensus/state.go b/consensus/state.go
index 5d6842a81..634f10313 100644
--- a/consensus/state.go
+++ b/consensus/state.go
@@ -10,8 +10,8 @@ import (
 	"time"
 
 	fail "github.com/ebuchman/fail-test"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
@@ -81,7 +81,7 @@ type ConsensusState struct {
 	evpool sm.EvidencePool
 
 	// internal state
-	mtx sync.Mutex
+	mtx sync.RWMutex
 	cstypes.RoundState
 	state sm.State // State until height-1.
@@ -192,15 +192,15 @@ func (cs *ConsensusState) String() string {
 
 // GetState returns a copy of the chain state.
 func (cs *ConsensusState) GetState() sm.State {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
+	cs.mtx.RLock()
+	defer cs.mtx.RUnlock()
 	return cs.state.Copy()
 }
 
 // GetRoundState returns a shallow copy of the internal consensus state.
 func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
+	cs.mtx.RLock()
+	defer cs.mtx.RUnlock()
 	rs := cs.RoundState // copy
 	return &rs
@@ -208,24 +208,24 @@ func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
 
 // GetRoundStateJSON returns a json of RoundState, marshalled using go-amino.
 func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
+	cs.mtx.RLock()
+	defer cs.mtx.RUnlock()
 	return cdc.MarshalJSON(cs.RoundState)
 }
 
 // GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
 func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
+	cs.mtx.RLock()
+	defer cs.mtx.RUnlock()
 	return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
 }
 
 // GetValidators returns a copy of the current validators.
 func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
+	cs.mtx.RLock()
+	defer cs.mtx.RUnlock()
 	return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
 }
@@ -245,8 +245,8 @@ func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
 
 // LoadCommit loads the commit for a given height.
 func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
+	cs.mtx.RLock()
+	defer cs.mtx.RUnlock()
 	if height == cs.blockStore.Height() {
 		return cs.blockStore.LoadSeenCommit(height)
 	}
@@ -314,16 +314,8 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
 
 // OnStop implements cmn.Service. It stops all routines and waits for the WAL to finish.
 func (cs *ConsensusState) OnStop() {
-	cs.BaseService.OnStop()
-
-	cs.evsw.Stop()
-	cs.timeoutTicker.Stop()
-
-	// Make BaseService.Wait() wait until cs.wal.Wait()
-	if cs.IsRunning() {
-		cs.wal.Wait()
-	}
 }
 
 // Wait waits for the main routine to return.
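The consensus/state.go hunks above switch the internal mutex to sync.RWMutex so the read-only accessors (GetState, GetRoundState, and friends) can run concurrently while writers still serialize through Lock. A minimal sketch of the resulting getter pattern, with a simplified stand-in for cstypes.RoundState:

```go
package sketch

import "sync"

// RoundState stands in for cstypes.RoundState; fields elided.
type RoundState struct {
	Height int64
	Round  int
}

// ConsensusState is reduced to the locking-relevant parts.
type ConsensusState struct {
	mtx sync.RWMutex // writers take Lock; readers take RLock
	rs  RoundState
}

// GetRoundState mirrors the diff's pattern: take the read lock,
// shallow-copy the state under it, and return a pointer to the copy
// so callers never alias the locked struct.
func (cs *ConsensusState) GetRoundState() *RoundState {
	cs.mtx.RLock()
	defer cs.mtx.RUnlock()
	rs := cs.rs // copy while holding the read lock
	return &rs
}
```

Because RPC endpoints like /status read this state on every request, letting readers proceed in parallel is what makes the endpoint resilient when the write path is busy or stalled.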
@@ -579,8 +571,8 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) { var mi msgInfo select { - case height := <-cs.mempool.TxsAvailable(): - cs.handleTxsAvailable(height) + case <-cs.mempool.TxsAvailable(): + cs.handleTxsAvailable() case mi = <-cs.peerMsgQueue: cs.wal.Write(mi) // handles proposals, block parts, votes @@ -603,6 +595,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) { // close wal now that we're done writing to it cs.wal.Stop() + cs.wal.Wait() close(cs.done) return @@ -624,7 +617,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { err = cs.setProposal(msg.Proposal) case *BlockPartMessage: // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit - _, err = cs.addProposalBlockPart(msg.Height, msg.Part) + _, err = cs.addProposalBlockPart(msg, peerID) if err != nil && msg.Round != cs.Round { cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round) err = nil @@ -690,11 +683,11 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { } -func (cs *ConsensusState) handleTxsAvailable(height int64) { +func (cs *ConsensusState) handleTxsAvailable() { cs.mtx.Lock() defer cs.mtx.Unlock() // we only need to do this for round 0 - cs.enterPropose(height, 0) + cs.enterPropose(cs.Height, 0) } //----------------------------------------------------------------------------- @@ -932,7 +925,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts } // Mempool validated transactions - txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs) + txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs) block, parts := cs.state.MakeBlock(cs.Height, txs, commit) evidence := cs.evpool.PendingEvidence() block.AddEvidence(evidence) @@ -1399,17 +1392,22 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { // NOTE: block is not necessarily valid. // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block. -func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) { +func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) { + height, round, part := msg.Height, msg.Round, msg.Part + // Blocks might be reused, so round mismatch is OK if cs.Height != height { - cs.Logger.Debug("Received block part from wrong height", "height", height) + cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round) return false, nil } // We're not expecting a block part. if cs.ProposalBlockParts == nil { - cs.Logger.Info("Received a block part when we're not expecting any", "height", height) - return false, nil // TODO: bad peer? Return error? + // NOTE: this can happen when we've gone to a higher round and + // then receive parts from the previous round - not necessarily a bad peer. + cs.Logger.Info("Received a block part when we're not expecting any", + "height", height, "round", round, "index", part.Index, "peer", peerID) + return false, nil } added, err = cs.ProposalBlockParts.AddPart(part) @@ -1443,7 +1441,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) ( // procedure at this point. 
} - if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() { + if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { // Move onto the next step cs.enterPrevote(height, cs.Round) } else if cs.Step == cstypes.RoundStepCommit { diff --git a/consensus/state_test.go b/consensus/state_test.go index d0def6309..6a14e17b5 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -10,8 +10,8 @@ import ( cstypes "github.com/tendermint/tendermint/consensus/types" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) func init() { diff --git a/consensus/ticker.go b/consensus/ticker.go index b37b7c495..a1e2174c3 100644 --- a/consensus/ticker.go +++ b/consensus/ticker.go @@ -3,8 +3,8 @@ package consensus import ( "time" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) var ( diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 3c9867940..70a38668f 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type RoundVoteSet struct { diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 678d34759..0de656000 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -6,7 +6,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var config *cfg.Config // NOTE: must be reset for each _test.go file diff --git a/consensus/types/peer_round_state.go b/consensus/types/peer_round_state.go index dcb6c8e02..7a5d69b8e 100644 --- a/consensus/types/peer_round_state.go +++ b/consensus/types/peer_round_state.go @@ -5,7 +5,7 @@ import ( "time" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index 14da1f149..cca560ccf 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index 042d8de7f..bcaa63085 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -4,10 +4,10 @@ import ( "testing" "time" - "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" ) 
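The `cs.Step <= cstypes.RoundStepPropose` fix at the top of this stretch relies on the round-step constants forming an ordered progression, so a proposal that completes while the node is still in the NewHeight or NewRound step now triggers `enterPrevote` as well. A hedged sketch — the numeric values below are assumptions for illustration only; all that matters is their relative order:

```go
package main

import "fmt"

// RoundStepType mirrors cstypes.RoundStepType; the exact values are
// assumed here, only the ordering is relied upon.
type RoundStepType uint8

const (
	RoundStepNewHeight RoundStepType = iota + 1 // assumed value
	RoundStepNewRound
	RoundStepPropose
	RoundStepPrevote
)

func main() {
	// With `== RoundStepPropose`, a block that completes during
	// NewHeight/NewRound would be ignored; `<=` covers those steps too.
	for _, step := range []RoundStepType{
		RoundStepNewHeight, RoundStepNewRound, RoundStepPropose, RoundStepPrevote,
	} {
		fmt.Printf("step %d -> enterPrevote: %v\n", step, step <= RoundStepPropose)
	}
}
```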
func BenchmarkRoundStateDeepCopy(b *testing.B) { @@ -23,7 +23,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { Hash: cmn.RandBytes(20), }, } - sig := crypto.SignatureEd25519{} + sig := ed25519.SignatureEd25519{} for i := 0; i < nval; i++ { precommits[i] = &types.Vote{ ValidatorAddress: types.Address(cmn.RandBytes(20)), @@ -38,7 +38,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { } // Random block block := &types.Block{ - Header: &types.Header{ + Header: types.Header{ ChainID: cmn.RandStr(12), Time: time.Now(), LastBlockID: blockID, @@ -50,7 +50,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { LastResultsHash: cmn.RandBytes(20), EvidenceHash: cmn.RandBytes(20), }, - Data: &types.Data{ + Data: types.Data{ Txs: txs, }, Evidence: types.EvidenceData{}, diff --git a/consensus/types/wire.go b/consensus/types/wire.go index 6342d7eba..9221de968 100644 --- a/consensus/types/wire.go +++ b/consensus/types/wire.go @@ -2,11 +2,11 @@ package types import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc = amino.NewCodec() func init() { - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) } diff --git a/consensus/version.go b/consensus/version.go index 2c137bf7f..5c74a16db 100644 --- a/consensus/version.go +++ b/consensus/version.go @@ -1,7 +1,7 @@ package consensus import ( - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // kind of arbitrary diff --git a/consensus/wal.go b/consensus/wal.go index 3d9bf8afc..8c4c10bc7 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -12,8 +12,8 @@ import ( amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/types" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 1a61c3405..f3a365809 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -17,10 +17,10 @@ import ( "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) // WALWithNBlocks generates a consensus WAL. 
It does this by spinning up a diff --git a/consensus/wal_test.go b/consensus/wal_test.go index eebbc85a2..3ecb4fe8f 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/consensus/types" tmtypes "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/consensus/wire.go b/consensus/wire.go index 5f231c0c7..cc172bead 100644 --- a/consensus/wire.go +++ b/consensus/wire.go @@ -2,7 +2,7 @@ package consensus import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc = amino.NewCodec() @@ -10,5 +10,5 @@ var cdc = amino.NewCodec() func init() { RegisterConsensusMessages(cdc) RegisterWALMessages(cdc) - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) } diff --git a/crypto/Gopkg.lock b/crypto/Gopkg.lock deleted file mode 100644 index 7dbe6ae72..000000000 --- a/crypto/Gopkg.lock +++ /dev/null @@ -1,137 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/btcsuite/btcd" - packages = ["btcec"] - revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64" - -[[projects]] - branch = "master" - name = "github.com/btcsuite/btcutil" - packages = ["base58"] - revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/go-kit/kit" - packages = [ - "log", - "log/level", - "log/term" - ] - revision = "4dc7be5d2d12881735283bcab7352178e190fc71" - version = "v0.6.0" - -[[projects]] - name = "github.com/go-logfmt/logfmt" - packages = ["."] - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - version = "v0.3.0" - -[[projects]] - name = "github.com/go-stack/stack" - packages = ["."] - revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" - version = "v1.7.0" - -[[projects]] - name = "github.com/gogo/protobuf" - packages = [ - "gogoproto", - "proto", - "protoc-gen-gogo/descriptor" - ] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/kr/logfmt" - packages = ["."] - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require" - ] - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - branch = "master" - name = "github.com/tendermint/ed25519" - packages = [ - ".", - "edwards25519", - "extra25519" - ] - revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" - -[[projects]] - name = "github.com/tendermint/go-amino" - packages = ["."] - revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" - version = "0.10.1" - -[[projects]] - name = "github.com/tendermint/tmlibs" - packages = [ - "common", - "log", - "test" - ] - revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38" - version = "v0.8.4"
- -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - "chacha20poly1305", - "hkdf", - "internal/chacha20", - "internal/subtle", - "nacl/secretbox", - "openpgp/armor", - "openpgp/errors", - "poly1305", - "ripemd160", - "salsa20/salsa" - ] - revision = "7f39a6fea4fe9364fb61e1def6a268a51b4f3a06" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = ["cpu"] - revision = "ad87a3a340fa7f3bed189293fbfa7a9b7e021ae1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "027b22b86396a971d5d5c1d298947f531f39743975d65a22e98601140aa1b1a1" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/crypto/Gopkg.toml b/crypto/Gopkg.toml deleted file mode 100644 index a614df866..000000000 --- a/crypto/Gopkg.toml +++ /dev/null @@ -1,49 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - -[[constraint]] - name = "github.com/btcsuite/btcutil" - branch = "master" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.2.1" - -[[constraint]] - name = "github.com/tendermint/ed25519" - branch = "master" - -[[constraint]] - name = "github.com/tendermint/go-amino" - version = "0.10.0-rc2" - -[[constraint]] - name = "github.com/tendermint/tmlibs" - version = "0.8.1" - -[prune] - go-tests = true - unused-packages = true diff --git a/crypto/README.md b/crypto/README.md index 32afde699..5fac67338 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -3,8 +3,15 @@ crypto is the cryptographic package adapted for Tendermint's uses ## Importing it +To get the interfaces, `import "github.com/tendermint/tendermint/crypto"`. +For any specific algorithm, use its specific package, e.g. +`import "github.com/tendermint/tendermint/crypto/ed25519"` + +If you want to decode bytes into one of the types, but don't care about the specific algorithm, use +`import "github.com/tendermint/tendermint/crypto/encoding/amino"` + ## Binary encoding For Binary encoding, please refer to the [Tendermint encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md). @@ -16,9 +23,9 @@ crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported.
```go Example Amino:JSON encodings: -crypto.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="} +ed25519.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="} crypto.SignatureEd25519 - {"type":"6BF5903DA1DB28","value":"77sQNZOrf7ltExpf7AV1WaYPCHbyRLgjBsoWVzcduuLk+jIGmYk+s5R6Emm29p12HeiNAuhUJgdFGmwkpeGJCA=="} -crypto.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="} +ed25519.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="} crypto.PrivKeySecp256k1 - {"type":"019E82E1B0F798","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="} crypto.SignatureSecp256k1 - {"type":"6D1EA416E1FEE8","value":"MEUCIQCIg5TqS1l7I+MKTrSPIuUN2+4m5tA29dcauqn3NhEJ2wIgICaZ+lgRc5aOTVahU/XoLopXKn8BZcl0bnuYWLvohR8="} crypto.PubKeySecp256k1 - {"type":"F8CCEAEB5AE980","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"} diff --git a/crypto/amino.go b/crypto/amino.go deleted file mode 100644 index 6a8703fc9..000000000 --- a/crypto/amino.go +++ /dev/null @@ -1,37 +0,0 @@ -package crypto - -import ( - amino "github.com/tendermint/go-amino" -) - -var cdc = amino.NewCodec() - -func init() { - // NOTE: It's important that there be no conflicts here, - // as that would change the canonical representations, - // and therefore change the address. - // TODO: Add feature to go-amino to ensure that there - // are no conflicts. - RegisterAmino(cdc) -} - -// RegisterAmino registers all crypto related types in the given (amino) codec. -func RegisterAmino(cdc *amino.Codec) { - cdc.RegisterInterface((*PubKey)(nil), nil) - cdc.RegisterConcrete(PubKeyEd25519{}, - "tendermint/PubKeyEd25519", nil) - cdc.RegisterConcrete(PubKeySecp256k1{}, - "tendermint/PubKeySecp256k1", nil) - - cdc.RegisterInterface((*PrivKey)(nil), nil) - cdc.RegisterConcrete(PrivKeyEd25519{}, - "tendermint/PrivKeyEd25519", nil) - cdc.RegisterConcrete(PrivKeySecp256k1{}, - "tendermint/PrivKeySecp256k1", nil) - - cdc.RegisterInterface((*Signature)(nil), nil) - cdc.RegisterConcrete(SignatureEd25519{}, - "tendermint/SignatureEd25519", nil) - cdc.RegisterConcrete(SignatureSecp256k1{}, - "tendermint/SignatureSecp256k1", nil) -} diff --git a/crypto/armor.go b/crypto/armor/armor.go similarity index 98% rename from crypto/armor.go rename to crypto/armor/armor.go index 4146048ad..c15d070e6 100644 --- a/crypto/armor.go +++ b/crypto/armor/armor.go @@ -1,4 +1,4 @@ -package crypto +package armor import ( "bytes" diff --git a/crypto/armor_test.go b/crypto/armor/armor_test.go similarity index 96% rename from crypto/armor_test.go rename to crypto/armor/armor_test.go index 5eae87c00..4aa23b211 100644 --- a/crypto/armor_test.go +++ b/crypto/armor/armor_test.go @@ -1,4 +1,4 @@ -package crypto +package armor import ( "testing" diff --git a/crypto/crypto.go b/crypto/crypto.go new file mode 100644 index 000000000..4c097b351 --- /dev/null +++ b/crypto/crypto.go @@ -0,0 +1,36 @@ +package crypto + +import ( + cmn "github.com/tendermint/tendermint/libs/common" +) + +type PrivKey interface { + Bytes() []byte + Sign(msg []byte) (Signature, error) + PubKey() PubKey + Equals(PrivKey) bool +} + +// An address is a []byte, but hex-encoded even in JSON. +// []byte leaves us the option to change the address length. +// Use an alias so Unmarshal methods (with ptr receivers) are available too. 
+type Address = cmn.HexBytes + +type PubKey interface { + Address() Address + Bytes() []byte + VerifyBytes(msg []byte, sig Signature) bool + Equals(PubKey) bool +} + +type Signature interface { + Bytes() []byte + IsZero() bool + Equals(Signature) bool +} + +type Symmetric interface { + Keygen() []byte + Encrypt(plaintext []byte, secret []byte) (ciphertext []byte) + Decrypt(ciphertext []byte, secret []byte) (plaintext []byte, err error) +} diff --git a/crypto/doc.go b/crypto/doc.go index 544e0df36..41b3f3021 100644 --- a/crypto/doc.go +++ b/crypto/doc.go @@ -22,7 +22,7 @@ // pubKey := key.PubKey() // For example: -// privKey, err := crypto.GenPrivKeyEd25519() +// privKey, err := ed25519.GenPrivKey() // if err != nil { // ... // } diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go new file mode 100644 index 000000000..8b7bd42bd --- /dev/null +++ b/crypto/ed25519/ed25519.go @@ -0,0 +1,227 @@ +package ed25519 + +import ( + "bytes" + "crypto/subtle" + "fmt" + + "github.com/tendermint/ed25519" + "github.com/tendermint/ed25519/extra25519" + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" +) + +//------------------------------------- + +var _ crypto.PrivKey = PrivKeyEd25519{} + +const ( + Ed25519PrivKeyAminoRoute = "tendermint/PrivKeyEd25519" + Ed25519PubKeyAminoRoute = "tendermint/PubKeyEd25519" + Ed25519SignatureAminoRoute = "tendermint/SignatureEd25519" +) + +var cdc = amino.NewCodec() + +func init() { + cdc.RegisterInterface((*crypto.PubKey)(nil), nil) + cdc.RegisterConcrete(PubKeyEd25519{}, + Ed25519PubKeyAminoRoute, nil) + + cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) + cdc.RegisterConcrete(PrivKeyEd25519{}, + Ed25519PrivKeyAminoRoute, nil) + + cdc.RegisterInterface((*crypto.Signature)(nil), nil) + cdc.RegisterConcrete(SignatureEd25519{}, + Ed25519SignatureAminoRoute, nil) +} + +// PrivKeyEd25519 implements crypto.PrivKey. +type PrivKeyEd25519 [64]byte + +// Bytes marshals the privkey using amino encoding. +func (privKey PrivKeyEd25519) Bytes() []byte { + return cdc.MustMarshalBinaryBare(privKey) +} + +// Sign produces a signature on the provided message. +func (privKey PrivKeyEd25519) Sign(msg []byte) (crypto.Signature, error) { + privKeyBytes := [64]byte(privKey) + signatureBytes := ed25519.Sign(&privKeyBytes, msg) + return SignatureEd25519(*signatureBytes), nil +} + +// PubKey gets the corresponding public key from the private key. +func (privKey PrivKeyEd25519) PubKey() crypto.PubKey { + privKeyBytes := [64]byte(privKey) + initialized := false + // If the latter 32 bytes of the privkey are all zero, compute the pubkey + // otherwise privkey is initialized and we can use the cached value inside + // of the private key. + for _, v := range privKeyBytes[32:] { + if v != 0 { + initialized = true + break + } + } + if initialized { + var pubkeyBytes [PubKeyEd25519Size]byte + copy(pubkeyBytes[:], privKeyBytes[32:]) + return PubKeyEd25519(pubkeyBytes) + } + + pubBytes := *ed25519.MakePublicKey(&privKeyBytes) + return PubKeyEd25519(pubBytes) +} + +// Equals - you probably don't need to use this. +// Runs in constant time based on length of the keys. 
+func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool { + if otherEd, ok := other.(PrivKeyEd25519); ok { + return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1 + } else { + return false + } +} + +// ToCurve25519 takes a private key and returns its representation on +// Curve25519. Curve25519 is birationally equivalent to Edwards25519, +// which Ed25519 uses internally. This method is intended for use in +// an X25519 Diffie-Hellman key exchange. +func (privKey PrivKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { + keyCurve25519 := new([32]byte) + privKeyBytes := [64]byte(privKey) + extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes) + return keyCurve25519 +} + +// GenPrivKey generates a new ed25519 private key. +// It uses OS randomness in conjunction with the current global random seed +// in tendermint/libs/common to generate the private key. +func GenPrivKey() PrivKeyEd25519 { + privKey := new([64]byte) + copy(privKey[:32], crypto.CRandBytes(32)) + // ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey. + // It places the pubkey in the last 32 bytes of privKey, and returns the + // public key. + ed25519.MakePublicKey(privKey) + return PrivKeyEd25519(*privKey) +} + +// GenPrivKeyFromSecret hashes the secret with SHA2, and uses +// that 32 byte output to create the private key. +// NOTE: secret should be the output of a KDF like bcrypt, +// if it's derived from user input. +func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 { + privKey32 := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. + privKey := new([64]byte) + copy(privKey[:32], privKey32) + // ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey. + // It places the pubkey in the last 32 bytes of privKey, and returns the + // public key. + ed25519.MakePublicKey(privKey) + return PrivKeyEd25519(*privKey) +} + +//------------------------------------- + +var _ crypto.PubKey = PubKeyEd25519{} + +// PubKeyEd25519Size is the number of bytes in an Ed25519 public key. +const PubKeyEd25519Size = 32 + +// PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme. +type PubKeyEd25519 [PubKeyEd25519Size]byte + +// Address is the SHA256-20 of the raw pubkey bytes. +func (pubKey PubKeyEd25519) Address() crypto.Address { + return crypto.Address(tmhash.Sum(pubKey[:])) +} + +// Bytes marshals the PubKey using amino encoding. +func (pubKey PubKeyEd25519) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(pubKey) + if err != nil { + panic(err) + } + return bz +} + +func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ crypto.Signature) bool { + // make sure we use the same algorithm to sign + sig, ok := sig_.(SignatureEd25519) + if !ok { + return false + } + pubKeyBytes := [PubKeyEd25519Size]byte(pubKey) + sigBytes := [SignatureEd25519Size]byte(sig) + return ed25519.Verify(&pubKeyBytes, msg, &sigBytes) +} + +// ToCurve25519 takes a public key and returns its representation on +// Curve25519. Curve25519 is birationally equivalent to Edwards25519, +// which Ed25519 uses internally. This method is intended for use in +// an X25519 Diffie-Hellman key exchange. +// +// If there is an error, then this function returns nil.
+func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { + keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey) + ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes) + if !ok { + return nil + } + return keyCurve25519 +} + +func (pubKey PubKeyEd25519) String() string { + return fmt.Sprintf("PubKeyEd25519{%X}", pubKey[:]) +} + +// nolint: golint +func (pubKey PubKeyEd25519) Equals(other crypto.PubKey) bool { + if otherEd, ok := other.(PubKeyEd25519); ok { + return bytes.Equal(pubKey[:], otherEd[:]) + } else { + return false + } +} + +//------------------------------------- + +var _ crypto.Signature = SignatureEd25519{} + +// Size of an Edwards25519 signature. Namely the size of a compressed +// Edwards25519 point, and a field element. Both of which are 32 bytes. +const SignatureEd25519Size = 64 + +// SignatureEd25519 implements crypto.Signature +type SignatureEd25519 [SignatureEd25519Size]byte + +func (sig SignatureEd25519) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(sig) + if err != nil { + panic(err) + } + return bz +} + +func (sig SignatureEd25519) IsZero() bool { return len(sig) == 0 } + +func (sig SignatureEd25519) String() string { return fmt.Sprintf("/%X.../", cmn.Fingerprint(sig[:])) } + +func (sig SignatureEd25519) Equals(other crypto.Signature) bool { + if otherEd, ok := other.(SignatureEd25519); ok { + return subtle.ConstantTimeCompare(sig[:], otherEd[:]) == 1 + } else { + return false + } +} + +func SignatureEd25519FromBytes(data []byte) crypto.Signature { + var sig SignatureEd25519 + copy(sig[:], data) + return sig +} diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go new file mode 100644 index 000000000..5c407ccd1 --- /dev/null +++ b/crypto/ed25519/ed25519_test.go @@ -0,0 +1,31 @@ +package ed25519_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" +) + +func TestSignAndValidateEd25519(t *testing.T) { + + privKey := ed25519.GenPrivKey() + pubKey := privKey.PubKey() + + msg := crypto.CRandBytes(128) + sig, err := privKey.Sign(msg) + require.Nil(t, err) + + // Test the signature + assert.True(t, pubKey.VerifyBytes(msg, sig)) + + // Mutate the signature, just one bit. + // TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10 + sigEd := sig.(ed25519.SignatureEd25519) + sigEd[7] ^= byte(0x01) + sig = sigEd + + assert.False(t, pubKey.VerifyBytes(msg, sig)) +} diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go new file mode 100644 index 000000000..2b5e15b4f --- /dev/null +++ b/crypto/encoding/amino/amino.go @@ -0,0 +1,57 @@ +package cryptoAmino + +import ( + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" +) + +var cdc = amino.NewCodec() + +func init() { + // NOTE: It's important that there be no conflicts here, + // as that would change the canonical representations, + // and therefore change the address. + // TODO: Remove above note when + // https://github.com/tendermint/go-amino/issues/9 + // is resolved + RegisterAmino(cdc) +} + +// RegisterAmino registers all crypto related types in the given (amino) codec. 
+func RegisterAmino(cdc *amino.Codec) { + // These are all written here instead of in the crypto package: the + // concrete key types now live in subpackages that import crypto, so + // registering them there would create an import cycle. + cdc.RegisterInterface((*crypto.PubKey)(nil), nil) + cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, + "tendermint/PubKeyEd25519", nil) + cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{}, + "tendermint/PubKeySecp256k1", nil) + + cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) + cdc.RegisterConcrete(ed25519.PrivKeyEd25519{}, + "tendermint/PrivKeyEd25519", nil) + cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{}, + "tendermint/PrivKeySecp256k1", nil) + + cdc.RegisterInterface((*crypto.Signature)(nil), nil) + cdc.RegisterConcrete(ed25519.SignatureEd25519{}, + "tendermint/SignatureEd25519", nil) + cdc.RegisterConcrete(secp256k1.SignatureSecp256k1{}, + "tendermint/SignatureSecp256k1", nil) +} + +func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) { + err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey) + return +} + +func PubKeyFromBytes(pubKeyBytes []byte) (pubKey crypto.PubKey, err error) { + err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey) + return +} + +func SignatureFromBytes(sigBytes []byte) (sig crypto.Signature, err error) { + err = cdc.UnmarshalBinaryBare(sigBytes, &sig) + return +} diff --git a/crypto/encode_test.go b/crypto/encoding/amino/encode_test.go similarity index 71% rename from crypto/encode_test.go rename to crypto/encoding/amino/encode_test.go index 16555bf71..e01206089 100644 --- a/crypto/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -1,4 +1,4 @@ -package crypto +package cryptoAmino import ( "os" @@ -6,6 +6,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" ) type byter interface { @@ -56,64 +59,70 @@ func ExamplePrintRegisteredTypes() { func TestKeyEncodings(t *testing.T) { cases := []struct { - privKey PrivKey + privKey crypto.PrivKey privSize, pubSize int // binary sizes }{ { - privKey: GenPrivKeyEd25519(), + privKey: ed25519.GenPrivKey(), privSize: 69, pubSize: 37, }, { - privKey: GenPrivKeySecp256k1(), + privKey: secp256k1.GenPrivKey(), privSize: 37, pubSize: 38, }, } - for _, tc := range cases { + for tcIndex, tc := range cases { // Check (de/en)codings of PrivKeys. - var priv2, priv3 PrivKey + var priv2, priv3 crypto.PrivKey checkAminoBinary(t, tc.privKey, &priv2, tc.privSize) - assert.EqualValues(t, tc.privKey, priv2) + assert.EqualValues(t, tc.privKey, priv2, "tc #%d", tcIndex) checkAminoJSON(t, tc.privKey, &priv3, false) // TODO also check Prefix bytes. - assert.EqualValues(t, tc.privKey, priv3) + assert.EqualValues(t, tc.privKey, priv3, "tc #%d", tcIndex) // Check (de/en)codings of Signatures. - var sig1, sig2, sig3 Signature + var sig1, sig2, sig3 crypto.Signature sig1, err := tc.privKey.Sign([]byte("something")) - assert.NoError(t, err) + assert.NoError(t, err, "tc #%d", tcIndex) checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways. - assert.EqualValues(t, sig1, sig2) + assert.EqualValues(t, sig1, sig2, "tc #%d", tcIndex) checkAminoJSON(t, sig1, &sig3, false) // TODO also check Prefix bytes. - assert.EqualValues(t, sig1, sig3) + assert.EqualValues(t, sig1, sig3, "tc #%d", tcIndex) // Check (de/en)codings of PubKeys.
pubKey := tc.privKey.PubKey() - var pub2, pub3 PubKey + var pub2, pub3 crypto.PubKey checkAminoBinary(t, pubKey, &pub2, tc.pubSize) - assert.EqualValues(t, pubKey, pub2) + assert.EqualValues(t, pubKey, pub2, "tc #%d", tcIndex) checkAminoJSON(t, pubKey, &pub3, false) // TODO also check Prefix bytes. - assert.EqualValues(t, pubKey, pub3) + assert.EqualValues(t, pubKey, pub3, "tc #%d", tcIndex) } } func TestNilEncodings(t *testing.T) { // Check nil Signature. - var a, b Signature + var a, b crypto.Signature checkAminoJSON(t, &a, &b, true) assert.EqualValues(t, a, b) // Check nil PubKey. - var c, d PubKey + var c, d crypto.PubKey checkAminoJSON(t, &c, &d, true) assert.EqualValues(t, c, d) // Check nil PrivKey. - var e, f PrivKey + var e, f crypto.PrivKey checkAminoJSON(t, &e, &f, true) assert.EqualValues(t, e, f) } + +func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) { + pk, err := PubKeyFromBytes([]byte("foo")) + require.NotNil(t, err, "expecting a non-nil error") + require.Nil(t, pk, "expecting an empty public key on error") +} diff --git a/crypto/hash.go b/crypto/hash.go index 165b1e153..c1fb41f7a 100644 --- a/crypto/hash.go +++ b/crypto/hash.go @@ -2,6 +2,7 @@ package crypto import ( "crypto/sha256" + "golang.org/x/crypto/ripemd160" ) diff --git a/crypto/hkdfchacha20poly1305/hkdfchachapoly.go b/crypto/hkdfchacha20poly1305/hkdfchachapoly.go index ab3b9df3a..4bf24cb18 100644 --- a/crypto/hkdfchacha20poly1305/hkdfchachapoly.go +++ b/crypto/hkdfchacha20poly1305/hkdfchachapoly.go @@ -14,6 +14,7 @@ import ( "golang.org/x/crypto/hkdf" ) +// Implements crypto.AEAD type hkdfchacha20poly1305 struct { key [KeySize]byte } diff --git a/crypto/merkle/doc.go b/crypto/merkle/doc.go index da65dd858..865c30217 100644 --- a/crypto/merkle/doc.go +++ b/crypto/merkle/doc.go @@ -28,4 +28,4 @@ https://bitcointalk.org/?topic=102395 TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this is secure. */ -package merkle \ No newline at end of file +package merkle diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go index 86a9bad9c..ba4b9309a 100644 --- a/crypto/merkle/simple_map.go +++ b/crypto/merkle/simple_map.go @@ -2,7 +2,7 @@ package merkle import ( "github.com/tendermint/tendermint/crypto/tmhash" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Merkle tree from a map. diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go index 35a6eaa7c..46a075909 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/simple_tree.go @@ -9,12 +9,12 @@ func SimpleHashFromTwoHashes(left, right []byte) []byte { var hasher = tmhash.New() err := encodeByteSlice(hasher, left) if err != nil { - panic(err) - } + panic(err) + } err = encodeByteSlice(hasher, right) if err != nil { - panic(err) - } + panic(err) + } return hasher.Sum(nil) } diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index a721bccea..488e0c907 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -3,11 +3,11 @@ package merkle import ( "bytes" - cmn "github.com/tendermint/tmlibs/common" - . "github.com/tendermint/tmlibs/test" + cmn "github.com/tendermint/tendermint/libs/common" + .
"github.com/tendermint/tendermint/libs/test" - "testing" "github.com/tendermint/tendermint/crypto/tmhash" + "testing" ) type testItem []byte diff --git a/crypto/priv_key.go b/crypto/priv_key.go deleted file mode 100644 index dbfe64c33..000000000 --- a/crypto/priv_key.go +++ /dev/null @@ -1,164 +0,0 @@ -package crypto - -import ( - "crypto/subtle" - - secp256k1 "github.com/btcsuite/btcd/btcec" - "github.com/tendermint/ed25519" - "github.com/tendermint/ed25519/extra25519" -) - -func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) { - err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey) - return -} - -//---------------------------------------- - -type PrivKey interface { - Bytes() []byte - Sign(msg []byte) (Signature, error) - PubKey() PubKey - Equals(PrivKey) bool -} - -//------------------------------------- - -var _ PrivKey = PrivKeyEd25519{} - -// Implements PrivKey -type PrivKeyEd25519 [64]byte - -func (privKey PrivKeyEd25519) Bytes() []byte { - return cdc.MustMarshalBinaryBare(privKey) -} - -func (privKey PrivKeyEd25519) Sign(msg []byte) (Signature, error) { - privKeyBytes := [64]byte(privKey) - signatureBytes := ed25519.Sign(&privKeyBytes, msg) - return SignatureEd25519(*signatureBytes), nil -} - -func (privKey PrivKeyEd25519) PubKey() PubKey { - privKeyBytes := [64]byte(privKey) - pubBytes := *ed25519.MakePublicKey(&privKeyBytes) - return PubKeyEd25519(pubBytes) -} - -// Equals - you probably don't need to use this. -// Runs in constant time based on length of the keys. -func (privKey PrivKeyEd25519) Equals(other PrivKey) bool { - if otherEd, ok := other.(PrivKeyEd25519); ok { - return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1 - } else { - return false - } -} - -func (privKey PrivKeyEd25519) ToCurve25519() *[32]byte { - keyCurve25519 := new([32]byte) - privKeyBytes := [64]byte(privKey) - extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes) - return keyCurve25519 -} - -// Deterministically generates new priv-key bytes from key. -func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 { - bz, err := cdc.MarshalBinaryBare(struct { - PrivKey [64]byte - Index int - }{privKey, index}) - if err != nil { - panic(err) - } - newBytes := Sha256(bz) - newKey := new([64]byte) - copy(newKey[:32], newBytes) - ed25519.MakePublicKey(newKey) - return PrivKeyEd25519(*newKey) -} - -func GenPrivKeyEd25519() PrivKeyEd25519 { - privKeyBytes := new([64]byte) - copy(privKeyBytes[:32], CRandBytes(32)) - ed25519.MakePublicKey(privKeyBytes) - return PrivKeyEd25519(*privKeyBytes) -} - -// NOTE: secret should be the output of a KDF like bcrypt, -// if it's derived from user input. -func GenPrivKeyEd25519FromSecret(secret []byte) PrivKeyEd25519 { - privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes. 
- privKeyBytes := new([64]byte) - copy(privKeyBytes[:32], privKey32) - ed25519.MakePublicKey(privKeyBytes) - return PrivKeyEd25519(*privKeyBytes) -} - -//------------------------------------- - -var _ PrivKey = PrivKeySecp256k1{} - -// Implements PrivKey -type PrivKeySecp256k1 [32]byte - -func (privKey PrivKeySecp256k1) Bytes() []byte { - return cdc.MustMarshalBinaryBare(privKey) -} - -func (privKey PrivKeySecp256k1) Sign(msg []byte) (Signature, error) { - priv__, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) - sig__, err := priv__.Sign(Sha256(msg)) - if err != nil { - return nil, err - } - return SignatureSecp256k1(sig__.Serialize()), nil -} - -func (privKey PrivKeySecp256k1) PubKey() PubKey { - _, pub__ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) - var pub PubKeySecp256k1 - copy(pub[:], pub__.SerializeCompressed()) - return pub -} - -// Equals - you probably don't need to use this. -// Runs in constant time based on length of the keys. -func (privKey PrivKeySecp256k1) Equals(other PrivKey) bool { - if otherSecp, ok := other.(PrivKeySecp256k1); ok { - return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1 - } else { - return false - } -} - -/* -// Deterministically generates new priv-key bytes from key. -func (key PrivKeySecp256k1) Generate(index int) PrivKeySecp256k1 { - newBytes := cdc.BinarySha256(struct { - PrivKey [64]byte - Index int - }{key, index}) - var newKey [64]byte - copy(newKey[:], newBytes) - return PrivKeySecp256k1(newKey) -} -*/ - -func GenPrivKeySecp256k1() PrivKeySecp256k1 { - privKeyBytes := [32]byte{} - copy(privKeyBytes[:], CRandBytes(32)) - priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKeyBytes[:]) - copy(privKeyBytes[:], priv.Serialize()) - return PrivKeySecp256k1(privKeyBytes) -} - -// NOTE: secret should be the output of a KDF like bcrypt, -// if it's derived from user input. -func GenPrivKeySecp256k1FromSecret(secret []byte) PrivKeySecp256k1 { - privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes. - priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey32) - privKeyBytes := [32]byte{} - copy(privKeyBytes[:], priv.Serialize()) - return PrivKeySecp256k1(privKeyBytes) -} diff --git a/crypto/priv_key_test.go b/crypto/priv_key_test.go deleted file mode 100644 index c1ae33ed1..000000000 --- a/crypto/priv_key_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package crypto_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/crypto" -) - -func TestGeneratePrivKey(t *testing.T) { - testPriv := crypto.GenPrivKeyEd25519() - testGenerate := testPriv.Generate(1) - signBytes := []byte("something to sign") - pub := testGenerate.PubKey() - sig, err := testGenerate.Sign(signBytes) - assert.NoError(t, err) - assert.True(t, pub.VerifyBytes(signBytes, sig)) -} - -/* - -type BadKey struct { - PrivKeyEd25519 -} - -func TestReadPrivKey(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // garbage in, garbage out - garbage := []byte("hjgewugfbiewgofwgewr") - XXX This test wants to register BadKey globally to crypto, - but we don't want to support that. - _, err := PrivKeyFromBytes(garbage) - require.Error(err) - - edKey := GenPrivKeyEd25519() - badKey := BadKey{edKey} - - cases := []struct { - key PrivKey - valid bool - }{ - {edKey, true}, - {badKey, false}, - } - - for i, tc := range cases { - data := tc.key.Bytes() - fmt.Println(">>>", data) - key, err := PrivKeyFromBytes(data) - fmt.Printf("!!! 
%#v\n", key, err) - if tc.valid { - assert.NoError(err, "%d", i) - assert.Equal(tc.key, key, "%d", i) - } else { - assert.Error(err, "%d: %#v", i, key) - } - } -} -*/ diff --git a/crypto/pub_key.go b/crypto/pub_key.go deleted file mode 100644 index 51ef6c54e..000000000 --- a/crypto/pub_key.go +++ /dev/null @@ -1,149 +0,0 @@ -package crypto - -import ( - "bytes" - "crypto/sha256" - "fmt" - - "golang.org/x/crypto/ripemd160" - - secp256k1 "github.com/btcsuite/btcd/btcec" - - "github.com/tendermint/ed25519" - "github.com/tendermint/ed25519/extra25519" - - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/crypto/tmhash" -) - -// An address is a []byte, but hex-encoded even in JSON. -// []byte leaves us the option to change the address length. -// Use an alias so Unmarshal methods (with ptr receivers) are available too. -type Address = cmn.HexBytes - -func PubKeyFromBytes(pubKeyBytes []byte) (pubKey PubKey, err error) { - err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey) - return -} - -//---------------------------------------- - -type PubKey interface { - Address() Address - Bytes() []byte - VerifyBytes(msg []byte, sig Signature) bool - Equals(PubKey) bool -} - -//------------------------------------- - -var _ PubKey = PubKeyEd25519{} - -// Implements PubKeyInner -type PubKeyEd25519 [32]byte - -// Address is the SHA256-20 of the raw pubkey bytes. -func (pubKey PubKeyEd25519) Address() Address { - return Address(tmhash.Sum(pubKey[:])) -} - -func (pubKey PubKeyEd25519) Bytes() []byte { - bz, err := cdc.MarshalBinaryBare(pubKey) - if err != nil { - panic(err) - } - return bz -} - -func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool { - // make sure we use the same algorithm to sign - sig, ok := sig_.(SignatureEd25519) - if !ok { - return false - } - pubKeyBytes := [32]byte(pubKey) - sigBytes := [64]byte(sig) - return ed25519.Verify(&pubKeyBytes, msg, &sigBytes) -} - -// For use with golang/crypto/nacl/box -// If error, returns nil. -func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte { - keyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey) - ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes) - if !ok { - return nil - } - return keyCurve25519 -} - -func (pubKey PubKeyEd25519) String() string { - return fmt.Sprintf("PubKeyEd25519{%X}", pubKey[:]) -} - -func (pubKey PubKeyEd25519) Equals(other PubKey) bool { - if otherEd, ok := other.(PubKeyEd25519); ok { - return bytes.Equal(pubKey[:], otherEd[:]) - } else { - return false - } -} - -//------------------------------------- - -var _ PubKey = PubKeySecp256k1{} - -// Implements PubKey. -// Compressed pubkey (just the x-cord), -// prefixed with 0x02 or 0x03, depending on the y-cord. 
-type PubKeySecp256k1 [33]byte - -// Implements Bitcoin style addresses: RIPEMD160(SHA256(pubkey)) -func (pubKey PubKeySecp256k1) Address() Address { - hasherSHA256 := sha256.New() - hasherSHA256.Write(pubKey[:]) // does not error - sha := hasherSHA256.Sum(nil) - - hasherRIPEMD160 := ripemd160.New() - hasherRIPEMD160.Write(sha) // does not error - return Address(hasherRIPEMD160.Sum(nil)) -} - -func (pubKey PubKeySecp256k1) Bytes() []byte { - bz, err := cdc.MarshalBinaryBare(pubKey) - if err != nil { - panic(err) - } - return bz -} - -func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig_ Signature) bool { - // and assert same algorithm to sign and verify - sig, ok := sig_.(SignatureSecp256k1) - if !ok { - return false - } - - pub__, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256()) - if err != nil { - return false - } - sig__, err := secp256k1.ParseDERSignature(sig[:], secp256k1.S256()) - if err != nil { - return false - } - return sig__.Verify(Sha256(msg), pub__) -} - -func (pubKey PubKeySecp256k1) String() string { - return fmt.Sprintf("PubKeySecp256k1{%X}", pubKey[:]) -} - -func (pubKey PubKeySecp256k1) Equals(other PubKey) bool { - if otherSecp, ok := other.(PubKeySecp256k1); ok { - return bytes.Equal(pubKey[:], otherSecp[:]) - } else { - return false - } -} diff --git a/crypto/pub_key_test.go b/crypto/pub_key_test.go deleted file mode 100644 index 7b856cf18..000000000 --- a/crypto/pub_key_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package crypto - -import ( - "encoding/hex" - "testing" - - "github.com/btcsuite/btcutil/base58" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type keyData struct { - priv string - pub string - addr string -} - -var secpDataTable = []keyData{ - { - priv: "a96e62ed3955e65be32703f12d87b6b5cf26039ecfa948dc5107a495418e5330", - pub: "02950e1cdfcb133d6024109fd489f734eeb4502418e538c28481f22bce276f248c", - addr: "1CKZ9Nx4zgds8tU7nJHotKSDr4a9bYJCa3", - }, -} - -func TestPubKeySecp256k1Address(t *testing.T) { - for _, d := range secpDataTable { - privB, _ := hex.DecodeString(d.priv) - pubB, _ := hex.DecodeString(d.pub) - addrBbz, _, _ := base58.CheckDecode(d.addr) - addrB := Address(addrBbz) - - var priv PrivKeySecp256k1 - copy(priv[:], privB) - - pubKey := priv.PubKey() - pubT, _ := pubKey.(PubKeySecp256k1) - pub := pubT[:] - addr := pubKey.Address() - - assert.Equal(t, pub, pubB, "Expected pub keys to match") - assert.Equal(t, addr, addrB, "Expected addresses to match") - } -} - -func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) { - pk, err := PubKeyFromBytes([]byte("foo")) - require.NotNil(t, err, "expecting a non-nil error") - require.Nil(t, pk, "expecting an empty public key on error") -} diff --git a/crypto/random.go b/crypto/random.go index 66da035a9..5c5057d30 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -9,7 +9,7 @@ import ( "io" "sync" - . "github.com/tendermint/tmlibs/common" + . 
"github.com/tendermint/tendermint/libs/common" ) var gRandInfo *randInfo diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go new file mode 100644 index 000000000..4b210dc7f --- /dev/null +++ b/crypto/secp256k1/secp256k1.go @@ -0,0 +1,198 @@ +package secp256k1 + +import ( + "bytes" + "crypto/sha256" + "crypto/subtle" + "fmt" + + secp256k1 "github.com/btcsuite/btcd/btcec" + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/common" + "golang.org/x/crypto/ripemd160" +) + +//------------------------------------- +const ( + Secp256k1PrivKeyAminoRoute = "tendermint/PrivKeySecp256k1" + Secp256k1PubKeyAminoRoute = "tendermint/PubKeySecp256k1" + Secp256k1SignatureAminoRoute = "tendermint/SignatureSecp256k1" +) + +var cdc = amino.NewCodec() + +func init() { + cdc.RegisterInterface((*crypto.PubKey)(nil), nil) + cdc.RegisterConcrete(PubKeySecp256k1{}, + Secp256k1PubKeyAminoRoute, nil) + + cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) + cdc.RegisterConcrete(PrivKeySecp256k1{}, + Secp256k1PrivKeyAminoRoute, nil) + + cdc.RegisterInterface((*crypto.Signature)(nil), nil) + cdc.RegisterConcrete(SignatureSecp256k1{}, + Secp256k1SignatureAminoRoute, nil) +} + +//------------------------------------- + +var _ crypto.PrivKey = PrivKeySecp256k1{} + +// PrivKeySecp256k1 implements PrivKey. +type PrivKeySecp256k1 [32]byte + +// Bytes marshalls the private key using amino encoding. +func (privKey PrivKeySecp256k1) Bytes() []byte { + return cdc.MustMarshalBinaryBare(privKey) +} + +// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. +func (privKey PrivKeySecp256k1) Sign(msg []byte) (crypto.Signature, error) { + priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) + sig, err := priv.Sign(crypto.Sha256(msg)) + if err != nil { + return nil, err + } + return SignatureSecp256k1(sig.Serialize()), nil +} + +// PubKey performs the point-scalar multiplication from the privKey on the +// generator point to get the pubkey. +func (privKey PrivKeySecp256k1) PubKey() crypto.PubKey { + _, pubkeyObject := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) + var pubkeyBytes PubKeySecp256k1 + copy(pubkeyBytes[:], pubkeyObject.SerializeCompressed()) + return pubkeyBytes +} + +// Equals - you probably don't need to use this. +// Runs in constant time based on length of the keys. +func (privKey PrivKeySecp256k1) Equals(other crypto.PrivKey) bool { + if otherSecp, ok := other.(PrivKeySecp256k1); ok { + return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1 + } + return false +} + +// GenPrivKey generates a new ECDSA private key on curve secp256k1 private key. +// It uses OS randomness in conjunction with the current global random seed +// in tendermint/libs/common to generate the private key. +func GenPrivKey() PrivKeySecp256k1 { + privKeyBytes := [32]byte{} + copy(privKeyBytes[:], crypto.CRandBytes(32)) + // crypto.CRandBytes is guaranteed to be 32 bytes long, so it can be + // casted to PrivKeySecp256k1. + return PrivKeySecp256k1(privKeyBytes) +} + +// GenPrivKeySecp256k1 hashes the secret with SHA2, and uses +// that 32 byte output to create the private key. +// NOTE: secret should be the output of a KDF like bcrypt, +// if it's derived from user input. +func GenPrivKeySecp256k1(secret []byte) PrivKeySecp256k1 { + privKey32 := sha256.Sum256(secret) + // sha256.Sum256() is guaranteed to be 32 bytes long, so it can be + // casted to PrivKeySecp256k1. 
+ return PrivKeySecp256k1(privKey32) +} + +//------------------------------------- + +var _ crypto.PubKey = PubKeySecp256k1{} + +// PubKeySecp256k1Size is 32 bytes for one field element +// (the x-coordinate), plus one byte for the parity of the y-coordinate. +const PubKeySecp256k1Size = 33 + +// PubKeySecp256k1 implements crypto.PubKey. +// It is the compressed form of the pubkey. The first byte is 0x02 +// if the y-coordinate is the lexicographically largest of the two associated with +// the x-coordinate. Otherwise the first byte is 0x03. +// This prefix is followed by the x-coordinate. +type PubKeySecp256k1 [PubKeySecp256k1Size]byte + +// Address returns a Bitcoin style address: RIPEMD160(SHA256(pubkey)) +func (pubKey PubKeySecp256k1) Address() crypto.Address { + hasherSHA256 := sha256.New() + hasherSHA256.Write(pubKey[:]) // does not error + sha := hasherSHA256.Sum(nil) + + hasherRIPEMD160 := ripemd160.New() + hasherRIPEMD160.Write(sha) // does not error + return crypto.Address(hasherRIPEMD160.Sum(nil)) +} + +// Bytes returns the pubkey marshalled with amino encoding. +func (pubKey PubKeySecp256k1) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(pubKey) + if err != nil { + panic(err) + } + return bz +} + +func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, interfaceSig crypto.Signature) bool { + // and assert same algorithm to sign and verify + sig, ok := interfaceSig.(SignatureSecp256k1) + if !ok { + return false + } + + pub, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256()) + if err != nil { + return false + } + parsedSig, err := secp256k1.ParseDERSignature(sig[:], secp256k1.S256()) + if err != nil { + return false + } + return parsedSig.Verify(crypto.Sha256(msg), pub) +} + +func (pubKey PubKeySecp256k1) String() string { + return fmt.Sprintf("PubKeySecp256k1{%X}", pubKey[:]) +} + +func (pubKey PubKeySecp256k1) Equals(other crypto.PubKey) bool { + if otherSecp, ok := other.(PubKeySecp256k1); ok { + return bytes.Equal(pubKey[:], otherSecp[:]) + } + return false +} + +//------------------------------------- + +var _ crypto.Signature = SignatureSecp256k1{} + +// SignatureSecp256k1 implements crypto.Signature +type SignatureSecp256k1 []byte + +func (sig SignatureSecp256k1) Bytes() []byte { + bz, err := cdc.MarshalBinaryBare(sig) + if err != nil { + panic(err) + } + return bz +} + +func (sig SignatureSecp256k1) IsZero() bool { return len(sig) == 0 } + +func (sig SignatureSecp256k1) String() string { + return fmt.Sprintf("/%X.../", common.Fingerprint(sig[:])) +} + +func (sig SignatureSecp256k1) Equals(other crypto.Signature) bool { + if otherSecp, ok := other.(SignatureSecp256k1); ok { + return subtle.ConstantTimeCompare(sig[:], otherSecp[:]) == 1 + } else { + return false + } +} + +func SignatureSecp256k1FromBytes(data []byte) crypto.Signature { + sig := make(SignatureSecp256k1, len(data)) + copy(sig[:], data) + return sig +} diff --git a/crypto/secp256k1/secpk256k1_test.go b/crypto/secp256k1/secpk256k1_test.go new file mode 100644 index 000000000..46a27b3e6 --- /dev/null +++ b/crypto/secp256k1/secpk256k1_test.go @@ -0,0 +1,88 @@ +package secp256k1_test + +import ( + "encoding/hex" + "testing" + + "github.com/btcsuite/btcutil/base58" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/secp256k1" + + underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" +) + +type keyData struct { + priv string + pub string + addr string +} +
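Tying the crypto refactor together: algorithm-specific key generation now lives in subpackages such as `crypto/secp256k1`, while callers holding only amino-encoded bytes decode through `cryptoAmino`. A small sketch using just the functions introduced in this diff (the message bytes are illustrative):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
	"github.com/tendermint/tendermint/crypto/secp256k1"
)

func main() {
	// Generate a key from the algorithm-specific subpackage.
	priv := secp256k1.GenPrivKey()
	pub := priv.PubKey()

	// Sign and verify through the interface types in crypto/crypto.go.
	msg := []byte("transaction bytes")
	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println("verified:", pub.VerifyBytes(msg, sig))

	// Round-trip the pubkey through amino: callers that don't care which
	// algorithm produced the bytes decode via cryptoAmino.
	var decoded crypto.PubKey
	decoded, err = cryptoAmino.PubKeyFromBytes(pub.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Println("same key:", decoded.Equals(pub))
}
```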
+var secpDataTable = []keyData{ + { + priv: "a96e62ed3955e65be32703f12d87b6b5cf26039ecfa948dc5107a495418e5330", + pub: "02950e1cdfcb133d6024109fd489f734eeb4502418e538c28481f22bce276f248c", + addr: "1CKZ9Nx4zgds8tU7nJHotKSDr4a9bYJCa3", + }, +} + +func TestPubKeySecp256k1Address(t *testing.T) { + for _, d := range secpDataTable { + privB, _ := hex.DecodeString(d.priv) + pubB, _ := hex.DecodeString(d.pub) + addrBbz, _, _ := base58.CheckDecode(d.addr) + addrB := crypto.Address(addrBbz) + + var priv secp256k1.PrivKeySecp256k1 + copy(priv[:], privB) + + pubKey := priv.PubKey() + pubT, _ := pubKey.(secp256k1.PubKeySecp256k1) + pub := pubT[:] + addr := pubKey.Address() + + assert.Equal(t, pub, pubB, "Expected pub keys to match") + assert.Equal(t, addr, addrB, "Expected addresses to match") + } +} + +func TestSignAndValidateSecp256k1(t *testing.T) { + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + + msg := crypto.CRandBytes(128) + sig, err := privKey.Sign(msg) + require.Nil(t, err) + + assert.True(t, pubKey.VerifyBytes(msg, sig)) + + // Mutate the signature, just one bit. + sigEd := sig.(secp256k1.SignatureSecp256k1) + sigEd[3] ^= byte(0x01) + sig = sigEd + + assert.False(t, pubKey.VerifyBytes(msg, sig)) +} + +// This test is intended to justify the removal of calls to the underlying library +// in creating the privkey. +func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) { + numberOfTests := 256 + for i := 0; i < numberOfTests; i++ { + // Seed the test case with some random bytes + privKeyBytes := [32]byte{} + copy(privKeyBytes[:], crypto.CRandBytes(32)) + + // This function creates a private and public key in the underlying libraries format. + // The private key is basically calling new(big.Int).SetBytes(pk), which removes leading zero bytes + priv, _ := underlyingSecp256k1.PrivKeyFromBytes(underlyingSecp256k1.S256(), privKeyBytes[:]) + // this takes the bytes returned by `(big int).Bytes()`, and if the length is less than 32 bytes, + // pads the bytes from the left with zero bytes. Therefore these two functions composed + // result in the identity function on privKeyBytes, hence the following equality check + // always returning true. + serializedBytes := priv.Serialize() + require.Equal(t, privKeyBytes[:], serializedBytes) + } +} diff --git a/crypto/signature.go b/crypto/signature.go deleted file mode 100644 index 1ffb45ea3..000000000 --- a/crypto/signature.go +++ /dev/null @@ -1,88 +0,0 @@ -package crypto - -import ( - "fmt" - - "crypto/subtle" - - . 
"github.com/tendermint/tmlibs/common" -) - -func SignatureFromBytes(pubKeyBytes []byte) (pubKey Signature, err error) { - err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey) - return -} - -//---------------------------------------- - -type Signature interface { - Bytes() []byte - IsZero() bool - Equals(Signature) bool -} - -//------------------------------------- - -var _ Signature = SignatureEd25519{} - -// Implements Signature -type SignatureEd25519 [64]byte - -func (sig SignatureEd25519) Bytes() []byte { - bz, err := cdc.MarshalBinaryBare(sig) - if err != nil { - panic(err) - } - return bz -} - -func (sig SignatureEd25519) IsZero() bool { return len(sig) == 0 } - -func (sig SignatureEd25519) String() string { return fmt.Sprintf("/%X.../", Fingerprint(sig[:])) } - -func (sig SignatureEd25519) Equals(other Signature) bool { - if otherEd, ok := other.(SignatureEd25519); ok { - return subtle.ConstantTimeCompare(sig[:], otherEd[:]) == 1 - } else { - return false - } -} - -func SignatureEd25519FromBytes(data []byte) Signature { - var sig SignatureEd25519 - copy(sig[:], data) - return sig -} - -//------------------------------------- - -var _ Signature = SignatureSecp256k1{} - -// Implements Signature -type SignatureSecp256k1 []byte - -func (sig SignatureSecp256k1) Bytes() []byte { - bz, err := cdc.MarshalBinaryBare(sig) - if err != nil { - panic(err) - } - return bz -} - -func (sig SignatureSecp256k1) IsZero() bool { return len(sig) == 0 } - -func (sig SignatureSecp256k1) String() string { return fmt.Sprintf("/%X.../", Fingerprint(sig[:])) } - -func (sig SignatureSecp256k1) Equals(other Signature) bool { - if otherSecp, ok := other.(SignatureSecp256k1); ok { - return subtle.ConstantTimeCompare(sig[:], otherSecp[:]) == 1 - } else { - return false - } -} - -func SignatureSecp256k1FromBytes(data []byte) Signature { - sig := make(SignatureSecp256k1, len(data)) - copy(sig[:], data) - return sig -} diff --git a/crypto/signature_test.go b/crypto/signature_test.go deleted file mode 100644 index d6ae2b7a9..000000000 --- a/crypto/signature_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package crypto - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSignAndValidateEd25519(t *testing.T) { - - privKey := GenPrivKeyEd25519() - pubKey := privKey.PubKey() - - msg := CRandBytes(128) - sig, err := privKey.Sign(msg) - require.Nil(t, err) - - // Test the signature - assert.True(t, pubKey.VerifyBytes(msg, sig)) - - // Mutate the signature, just one bit. - sigEd := sig.(SignatureEd25519) - sigEd[7] ^= byte(0x01) - sig = sigEd - - assert.False(t, pubKey.VerifyBytes(msg, sig)) -} - -func TestSignAndValidateSecp256k1(t *testing.T) { - privKey := GenPrivKeySecp256k1() - pubKey := privKey.PubKey() - - msg := CRandBytes(128) - sig, err := privKey.Sign(msg) - require.Nil(t, err) - - assert.True(t, pubKey.VerifyBytes(msg, sig)) - - // Mutate the signature, just one bit. - sigEd := sig.(SignatureSecp256k1) - sigEd[3] ^= byte(0x01) - sig = sigEd - - assert.False(t, pubKey.VerifyBytes(msg, sig)) -} diff --git a/crypto/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go similarity index 79% rename from crypto/symmetric.go rename to crypto/xsalsa20symmetric/symmetric.go index d4ac9b55b..d2369675d 100644 --- a/crypto/symmetric.go +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -1,12 +1,15 @@ -package crypto +package xsalsa20symmetric import ( "errors" - . 
"github.com/tendermint/tmlibs/common" + "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" "golang.org/x/crypto/nacl/secretbox" ) +// TODO, make this into a struct that implements crypto.Symmetric. + const nonceLen = 24 const secretLen = 32 @@ -15,9 +18,9 @@ const secretLen = 32 // NOTE: call crypto.MixEntropy() first. func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { if len(secret) != secretLen { - PanicSanity(Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + cmn.PanicSanity(cmn.Fmt("Secret must be 32 bytes long, got len %v", len(secret))) } - nonce := CRandBytes(nonceLen) + nonce := crypto.CRandBytes(nonceLen) nonceArr := [nonceLen]byte{} copy(nonceArr[:], nonce) secretArr := [secretLen]byte{} @@ -32,7 +35,7 @@ func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { if len(secret) != secretLen { - PanicSanity(Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + cmn.PanicSanity(cmn.Fmt("Secret must be 32 bytes long, got len %v", len(secret))) } if len(ciphertext) <= secretbox.Overhead+nonceLen { return nil, errors.New("Ciphertext is too short") diff --git a/crypto/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go similarity index 81% rename from crypto/symmetric_test.go rename to crypto/xsalsa20symmetric/symmetric_test.go index d92bff1aa..d955307ea 100644 --- a/crypto/symmetric_test.go +++ b/crypto/xsalsa20symmetric/symmetric_test.go @@ -1,4 +1,4 @@ -package crypto +package xsalsa20symmetric import ( "testing" @@ -6,12 +6,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "golang.org/x/crypto/bcrypt" ) func TestSimple(t *testing.T) { - MixEntropy([]byte("someentropy")) + crypto.MixEntropy([]byte("someentropy")) plaintext := []byte("sometext") secret := []byte("somesecretoflengththirtytwo===32") @@ -24,7 +25,7 @@ func TestSimple(t *testing.T) { func TestSimpleWithKDF(t *testing.T) { - MixEntropy([]byte("someentropy")) + crypto.MixEntropy([]byte("someentropy")) plaintext := []byte("sometext") secretPass := []byte("somesecret") @@ -32,7 +33,7 @@ func TestSimpleWithKDF(t *testing.T) { if err != nil { t.Error(err) } - secret = Sha256(secret) + secret = crypto.Sha256(secret) ciphertext := EncryptSymmetric(plaintext, secret) plaintext2, err := DecryptSymmetric(ciphertext, secret) diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md new file mode 100644 index 000000000..016bac5e4 --- /dev/null +++ b/docs/DOCS_README.md @@ -0,0 +1,21 @@ +# Documentation Maintenance Overview + +The documentation found in this directory is hosted at: + +- https://tendermint.com/docs/ + +and built using [VuePress](https://vuepress.vuejs.org/) from the tendermint website repo: + +- https://github.com/tendermint/tendermint.com + +which has a [configuration file](https://github.com/tendermint/tendermint.com/blob/develop/docs/.vuepress/config.js) for displaying +the Table of Contents that lists all the documentation. + +Under the hood, Jenkins listens for changes in ./docs then pushes a `docs-staging` branch to the tendermint.com repo with the latest documentation. That branch must be manually PR'd to `develop` then `master` for staging then production. This process should happen in synchrony with a release. 
+
+The `README.md` in this directory is the landing page for
+website documentation and the following folders are intentionally
+omitted:
+
+- `architecture/` ==> contains Architecture Design Records
+- `spec/` ==> contains the detailed specification
diff --git a/docs/README.md b/docs/README.md
index 180acdcb9..16ea708ad 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,14 +1,27 @@
-Here lies our documentation. After making edits, run:
+# Tendermint
 
-```
-pip install -r requirements.txt
-make html
-```
+Welcome to the Tendermint Core documentation! The introduction below provides
+an overview to help you navigate to your area of interest.
 
-to build the docs locally then open the file `_build/html/index.html` in your browser.
+## Introduction
 
-**WARNING:** This documentation is intended to be viewed at:
+Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
+transition machine - written in any programming language - and securely
+replicates it on many machines. In other words, a blockchain.
 
-https://tendermint.readthedocs.io
+Tendermint requires an application running over the Application Blockchain
+Interface (ABCI) - and comes packaged with an example application to do so.
+Follow the [installation instructions](./introduction/install) to get up and running
+quickly. For more details on [using tendermint](./tendermint-core/using-tendermint), see that
+section and the ones that follow.
 
-and may contain broken internal links when viewed from Github.
+## Networks
+
+Testnets can be set up manually on one or more machines, or automatically on one
+or more machines, using a variety of methods described in the [deploy testnets
+section](./networks/deploy-testnets).
+
+## Application Development
+
+The first step to building an application on Tendermint is to [install
+ABCI-CLI](./app-dev/getting-started) and play with the example applications.
diff --git a/docs/_static/custom_collapsible_code.css b/docs/_static/custom_collapsible_code.css
deleted file mode 100644
index 695268a83..000000000
--- a/docs/_static/custom_collapsible_code.css
+++ /dev/null
@@ -1,17 +0,0 @@
-.toggle {
-    padding-bottom: 1em ;
-}
-
-.toggle .header {
-    display: block;
-    clear: both;
-    cursor: pointer;
-}
-
-.toggle .header:after {
-    content: " ▼";
-}
-
-.toggle .header.open:after {
-    content: " ▲";
-}
\ No newline at end of file
diff --git a/docs/_static/custom_collapsible_code.js b/docs/_static/custom_collapsible_code.js
deleted file mode 100644
index f4ff22adc..000000000
--- a/docs/_static/custom_collapsible_code.js
+++ /dev/null
@@ -1,10 +0,0 @@
-let makeCodeBlocksCollapsible = function() {
-  $(".toggle > *").hide();
-  $(".toggle .header").show();
-  $(".toggle .header").click(function() {
-    $(this).parent().children().not(".header").toggle({"duration": 400});
-    $(this).parent().children(".header").toggleClass("open");
-  });
-};
-// we could use the }(); way if we would have access to jQuery in HEAD, i.e.
we would need to force the theme -// to load jQuery before our custom scripts diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html deleted file mode 100644 index 736460bc0..000000000 --- a/docs/_templates/layout.html +++ /dev/null @@ -1,20 +0,0 @@ -{% extends "!layout.html" %} - -{% set css_files = css_files + ["_static/custom_collapsible_code.css"] %} - -# sadly, I didn't find a css style way to add custom JS to a list that is automagically added to head like CSS (above) #} -{% block extrahead %} - -{% endblock %} - -{% block footer %} - -{% endblock %} - - diff --git a/docs/abci-spec.md b/docs/abci-spec.md deleted file mode 100644 index f9677f448..000000000 --- a/docs/abci-spec.md +++ /dev/null @@ -1,324 +0,0 @@ -# ABCI Specification - -## Message Types - -ABCI requests/responses are defined as simple Protobuf messages in [this -schema file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). -TendermintCore sends the requests, and the ABCI application sends the -responses. Here, we provide an overview of the messages types and how -they are used by Tendermint. Then we describe each request-response pair -as a function with arguments and return values, and add some notes on -usage. - -Some messages (`Echo, Info, InitChain, BeginBlock, EndBlock, Commit`), -don't return errors because an error would indicate a critical failure -in the application and there's nothing Tendermint can do. The problem -should be addressed and both Tendermint and the application restarted. -All other messages (`SetOption, Query, CheckTx, DeliverTx`) return an -application-specific response `Code uint32`, where only `0` is reserved -for `OK`. - -Some messages (`SetOption, Query, CheckTx, DeliverTx`) return -non-deterministic data in the form of `Info` and `Log`. The `Log` is -intended for the literal output from the application's logger, while the -`Info` is any additional info that should be returned. - -The first time a new blockchain is started, Tendermint calls -`InitChain`. From then on, the Block Execution Sequence that causes the -committed state to be updated is as follows: - -`BeginBlock, [DeliverTx], EndBlock, Commit` - -where one `DeliverTx` is called for each transaction in the block. -Cryptographic commitments to the results of DeliverTx, EndBlock, and -Commit are included in the header of the next block. - -Tendermint opens three connections to the application to handle the -different message types: - -- `Consensus Connection - InitChain, BeginBlock, DeliverTx, EndBlock, Commit` -- `Mempool Connection - CheckTx` -- `Info Connection - Info, SetOption, Query` - -The `Flush` message is used on every connection, and the `Echo` message -is only used for debugging. - -Note that messages may be sent concurrently across all connections -a -typical application will thus maintain a distinct state for each -connection. They may be referred to as the `DeliverTx state`, the -`CheckTx state`, and the `Commit state` respectively. - -See below for more details on the message types and how they are used. - -## Request/Response Messages - -### Echo - -- **Request**: - - `Message (string)`: A string to echo back -- **Response**: - - `Message (string)`: The input string -- **Usage**: - - Echo a string to test an abci client/server implementation - -### Flush - -- **Usage**: - - Signals that messages queued on the client should be flushed to - the server. 
It is called periodically by the client - implementation to ensure asynchronous requests are actually - sent, and is called immediately to make a synchronous request, - which returns when the Flush response comes back. - -### Info - -- **Request**: - - `Version (string)`: The Tendermint version -- **Response**: - - `Data (string)`: Some arbitrary information - - `Version (Version)`: Version information - - `LastBlockHeight (int64)`: Latest block for which the app has - called Commit - - `LastBlockAppHash ([]byte)`: Latest result of Commit -- **Usage**: - - Return information about the application state. - - Used to sync Tendermint with the application during a handshake - that happens on startup. - - Tendermint expects `LastBlockAppHash` and `LastBlockHeight` to - be updated during `Commit`, ensuring that `Commit` is never - called twice for the same block height. - -### SetOption - -- **Request**: - - `Key (string)`: Key to set - - `Value (string)`: Value to set for key -- **Response**: - - `Code (uint32)`: Response code - - `Log (string)`: The output of the application's logger. May - be non-deterministic. - - `Info (string)`: Additional information. May - be non-deterministic. -- **Usage**: - - Set non-consensus critical application specific options. - - e.g. Key="min-fee", Value="100fermion" could set the minimum fee - required for CheckTx (but not DeliverTx - that would be - consensus critical). - -### InitChain - -- **Request**: - - `Validators ([]Validator)`: Initial genesis validators - - `AppStateBytes ([]byte)`: Serialized initial application state -- **Response**: - - `ConsensusParams (ConsensusParams)`: Initial - consensus-critical parameters. - - `Validators ([]Validator)`: Initial validator set. -- **Usage**: - - Called once upon genesis. - -### Query - -- **Request**: - - `Data ([]byte)`: Raw query bytes. Can be used with or in lieu - of Path. - - `Path (string)`: Path of request, like an HTTP GET path. Can be - used with or in liue of Data. - - Apps MUST interpret '/store' as a query by key on the - underlying store. The key SHOULD be specified in the Data field. - - Apps SHOULD allow queries over specific types like - '/accounts/...' or '/votes/...' - - `Height (int64)`: The block height for which you want the query - (default=0 returns data for the latest committed block). Note - that this is the height of the block containing the - application's Merkle root hash, which represents the state as it - was after committing the block at Height-1 - - `Prove (bool)`: Return Merkle proof with response if possible -- **Response**: - - `Code (uint32)`: Response code. - - `Log (string)`: The output of the application's logger. May - be non-deterministic. - - `Info (string)`: Additional information. May - be non-deterministic. - - `Index (int64)`: The index of the key in the tree. - - `Key ([]byte)`: The key of the matching data. - - `Value ([]byte)`: The value of the matching data. - - `Proof ([]byte)`: Proof for the data, if requested. - - `Height (int64)`: The block height from which data was derived. - Note that this is the height of the block containing the - application's Merkle root hash, which represents the state as it - was after committing the block at Height-1 -- **Usage**: - - Query for data from the application at current or past height. - - Optionally return Merkle proof. - -### BeginBlock - -- **Request**: - - `Hash ([]byte)`: The block's hash. This can be derived from the - block header. 
- - `Header (struct{})`: The block header - - `Validators ([]SigningValidator)`: List of validators in the current validator - set and whether or not they signed a vote in the LastCommit - - `ByzantineValidators ([]Evidence)`: List of evidence of - validators that acted maliciously -- **Response**: - - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing -- **Usage**: - - Signals the beginning of a new block. Called prior to - any DeliverTxs. - - The header is expected to at least contain the Height. - - The `Validators` and `ByzantineValidators` can be used to - determine rewards and punishments for the validators. - -### CheckTx - -- **Request**: - - `Tx ([]byte)`: The request transaction bytes -- **Response**: - - `Code (uint32)`: Response code - - `Data ([]byte)`: Result bytes, if any. - - `Log (string)`: The output of the application's logger. May - be non-deterministic. - - `Info (string)`: Additional information. May - be non-deterministic. - - `GasWanted (int64)`: Amount of gas request for transaction. - - `GasUsed (int64)`: Amount of gas consumed by transaction. - - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing - transactions (eg. by account). - - `Fee (cmn.KI64Pair)`: Fee paid for the transaction. -- **Usage**: Validate a mempool transaction, prior to broadcasting - or proposing. CheckTx should perform stateful but light-weight - checks of the validity of the transaction (like checking signatures - and account balances), but need not execute in full (like running a - smart contract). - - Tendermint runs CheckTx and DeliverTx concurrently with eachother, - though on distinct ABCI connections - the mempool connection and the - consensus connection, respectively. - - The application should maintain a separate state to support CheckTx. - This state can be reset to the latest committed state during - `Commit`, where Tendermint ensures the mempool is locked and not - sending new `CheckTx`. After `Commit`, the mempool will rerun - CheckTx on all remaining transactions, throwing out any that are no - longer valid. - - Keys and values in Tags must be UTF-8 encoded strings (e.g. - "account.owner": "Bob", "balance": "100.0", "date": "2018-01-02") - -### DeliverTx - -- **Request**: - - `Tx ([]byte)`: The request transaction bytes. -- **Response**: - - `Code (uint32)`: Response code. - - `Data ([]byte)`: Result bytes, if any. - - `Log (string)`: The output of the application's logger. May - be non-deterministic. - - `Info (string)`: Additional information. May - be non-deterministic. - - `GasWanted (int64)`: Amount of gas requested for transaction. - - `GasUsed (int64)`: Amount of gas consumed by transaction. - - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing - transactions (eg. by account). - - `Fee (cmn.KI64Pair)`: Fee paid for the transaction. -- **Usage**: - - Deliver a transaction to be executed in full by the application. - If the transaction is valid, returns CodeType.OK. - - Keys and values in Tags must be UTF-8 encoded strings (e.g. - "account.owner": "Bob", "balance": "100.0", - "time": "2018-01-02T12:30:00Z") - -### EndBlock - -- **Request**: - - `Height (int64)`: Height of the block just executed. -- **Response**: - - `ValidatorUpdates ([]Validator)`: Changes to validator set (set - voting power to 0 to remove). - - `ConsensusParamUpdates (ConsensusParams)`: Changes to - consensus-critical time, size, and other parameters. 
- - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing -- **Usage**: - - Signals the end of a block. - - Called prior to each Commit, after all transactions. - - Validator set and consensus params are updated with the result. - - Validator pubkeys are expected to be go-wire encoded. - -### Commit - -- **Response**: - - `Data ([]byte)`: The Merkle root hash -- **Usage**: - - Persist the application state. - - Return a Merkle root hash of the application state. - - It's critical that all application instances return the - same hash. If not, they will not be able to agree on the next - block, because the hash is included in the next block! - -## Data Messages - -### Header - -- **Fields**: - - `ChainID (string)`: ID of the blockchain - - `Height (int64)`: Height of the block in the chain - - `Time (int64)`: Unix time of the block - - `NumTxs (int32)`: Number of transactions in the block - - `TotalTxs (int64)`: Total number of transactions in the blockchain until - now - - `LastBlockHash ([]byte)`: Hash of the previous (parent) block - - `ValidatorsHash ([]byte)`: Hash of the validator set for this block - - `AppHash ([]byte)`: Data returned by the last call to `Commit` - typically the - Merkle root of the application state after executing the previous block's - transactions - - `Proposer (Validator)`: Original proposer for the block -- **Usage**: - - Provided in RequestBeginBlock - - Provides important context about the current state of the blockchain - - especially height and time. - - Provides the proposer of the current block, for use in proposer-based - reward mechanisms. - -### Validator - -- **Fields**: - - `Address ([]byte)`: Address of the validator (hash of the public key) - - `PubKey (PubKey)`: Public key of the validator - - `Power (int64)`: Voting power of the validator -- **Usage**: - - Provides all identifying information about the validator - -### SigningValidator - -- **Fields**: - - `Validator (Validator)`: A validator - - `SignedLastBlock (bool)`: Indicated whether or not the validator signed - the last block -- **Usage**: - - Indicates whether a validator signed the last block, allowing for rewards - based on validator availability - -### PubKey - -- **Fields**: - - `Type (string)`: Type of the public key. A simple string like `"ed25519"`. - In the future, may indicate a serialization algorithm to parse the `Data`, - for instance `"amino"`. - - `Data ([]byte)`: Public key data. For a simple public key, it's just the - raw bytes. If the `Type` indicates an encoding algorithm, this is the - encoded public key. -- **Usage**: - - A generic and extensible typed public key - -### Evidence - -- **Fields**: - - `Type (string)`: Type of the evidence. A hierarchical path like - "duplicate/vote". - - `Validator (Validator`: The offending validator - - `Height (int64)`: Height when the offense was committed - - `Time (int64)`: Unix time of the block at height `Height` - - `TotalVotingPower (int64)`: Total voting power of the validator set at - height `Height` diff --git a/docs/abci-cli.md b/docs/app-dev/abci-cli.md similarity index 54% rename from docs/abci-cli.md rename to docs/app-dev/abci-cli.md index d1ab696eb..4f9019fda 100644 --- a/docs/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -10,41 +10,47 @@ Make sure you [have Go installed](https://golang.org/doc/install). 
Next, install the `abci-cli` tool and example applications: - go get github.com/tendermint/tendermint +``` +go get github.com/tendermint/tendermint +``` to get vendored dependencies: - cd $GOPATH/src/github.com/tendermint/tendermint - make get_tools - make get_vendor_deps - make install_abci +``` +cd $GOPATH/src/github.com/tendermint/tendermint +make get_tools +make get_vendor_deps +make install_abci +``` Now run `abci-cli` to see the list of commands: - Usage: - abci-cli [command] - - Available Commands: - batch Run a batch of abci commands against an application - check_tx Validate a tx - commit Commit the application state and return the Merkle root hash - console Start an interactive abci console for multiple commands - counter ABCI demo example - deliver_tx Deliver a new tx to the application - kvstore ABCI demo example - echo Have the application echo a message - help Help about any command - info Get some info about the application - query Query the application state - set_option Set an options on the application - - Flags: - --abci string socket or grpc (default "socket") - --address string address of application socket (default "tcp://127.0.0.1:26658") - -h, --help help for abci-cli - -v, --verbose print the command and results as if it were a console session - - Use "abci-cli [command] --help" for more information about a command. +``` +Usage: + abci-cli [command] + +Available Commands: + batch Run a batch of abci commands against an application + check_tx Validate a tx + commit Commit the application state and return the Merkle root hash + console Start an interactive abci console for multiple commands + counter ABCI demo example + deliver_tx Deliver a new tx to the application + kvstore ABCI demo example + echo Have the application echo a message + help Help about any command + info Get some info about the application + query Query the application state + set_option Set an options on the application + +Flags: + --abci string socket or grpc (default "socket") + --address string address of application socket (default "tcp://127.0.0.1:26658") + -h, --help help for abci-cli + -v, --verbose print the command and results as if it were a console session + +Use "abci-cli [command] --help" for more information about a command. 
+``` ## KVStore - First Example @@ -63,59 +69,69 @@ Its code can be found [here](https://github.com/tendermint/tendermint/blob/develop/abci/cmd/abci-cli/abci-cli.go) and looks like: - func cmdKVStore(cmd *cobra.Command, args []string) error { - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - - // Create the application - in memory or persisted to disk - var app types.Application - if flagPersist == "" { - app = kvstore.NewKVStoreApplication() - } else { - app = kvstore.NewPersistentKVStoreApplication(flagPersist) - app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) - } - - // Start the listener - srv, err := server.NewServer(flagAddrD, flagAbci, app) - if err != nil { - return err - } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { - return err - } - - // Wait forever - cmn.TrapSignal(func() { - // Cleanup - srv.Stop() - }) - return nil +``` +func cmdKVStore(cmd *cobra.Command, args []string) error { + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + + // Create the application - in memory or persisted to disk + var app types.Application + if flagPersist == "" { + app = kvstore.NewKVStoreApplication() + } else { + app = kvstore.NewPersistentKVStoreApplication(flagPersist) + app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) } + // Start the listener + srv, err := server.NewServer(flagAddrD, flagAbci, app) + if err != nil { + return err + } + srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { + return err + } + + // Wait forever + cmn.TrapSignal(func() { + // Cleanup + srv.Stop() + }) + return nil +} +``` + Start by running: - abci-cli kvstore +``` +abci-cli kvstore +``` And in another terminal, run - abci-cli echo hello - abci-cli info +``` +abci-cli echo hello +abci-cli info +``` You'll see something like: - -> data: hello - -> data.hex: 68656C6C6F +``` +-> data: hello +-> data.hex: 68656C6C6F +``` and: - -> data: {"size":0} - -> data.hex: 7B2273697A65223A307D +``` +-> data: {"size":0} +-> data.hex: 7B2273697A65223A307D +``` An ABCI application must provide two things: -- a socket server -- a handler for ABCI messages +- a socket server +- a handler for ABCI messages When we run the `abci-cli` tool we open a new connection to the application's socket server, send the given ABCI message, and wait for a @@ -144,52 +160,54 @@ speaking ABCI messages to your application. 
Try running these commands: - > echo hello - -> code: OK - -> data: hello - -> data.hex: 0x68656C6C6F - - > info - -> code: OK - -> data: {"size":0} - -> data.hex: 0x7B2273697A65223A307D - - > commit - -> code: OK - -> data.hex: 0x0000000000000000 - - > deliver_tx "abc" - -> code: OK - - > info - -> code: OK - -> data: {"size":1} - -> data.hex: 0x7B2273697A65223A317D - - > commit - -> code: OK - -> data.hex: 0x0200000000000000 - - > query "abc" - -> code: OK - -> log: exists - -> height: 0 - -> value: abc - -> value.hex: 616263 - - > deliver_tx "def=xyz" - -> code: OK - - > commit - -> code: OK - -> data.hex: 0x0400000000000000 - - > query "def" - -> code: OK - -> log: exists - -> height: 0 - -> value: xyz - -> value.hex: 78797A +``` +> echo hello +-> code: OK +-> data: hello +-> data.hex: 0x68656C6C6F + +> info +-> code: OK +-> data: {"size":0} +-> data.hex: 0x7B2273697A65223A307D + +> commit +-> code: OK +-> data.hex: 0x0000000000000000 + +> deliver_tx "abc" +-> code: OK + +> info +-> code: OK +-> data: {"size":1} +-> data.hex: 0x7B2273697A65223A317D + +> commit +-> code: OK +-> data.hex: 0x0200000000000000 + +> query "abc" +-> code: OK +-> log: exists +-> height: 0 +-> value: abc +-> value.hex: 616263 + +> deliver_tx "def=xyz" +-> code: OK + +> commit +-> code: OK +-> data.hex: 0x0400000000000000 + +> query "def" +-> code: OK +-> log: exists +-> height: 0 +-> value: xyz +-> value.hex: 78797A +``` Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if we do `deliver_tx "abc=efg"` it will store `(abc, efg)`. @@ -206,29 +224,31 @@ Like the kvstore app, its code can be found [here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go) and looks like: - func cmdCounter(cmd *cobra.Command, args []string) error { - - app := counter.NewCounterApplication(flagSerial) - - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - - // Start the listener - srv, err := server.NewServer(flagAddrC, flagAbci, app) - if err != nil { - return err - } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { - return err - } - - // Wait forever - cmn.TrapSignal(func() { - // Cleanup - srv.Stop() - }) - return nil +``` +func cmdCounter(cmd *cobra.Command, args []string) error { + + app := counter.NewCounterApplication(flagSerial) + + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + + // Start the listener + srv, err := server.NewServer(flagAddrC, flagAbci, app) + if err != nil { + return err } + srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { + return err + } + + // Wait forever + cmn.TrapSignal(func() { + // Cleanup + srv.Stop() + }) + return nil +} +``` The counter app doesn't use a Merkle tree, it just counts how many times we've sent a transaction, asked for a hash, or committed the state. The @@ -256,38 +276,42 @@ whose integer is greater than the last committed one. Let's kill the console and the kvstore application, and start the counter app: - abci-cli counter +``` +abci-cli counter +``` In another window, start the `abci-cli console`: - > set_option serial on - -> code: OK - -> log: OK (SetOption doesn't return anything.) +``` +> set_option serial on +-> code: OK +-> log: OK (SetOption doesn't return anything.) - > check_tx 0x00 - -> code: OK +> check_tx 0x00 +-> code: OK - > check_tx 0xff - -> code: OK +> check_tx 0xff +-> code: OK - > deliver_tx 0x00 - -> code: OK +> deliver_tx 0x00 +-> code: OK - > check_tx 0x00 - -> code: BadNonce - -> log: Invalid nonce. 
Expected >= 1, got 0
+> check_tx 0x00
+-> code: BadNonce
+-> log: Invalid nonce. Expected >= 1, got 0

-    > deliver_tx 0x01
-    -> code: OK
+> deliver_tx 0x01
+-> code: OK

-    > deliver_tx 0x04
-    -> code: BadNonce
-    -> log: Invalid nonce. Expected 2, got 4
+> deliver_tx 0x04
+-> code: BadNonce
+-> log: Invalid nonce. Expected 2, got 4

-    > info
-    -> code: OK
-    -> data: {"hashes":0,"txs":2}
-    -> data.hex: 0x7B22686173686573223A302C22747873223A327D
+> info
+-> code: OK
+-> data: {"hashes":0,"txs":2}
+-> data.hex: 0x7B22686173686573223A302C22747873223A327D
+```

This is a very simple application, but between `counter` and `kvstore`,
it's easy to see how you can build out arbitrary application states on
@@ -304,7 +328,9 @@ example directory](https://github.com/tendermint/tendermint/tree/develop/abci/ex

To run the Node JS version, `cd` to `example/js` and run

-    node app.js
+```
+node app.js
+```

(you'll have to kill the other counter application process). In another
window, run the console and those previous ABCI commands. You should get
@@ -323,6 +349,6 @@ connects to the app using three separate connections, each with its own
pattern of messages.

For more information, see the [application developers
-guide](./app-development.html). For examples of running an ABCI app with
-Tendermint, see the [getting started guide](./getting-started.html).
+guide](./app-development.md). For examples of running an ABCI app with
+Tendermint, see the [getting started guide](./getting-started.md).
Next is the ABCI specification.
diff --git a/docs/app-dev/abci-spec.md b/docs/app-dev/abci-spec.md
new file mode 100644
index 000000000..ef274a4e8
--- /dev/null
+++ b/docs/app-dev/abci-spec.md
@@ -0,0 +1,325 @@
+# ABCI Specification
+
+## Message Types
+
+ABCI requests/responses are defined as simple Protobuf messages in [this
+schema file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto).
+Tendermint Core sends the requests, and the ABCI application sends the
+responses. Here, we provide an overview of the message types and how
+they are used by Tendermint. Then we describe each request-response pair
+as a function with arguments and return values, and add some notes on
+usage.
+
+Some messages (`Echo, Info, InitChain, BeginBlock, EndBlock, Commit`)
+don't return errors because an error would indicate a critical failure
+in the application and there's nothing Tendermint can do. The problem
+should be addressed and both Tendermint and the application restarted.
+All other messages (`SetOption, Query, CheckTx, DeliverTx`) return an
+application-specific response `Code uint32`, where only `0` is reserved
+for `OK`.
+
+Some messages (`SetOption, Query, CheckTx, DeliverTx`) return
+non-deterministic data in the form of `Info` and `Log`. The `Log` is
+intended for the literal output from the application's logger, while the
+`Info` is any additional info that should be returned.
+
+The first time a new blockchain is started, Tendermint calls
+`InitChain`. From then on, the Block Execution Sequence that causes the
+committed state to be updated is as follows:
+
+`BeginBlock, [DeliverTx], EndBlock, Commit`
+
+where one `DeliverTx` is called for each transaction in the block.
+Cryptographic commitments to the results of DeliverTx, EndBlock, and
+Commit are included in the header of the next block.
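+
+As an illustration of that calling order, the Go sketch below drives a
+hypothetical `Application` whose methods are named after the messages
+above; it is a stand-in for illustration only, not the real ABCI Go
+interface:
+
+```
+// Header and Application are hypothetical stand-ins; the method names
+// mirror the ABCI messages described in this document.
+type Header struct {
+	Height int64
+}
+
+type Application interface {
+	BeginBlock(header Header)
+	DeliverTx(tx []byte) (code uint32)
+	EndBlock(height int64)
+	Commit() (appHash []byte)
+}
+
+// executeBlock runs the Block Execution Sequence: one BeginBlock, one
+// DeliverTx per transaction, one EndBlock, then Commit. The returned
+// app hash is what the *next* block's header commits to.
+func executeBlock(app Application, header Header, txs [][]byte) []byte {
+	app.BeginBlock(header)
+	for _, tx := range txs {
+		app.DeliverTx(tx)
+	}
+	app.EndBlock(header.Height)
+	return app.Commit()
+}
+```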
+
+Tendermint opens three connections to the application to handle the
+different message types:
+
+- `Consensus Connection - InitChain, BeginBlock, DeliverTx, EndBlock, Commit`
+- `Mempool Connection - CheckTx`
+- `Info Connection - Info, SetOption, Query`
+
+The `Flush` message is used on every connection, and the `Echo` message
+is only used for debugging.
+
+Note that messages may be sent concurrently across all connections - a
+typical application will thus maintain a distinct state for each
+connection. They may be referred to as the `DeliverTx state`, the
+`CheckTx state`, and the `Commit state` respectively.
+
+See below for more details on the message types and how they are used.
+
+## Request/Response Messages
+
+### Echo
+
+- **Request**:
+  - `Message (string)`: A string to echo back
+- **Response**:
+  - `Message (string)`: The input string
+- **Usage**:
+  - Echo a string to test an abci client/server implementation
+
+### Flush
+
+- **Usage**:
+  - Signals that messages queued on the client should be flushed to
+    the server. It is called periodically by the client
+    implementation to ensure asynchronous requests are actually
+    sent, and is called immediately to make a synchronous request,
+    which returns when the Flush response comes back.
+
+### Info
+
+- **Request**:
+  - `Version (string)`: The Tendermint version
+- **Response**:
+  - `Data (string)`: Some arbitrary information
+  - `Version (Version)`: Version information
+  - `LastBlockHeight (int64)`: Latest block for which the app has
+    called Commit
+  - `LastBlockAppHash ([]byte)`: Latest result of Commit
+- **Usage**:
+  - Return information about the application state.
+  - Used to sync Tendermint with the application during a handshake
+    that happens on startup.
+  - Tendermint expects `LastBlockAppHash` and `LastBlockHeight` to
+    be updated during `Commit`, ensuring that `Commit` is never
+    called twice for the same block height.
+
+### SetOption
+
+- **Request**:
+  - `Key (string)`: Key to set
+  - `Value (string)`: Value to set for key
+- **Response**:
+  - `Code (uint32)`: Response code
+  - `Log (string)`: The output of the application's logger. May
+    be non-deterministic.
+  - `Info (string)`: Additional information. May
+    be non-deterministic.
+- **Usage**:
+  - Set non-consensus critical application specific options.
+  - e.g. Key="min-fee", Value="100fermion" could set the minimum fee
+    required for CheckTx (but not DeliverTx - that would be
+    consensus critical).
+
+### InitChain
+
+- **Request**:
+  - `Validators ([]Validator)`: Initial genesis validators
+  - `AppStateBytes ([]byte)`: Serialized initial application state
+- **Response**:
+  - `ConsensusParams (ConsensusParams)`: Initial
+    consensus-critical parameters.
+  - `Validators ([]Validator)`: Initial validator set.
+- **Usage**:
+  - Called once upon genesis.
+
+### Query
+
+- **Request**:
+  - `Data ([]byte)`: Raw query bytes. Can be used with or in lieu
+    of Path.
+  - `Path (string)`: Path of request, like an HTTP GET path. Can be
+    used with or in lieu of Data.
+    - Apps MUST interpret '/store' as a query by key on the
+      underlying store. The key SHOULD be specified in the Data field.
+    - Apps SHOULD allow queries over specific types like
+      '/accounts/...' or '/votes/...'
+  - `Height (int64)`: The block height for which you want the query
+    (default=0 returns data for the latest committed block). Note
+    that this is the height of the block containing the
+    application's Merkle root hash, which represents the state as it
+    was after committing the block at Height-1
+  - `Prove (bool)`: Return Merkle proof with response if possible
+- **Response**:
+  - `Code (uint32)`: Response code.
+  - `Log (string)`: The output of the application's logger. May
+    be non-deterministic.
+  - `Info (string)`: Additional information. May
+    be non-deterministic.
+  - `Index (int64)`: The index of the key in the tree.
+  - `Key ([]byte)`: The key of the matching data.
+  - `Value ([]byte)`: The value of the matching data.
+  - `Proof ([]byte)`: Proof for the data, if requested.
+  - `Height (int64)`: The block height from which data was derived.
+    Note that this is the height of the block containing the
+    application's Merkle root hash, which represents the state as it
+    was after committing the block at Height-1
+- **Usage**:
+  - Query for data from the application at current or past height.
+  - Optionally return Merkle proof.
+
+### BeginBlock
+
+- **Request**:
+  - `Hash ([]byte)`: The block's hash. This can be derived from the
+    block header.
+  - `Header (struct{})`: The block header
+  - `Validators ([]SigningValidator)`: List of validators in the current validator
+    set and whether or not they signed a vote in the LastCommit
+  - `ByzantineValidators ([]Evidence)`: List of evidence of
+    validators that acted maliciously
+- **Response**:
+  - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+- **Usage**:
+  - Signals the beginning of a new block. Called prior to
+    any DeliverTxs.
+  - The header is expected to at least contain the Height.
+  - The `Validators` and `ByzantineValidators` can be used to
+    determine rewards and punishments for the validators.
+
+### CheckTx
+
+- **Request**:
+  - `Tx ([]byte)`: The request transaction bytes
+- **Response**:
+  - `Code (uint32)`: Response code
+  - `Data ([]byte)`: Result bytes, if any.
+  - `Log (string)`: The output of the application's logger. May
+    be non-deterministic.
+  - `Info (string)`: Additional information. May
+    be non-deterministic.
+  - `GasWanted (int64)`: Amount of gas requested for transaction.
+  - `GasUsed (int64)`: Amount of gas consumed by transaction.
+  - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+    transactions (eg. by account).
+  - `Fee (cmn.KI64Pair)`: Fee paid for the transaction.
+- **Usage**: Validate a mempool transaction, prior to broadcasting
+  or proposing. CheckTx should perform stateful but light-weight
+  checks of the validity of the transaction (like checking signatures
+  and account balances), but need not execute in full (like running a
+  smart contract).
+
+  Tendermint runs CheckTx and DeliverTx concurrently with each other,
+  though on distinct ABCI connections - the mempool connection and the
+  consensus connection, respectively.
+
+  The application should maintain a separate state to support CheckTx.
+  This state can be reset to the latest committed state during
+  `Commit`. Before calling Commit, Tendermint will lock and flush the mempool,
+  ensuring that all existing CheckTx requests are responded to and no new ones can
+  begin. After `Commit`, the mempool will rerun
+  CheckTx for all remaining transactions, throwing out any that are no longer valid.
+  Then the mempool will unlock and start sending CheckTx again.
+
+  Keys and values in Tags must be UTF-8 encoded strings (e.g.
+ "account.owner": "Bob", "balance": "100.0", "date": "2018-01-02") + +### DeliverTx + +- **Request**: + - `Tx ([]byte)`: The request transaction bytes. +- **Response**: + - `Code (uint32)`: Response code. + - `Data ([]byte)`: Result bytes, if any. + - `Log (string)`: The output of the application's logger. May + be non-deterministic. + - `Info (string)`: Additional information. May + be non-deterministic. + - `GasWanted (int64)`: Amount of gas requested for transaction. + - `GasUsed (int64)`: Amount of gas consumed by transaction. + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing + transactions (eg. by account). + - `Fee (cmn.KI64Pair)`: Fee paid for the transaction. +- **Usage**: + - Deliver a transaction to be executed in full by the application. + If the transaction is valid, returns CodeType.OK. + - Keys and values in Tags must be UTF-8 encoded strings (e.g. + "account.owner": "Bob", "balance": "100.0", + "time": "2018-01-02T12:30:00Z") + +### EndBlock + +- **Request**: + - `Height (int64)`: Height of the block just executed. +- **Response**: + - `ValidatorUpdates ([]Validator)`: Changes to validator set (set + voting power to 0 to remove). + - `ConsensusParamUpdates (ConsensusParams)`: Changes to + consensus-critical time, size, and other parameters. + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing +- **Usage**: + - Signals the end of a block. + - Called prior to each Commit, after all transactions. + - Validator set and consensus params are updated with the result. + - Validator pubkeys are expected to be go-wire encoded. + +### Commit + +- **Response**: + - `Data ([]byte)`: The Merkle root hash +- **Usage**: + - Persist the application state. + - Return a Merkle root hash of the application state. + - It's critical that all application instances return the + same hash. If not, they will not be able to agree on the next + block, because the hash is included in the next block! + +## Data Messages + +### Header + +- **Fields**: + - `ChainID (string)`: ID of the blockchain + - `Height (int64)`: Height of the block in the chain + - `Time (int64)`: Unix time of the block + - `NumTxs (int32)`: Number of transactions in the block + - `TotalTxs (int64)`: Total number of transactions in the blockchain until + now + - `LastBlockHash ([]byte)`: Hash of the previous (parent) block + - `ValidatorsHash ([]byte)`: Hash of the validator set for this block + - `AppHash ([]byte)`: Data returned by the last call to `Commit` - typically the + Merkle root of the application state after executing the previous block's + transactions + - `Proposer (Validator)`: Original proposer for the block +- **Usage**: + - Provided in RequestBeginBlock + - Provides important context about the current state of the blockchain - + especially height and time. + - Provides the proposer of the current block, for use in proposer-based + reward mechanisms. 
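+
+As a sketch of such a proposer-based mechanism (using hypothetical local
+types that mirror the `Validator` and `SigningValidator` messages described
+below, and a made-up `credit` callback standing in for the application's
+own reward state), an application might do the following in BeginBlock:
+
+```
+// Hypothetical mirrors of the data messages; not the generated protobuf types.
+type Validator struct {
+	Address []byte // hash of the public key
+	Power   int64  // voting power
+}
+
+type SigningValidator struct {
+	Validator       Validator
+	SignedLastBlock bool
+}
+
+// rewardBlock sketches one possible rule: the proposer from the header
+// gets a fixed bonus, and every validator that signed the last block is
+// credited in proportion to its voting power.
+func rewardBlock(proposer Validator, votes []SigningValidator,
+	credit func(addr []byte, amount int64)) {
+	credit(proposer.Address, 100) // arbitrary proposer bonus
+	for _, v := range votes {
+		if v.SignedLastBlock {
+			credit(v.Validator.Address, v.Validator.Power)
+		}
+	}
+}
+```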
+
+### Validator
+
+- **Fields**:
+  - `Address ([]byte)`: Address of the validator (hash of the public key)
+  - `PubKey (PubKey)`: Public key of the validator
+  - `Power (int64)`: Voting power of the validator
+- **Usage**:
+  - Provides all identifying information about the validator
+
+### SigningValidator
+
+- **Fields**:
+  - `Validator (Validator)`: A validator
+  - `SignedLastBlock (bool)`: Indicates whether or not the validator signed
+    the last block
+- **Usage**:
+  - Indicates whether a validator signed the last block, allowing for rewards
+    based on validator availability
+
+### PubKey
+
+- **Fields**:
+  - `Type (string)`: Type of the public key. A simple string like `"ed25519"`.
+    In the future, may indicate a serialization algorithm to parse the `Data`,
+    for instance `"amino"`.
+  - `Data ([]byte)`: Public key data. For a simple public key, it's just the
+    raw bytes. If the `Type` indicates an encoding algorithm, this is the
+    encoded public key.
+- **Usage**:
+  - A generic and extensible typed public key
+
+### Evidence
+
+- **Fields**:
+  - `Type (string)`: Type of the evidence. A hierarchical path like
+    "duplicate/vote".
+  - `Validator (Validator)`: The offending validator
+  - `Height (int64)`: Height when the offense was committed
+  - `Time (int64)`: Unix time of the block at height `Height`
+  - `TotalVotingPower (int64)`: Total voting power of the validator set at
+    height `Height`
diff --git a/docs/app-architecture.md b/docs/app-dev/app-architecture.md
similarity index 97%
rename from docs/app-architecture.md
rename to docs/app-dev/app-architecture.md
index 401e28cca..9ce0fae9f 100644
--- a/docs/app-architecture.md
+++ b/docs/app-dev/app-architecture.md
@@ -17,7 +17,7 @@ transaction is actually processed.
 
 The ABCI application must be a deterministic result of the Tendermint
 consensus - any external influence on the application state that didn't
-come through Tendermint could cause a consensus failure. Thus *nothing*
+come through Tendermint could cause a consensus failure. Thus _nothing_
 should communicate with the application except Tendermint via ABCI.
 
 If the application is written in Go, it can be compiled into the
@@ -43,6 +43,7 @@ all transactions, and possibly all queries, should still pass through
 Tendermint.
 
 See the following for more extensive documentation:
+
 - [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
 - [Tendermint RPC Docs](https://tendermint.github.io/slate/)
 - [Tendermint in Production](https://github.com/tendermint/tendermint/pull/1618)
diff --git a/docs/app-development.md b/docs/app-dev/app-development.md
similarity index 60%
rename from docs/app-development.md
rename to docs/app-dev/app-development.md
index 0b6c402ef..a795673f1 100644
--- a/docs/app-development.md
+++ b/docs/app-dev/app-development.md
@@ -16,28 +16,27 @@ committed in hash-linked blocks.
The ABCI design has a few distinct components:

-- message protocol
-    - pairs of request and response messages
-    - consensus makes requests, application responds
-    - defined using protobuf
-- server/client
-    - consensus engine runs the client
-    - application runs the server
-    - two implementations:
-        - async raw bytes
-        - grpc
-- blockchain protocol
-    - abci is connection oriented
-    - Tendermint Core maintains three connections:
-        - [mempool connection](#mempool-connection): for checking if
-          transactions should be relayed before they are committed;
-          only uses `CheckTx`
-        - [consensus connection](#consensus-connection): for executing
-          transactions that have been committed. Message sequence is
-          -for every block -
-          `BeginBlock, [DeliverTx, ...], EndBlock, Commit`
-        - [query connection](#query-connection): for querying the
-          application state; only uses Query and Info
+- message protocol
+  - pairs of request and response messages
+  - consensus makes requests, application responds
+  - defined using protobuf
+- server/client
+  - consensus engine runs the client
+  - application runs the server
+  - two implementations:
+    - async raw bytes
+    - grpc
+- blockchain protocol
+  - abci is connection oriented
+  - Tendermint Core maintains three connections:
+    - [mempool connection](#mempool-connection): for checking if
+      transactions should be relayed before they are committed;
+      only uses `CheckTx`
+    - [consensus connection](#consensus-connection): for executing
+      transactions that have been committed. Message sequence is,
+      for every block: `BeginBlock, [DeliverTx, ...], EndBlock, Commit`
+    - [query connection](#query-connection): for querying the
+      application state; only uses Query and Info

The mempool and consensus logic act as clients, and each maintains an
open ABCI connection with the application, which hosts an ABCI server.
@@ -64,9 +63,9 @@ To use ABCI in your programming language of choice, there must be an
ABCI server in that language. Tendermint supports two kinds of
implementation of the server:

-- Asynchronous, raw socket server (Tendermint Socket Protocol, also
-  known as TSP or Teaspoon)
-- GRPC
+- Asynchronous, raw socket server (Tendermint Socket Protocol, also
+  known as TSP or Teaspoon)
+- GRPC

Both can be tested using the `abci-cli` by setting the `--abci` flag
appropriately (ie. to `socket` or `grpc`).
@@ -99,14 +98,14 @@ performance, or otherwise enjoy programming, you may implement your own
ABCI server using the Tendermint Socket Protocol, known affectionately
as Teaspoon. The first step is still to auto-generate the relevant data
types and codec in your language using `protoc`. Messages coming over
-the socket are Protobuf3 encoded, but additionally length-prefixed to
-facilitate use as a streaming protocol. Protobuf3 doesn't have an
+the socket are proto3 encoded, but additionally length-prefixed to
+facilitate use as a streaming protocol. proto3 doesn't have an
official length-prefix standard, so we use our own. The first byte in
the prefix represents the length of the Big Endian encoded length. The
remaining bytes in the prefix are the Big Endian encoded length.

-For example, if the Protobuf3 encoded ABCI message is 0xDEADBEEF (4
-bytes), the length-prefixed message is 0x0104DEADBEEF. If the Protobuf3
+For example, if the proto3 encoded ABCI message is 0xDEADBEEF (4
+bytes), the length-prefixed message is 0x0104DEADBEEF. If the proto3
encoded ABCI message is 65535 bytes long, the length-prefixed message
would be like 0x02FFFF....
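+
+As a concrete illustration of this scheme (a hypothetical helper, not part
+of the Tendermint API), the prefix described above can be produced in Go
+like so:
+
+```
+import "encoding/binary"
+
+// lengthPrefix prepends: one byte giving the length of the big-endian
+// encoded length, then the big-endian encoded length itself (leading
+// zero bytes stripped), then the message. For a 4-byte message it
+// yields 0x0104..., matching the 0x0104DEADBEEF example above.
+func lengthPrefix(msg []byte) []byte {
+	var lenBuf [8]byte
+	binary.BigEndian.PutUint64(lenBuf[:], uint64(len(msg)))
+	i := 0
+	for i < 7 && lenBuf[i] == 0 { // strip leading zeros, keep at least one byte
+		i++
+	}
+	beLen := lenBuf[i:]
+	out := make([]byte, 0, 1+len(beLen)+len(msg))
+	out = append(out, byte(len(beLen)))
+	out = append(out, beLen...)
+	return append(out, msg...)
+}
+```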
@@ -161,7 +160,7 @@ connection, to query the local state of the app.

### Mempool Connection

-The mempool connection is used *only* for CheckTx requests. Transactions
+The mempool connection is used _only_ for CheckTx requests. Transactions
are run using CheckTx in the same order they were received by the
validator. If the CheckTx returns `OK`, the transaction is kept in
memory and relayed to other peers in the same order it was received.
@@ -180,23 +179,27 @@ mempool state (this behaviour can be turned off with

In go:

-    func (app *KVStoreApplication) CheckTx(tx []byte) types.Result {
-        return types.OK
-    }
+```
+func (app *KVStoreApplication) CheckTx(tx []byte) types.Result {
+	return types.OK
+}
+```

In Java:

-    ResponseCheckTx requestCheckTx(RequestCheckTx req) {
-        byte[] transaction = req.getTx().toByteArray();
-
-        // validate transaction
-
-        if (notValid) {
-            return ResponseCheckTx.newBuilder().setCode(CodeType.BadNonce).setLog("invalid tx").build();
-        } else {
-            return ResponseCheckTx.newBuilder().setCode(CodeType.OK).build();
-        }
+```
+ResponseCheckTx requestCheckTx(RequestCheckTx req) {
+    byte[] transaction = req.getTx().toByteArray();
+
+    // validate transaction
+
+    if (notValid) {
+        return ResponseCheckTx.newBuilder().setCode(CodeType.BadNonce).setLog("invalid tx").build();
+    } else {
+        return ResponseCheckTx.newBuilder().setCode(CodeType.OK).build();
    }
+}
+```

### Replay Protection

@@ -242,43 +245,47 @@ merkle root of the data returned by the DeliverTx requests, or both.

In go:

-    // tx is either "key=value" or just arbitrary bytes
-    func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
-        parts := strings.Split(string(tx), "=")
-        if len(parts) == 2 {
-            app.state.Set([]byte(parts[0]), []byte(parts[1]))
-        } else {
-            app.state.Set(tx, tx)
-        }
-        return types.OK
-    }
+```
+// tx is either "key=value" or just arbitrary bytes
+func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
+	parts := strings.Split(string(tx), "=")
+	if len(parts) == 2 {
+		app.state.Set([]byte(parts[0]), []byte(parts[1]))
+	} else {
+		app.state.Set(tx, tx)
+	}
+	return types.OK
+}
+```

In Java:

-    /**
-     * Using Protobuf types from the protoc compiler, we always start with a byte[]
-     */
-    ResponseDeliverTx deliverTx(RequestDeliverTx request) {
-        byte[] transaction = request.getTx().toByteArray();
-
-        // validate your transaction
-
-        if (notValid) {
-            return ResponseDeliverTx.newBuilder().setCode(CodeType.BadNonce).setLog("transaction was invalid").build();
-        } else {
-            ResponseDeliverTx.newBuilder().setCode(CodeType.OK).build();
-        }
-
+```
+/**
+ * Using Protobuf types from the protoc compiler, we always start with a byte[]
+ */
+ResponseDeliverTx deliverTx(RequestDeliverTx request) {
+    byte[] transaction = request.getTx().toByteArray();
+
+    // validate your transaction
+
+    if (notValid) {
+        return ResponseDeliverTx.newBuilder().setCode(CodeType.BadNonce).setLog("transaction was invalid").build();
+    } else {
+        return ResponseDeliverTx.newBuilder().setCode(CodeType.OK).build();
    }
+}
+```
+
### Commit

Once all processing of the block is complete, Tendermint sends the
Commit request and blocks waiting for a response. While the mempool may
run concurrently with block processing (the BeginBlock, DeliverTxs, and
EndBlock), it is locked for the Commit request so that its state can be
-safely reset during Commit.
This means the app _MUST NOT_ do any +blocking communication with the mempool (ie. broadcast_tx) during Commit, or there will be deadlock. Note also that all remaining transactions in the mempool are replayed on the mempool connection (CheckTx) following a commit. @@ -294,21 +301,25 @@ job of the [Handshake](#handshake). In go: - func (app *KVStoreApplication) Commit() types.Result { - hash := app.state.Hash() - return types.NewResultOK(hash, "") - } +``` +func (app *KVStoreApplication) Commit() types.Result { + hash := app.state.Hash() + return types.NewResultOK(hash, "") +} +``` In Java: - ResponseCommit requestCommit(RequestCommit requestCommit) { - - // update the internal app-state - byte[] newAppState = calculateAppState(); - - // and return it to the node - return ResponseCommit.newBuilder().setCode(CodeType.OK).setData(ByteString.copyFrom(newAppState)).build(); - } +``` +ResponseCommit requestCommit(RequestCommit requestCommit) { + + // update the internal app-state + byte[] newAppState = calculateAppState(); + + // and return it to the node + return ResponseCommit.newBuilder().setCode(CodeType.OK).setData(ByteString.copyFrom(newAppState)).build(); +} +``` ### BeginBlock @@ -322,31 +333,35 @@ pick up from when it restarts. See information on the Handshake, below. In go: - // Track the block hash and header information - func (app *PersistentKVStoreApplication) BeginBlock(params types.RequestBeginBlock) { - // update latest block info - app.blockHeader = params.Header - - // reset valset changes - app.changes = make([]*types.Validator, 0) - } +``` +// Track the block hash and header information +func (app *PersistentKVStoreApplication) BeginBlock(params types.RequestBeginBlock) { + // update latest block info + app.blockHeader = params.Header + + // reset valset changes + app.changes = make([]*types.Validator, 0) +} +``` In Java: - /* - * all types come from protobuf definition - */ - ResponseBeginBlock requestBeginBlock(RequestBeginBlock req) { - - Header header = req.getHeader(); - byte[] prevAppHash = header.getAppHash().toByteArray(); - long prevHeight = header.getHeight(); - long numTxs = header.getNumTxs(); - - // run your pre-block logic. Maybe prepare a state snapshot, message components, etc - - return ResponseBeginBlock.newBuilder().build(); - } +``` +/* + * all types come from protobuf definition + */ +ResponseBeginBlock requestBeginBlock(RequestBeginBlock req) { + + Header header = req.getHeader(); + byte[] prevAppHash = header.getAppHash().toByteArray(); + long prevHeight = header.getHeight(); + long numTxs = header.getNumTxs(); + + // run your pre-block logic. Maybe prepare a state snapshot, message components, etc + + return ResponseBeginBlock.newBuilder().build(); +} +``` ### EndBlock @@ -364,25 +379,29 @@ for details on how it tracks validators. In go: - // Update the validator set - func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock { - return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} - } +``` +// Update the validator set +func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock { + return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} +} +``` In Java: - /* - * Assume that one validator changes. 
The new validator has a power of 10
-     */
-    ResponseEndBlock requestEndBlock(RequestEndBlock req) {
-        final long currentHeight = req.getHeight();
-        final byte[] validatorPubKey = getValPubKey();
-
-        ResponseEndBlock.Builder builder = ResponseEndBlock.newBuilder();
-        builder.addDiffs(1, Types.Validator.newBuilder().setPower(10L).setPubKey(ByteString.copyFrom(validatorPubKey)).build());
-
-        return builder.build();
-    }
+```
+/*
+ * Assume that one validator changes. The new validator has a power of 10
+ */
+ResponseEndBlock requestEndBlock(RequestEndBlock req) {
+    final long currentHeight = req.getHeight();
+    final byte[] validatorPubKey = getValPubKey();
+
+    ResponseEndBlock.Builder builder = ResponseEndBlock.newBuilder();
+    builder.addDiffs(1, Types.Validator.newBuilder().setPower(10L).setPubKey(ByteString.copyFrom(validatorPubKey)).build());
+
+    return builder.build();
+}
+```

### Query Connection

@@ -394,71 +413,75 @@ serialize each query as a single byte array. Additionally, certain
instance about which peers to connect to.

Tendermint Core currently uses the Query connection to filter peers upon
-connecting, according to IP address or public key. For instance,
+connecting, according to IP address or node ID. For instance,
returning non-OK ABCI response to either of the following queries will
cause Tendermint to not connect to the corresponding peer:

-- `p2p/filter/addr/<IP>`, where `<IP>` is an IP address.
-- `p2p/filter/pubkey/<pubkey>`, where `<pubkey>` is the hex-encoded
-  ED25519 key of the node (not it's validator key)
+- `p2p/filter/addr/<IP>`, where `<IP>` is an IP address.
+- `p2p/filter/id/<ID>`, where `<ID>` is the hex-encoded node ID (the hash of
+  the node's p2p pubkey).

Note: these query formats are subject to change!

In go:

-    func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
-        if reqQuery.Prove {
-            value, proof, exists := app.state.Proof(reqQuery.Data)
-            resQuery.Index = -1 // TODO make Proof return index
-            resQuery.Key = reqQuery.Data
-            resQuery.Value = value
-            resQuery.Proof = proof
-            if exists {
-                resQuery.Log = "exists"
-            } else {
-                resQuery.Log = "does not exist"
-            }
-            return
-        } else {
-            index, value, exists := app.state.Get(reqQuery.Data)
-            resQuery.Index = int64(index)
-            resQuery.Value = value
-            if exists {
-                resQuery.Log = "exists"
-            } else {
-                resQuery.Log = "does not exist"
-            }
-            return
-        }
-    }
+```
+func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+	if reqQuery.Prove {
+		value, proof, exists := app.state.Proof(reqQuery.Data)
+		resQuery.Index = -1 // TODO make Proof return index
+		resQuery.Key = reqQuery.Data
+		resQuery.Value = value
+		resQuery.Proof = proof
+		if exists {
+			resQuery.Log = "exists"
+		} else {
+			resQuery.Log = "does not exist"
+		}
+		return
+	} else {
+		index, value, exists := app.state.Get(reqQuery.Data)
+		resQuery.Index = int64(index)
+		resQuery.Value = value
+		if exists {
+			resQuery.Log = "exists"
+		} else {
+			resQuery.Log = "does not exist"
+		}
+		return
+	}
+}
+```

In Java:

-    ResponseQuery requestQuery(RequestQuery req) {
-        final boolean isProveQuery = req.getProve();
-        final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder();
-
-        if (isProveQuery) {
-            com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray());
-            final byte[] proofAsByteArray = proofResult.getAsByteArray();
-
-            responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray));
-            responseBuilder.setKey(req.getData());
-            responseBuilder.setValue(ByteString.copyFrom(proofResult.getData()));
-            responseBuilder.setLog(result.getLogValue());
-        } else {
-            byte[] queryData = req.getData().toByteArray();
-
-            final com.app.example.QueryResult result = generateQueryResult(queryData);
-
-            responseBuilder.setIndex(result.getIndex());
-            responseBuilder.setValue(ByteString.copyFrom(result.getValue()));
-            responseBuilder.setLog(result.getLogValue());
-        }
-
-        return responseBuilder.build();
+```
+ResponseQuery requestQuery(RequestQuery req) {
+    final boolean isProveQuery = req.getProve();
+    final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder();
+
+    if (isProveQuery) {
+        com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray());
+        final byte[] proofAsByteArray = proofResult.getAsByteArray();
+
+        responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray));
+        responseBuilder.setKey(req.getData());
+        responseBuilder.setValue(ByteString.copyFrom(proofResult.getData()));
+        responseBuilder.setLog(proofResult.getLogValue());
+    } else {
+        byte[] queryData = req.getData().toByteArray();
+
+        final com.app.example.QueryResult result = generateQueryResult(queryData);
+
+        responseBuilder.setIndex(result.getIndex());
+        responseBuilder.setValue(ByteString.copyFrom(result.getValue()));
+        responseBuilder.setLog(result.getLogValue());
+    }
+
+    return responseBuilder.build();
+}
+```
+
 ### Handshake

 When the app or tendermint restarts, they need to sync to a common
@@ -477,17 +500,21 @@ all blocks.

 In go:

-    func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
-        return types.ResponseInfo{Data: cmn.Fmt("{\"size\":%v}", app.state.Size())}
-    }
+```
+func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
+    return types.ResponseInfo{Data: cmn.Fmt("{\"size\":%v}", app.state.Size())}
+}
+```

 In Java:

-    ResponseInfo requestInfo(RequestInfo req) {
-        final byte[] lastAppHash = getLastAppHash();
-        final long lastHeight = getLastHeight();
-        return ResponseInfo.newBuilder().setLastBlockAppHash(ByteString.copyFrom(lastAppHash)).setLastBlockHeight(lastHeight).build();
-    }
+```
+ResponseInfo requestInfo(RequestInfo req) {
+    final byte[] lastAppHash = getLastAppHash();
+    final long lastHeight = getLastHeight();
+    return ResponseInfo.newBuilder().setLastBlockAppHash(ByteString.copyFrom(lastAppHash)).setLastBlockHeight(lastHeight).build();
+}
+```

 ### Genesis

@@ -497,31 +524,35 @@ consensus params.

In go:

-    // Save the validators in the merkle tree
-    func (app *PersistentKVStoreApplication) InitChain(params types.RequestInitChain) {
-        for _, v := range params.Validators {
-            r := app.updateValidator(v)
-            if r.IsErr() {
-                app.logger.Error("Error updating validators", "r", r)
-            }
-        }
-    }
+```
+// Save the validators in the merkle tree
+func (app *PersistentKVStoreApplication) InitChain(params types.RequestInitChain) {
+    for _, v := range params.Validators {
+        r := app.updateValidator(v)
+        if r.IsErr() {
+            app.logger.Error("Error updating validators", "r", r)
+        }
+    }
+}
+```

 In Java:

-    /*
-     * all types come from protobuf definition
-     */
-    ResponseInitChain requestInitChain(RequestInitChain req) {
-        final int validatorsCount = req.getValidatorsCount();
-        final List<Types.Validator> validatorsList = req.getValidatorsList();
-
-        validatorsList.forEach((validator) -> {
-            long power = validator.getPower();
-            byte[] validatorPubKey = validator.getPubKey().toByteArray();
-
-            // do somehing for validator setup in app
-        });
-
-        return ResponseInitChain.newBuilder().build();
-    }
+```
+/*
+ * all types come from protobuf definition
+ */
+ResponseInitChain requestInitChain(RequestInitChain req) {
+    final int validatorsCount = req.getValidatorsCount();
+    final List<Types.Validator> validatorsList = req.getValidatorsList();
+
+    validatorsList.forEach((validator) -> {
+        long power = validator.getPower();
+        byte[] validatorPubKey = validator.getPubKey().toByteArray();
+
+        // do something for validator setup in app
+    });
+
+    return ResponseInitChain.newBuilder().build();
+}
+```
diff --git a/docs/app-dev/ecosystem.json b/docs/app-dev/ecosystem.json
new file mode 100644
index 000000000..363f18902
--- /dev/null
+++ b/docs/app-dev/ecosystem.json
@@ -0,0 +1,213 @@
+{
+  "abciApps": [
+    {
+      "name": "Cosmos SDK",
+      "url": "https://github.com/cosmos/cosmos-sdk",
+      "language": "Go",
+      "author": "Cosmos",
+      "description":
+        "A prototypical account based crypto currency state machine supporting plugins"
+    },
+    {
+      "name": "cb-ledger",
+      "url": "https://github.com/block-finance/cpp-abci",
+      "language": "C++",
+      "author": "Block Finance",
+      "description":
+        "Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow"
+    },
+    {
+      "name": "Clearchain",
+      "url": "https://github.com/tendermint/clearchain",
+      "language": "Go",
+      "author": "FXCLR",
+      "description":
+        "Application to manage a distributed ledger for money transfers that support multi-currency accounts"
+    },
+    {
+      "name": "Ethermint",
+      "url": "https://github.com/tendermint/ethermint",
+      "language": "Go",
+      "author": "Tendermint",
+      "description": "The go-ethereum state machine run as an ABCI app"
+    },
+    {
+      "name": "Merkle AVL Tree",
+      "url": "https://github.com/tendermint/merkleeyes",
+      "language": "Go",
+      "author": "Tendermint",
+      "description": "Tendermint IAVL tree implemented as an ABCI app"
+    },
+    {
+      "name": "Burrow",
+      "url": "https://github.com/hyperledger/burrow",
+      "language": "Go",
+      "author": "Monax Industries",
+      "description":
+        "Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store"
+    },
+    {
+      "name": "Merkle AVL Tree",
+      "url": "https://github.com/jTMSP/MerkleTree",
+      "language": "Java",
+      "author": "jTMSP",
+      "description": "Tendermint IAVL tree implemented as an ABCI app"
+    },
+    {
+      "name": "TMChat",
+      "url": "https://github.com/wolfposd/TMChat",
+      "language": "Java",
+      "author": "jTMSP",
+      "description": "P2P chat using Tendermint"
+    },
+    {
+      "name": "Comit",
+      "url": "https://github.com/zballs/comit",
+
"language": "Go", + "author": "Zach Balder", + "description": "Public service reporting and tracking" + }, + { + "name": "Passchain", + "url": "https://github.com/trusch/passchain", + "language": "Go", + "author": "trusch", + "description": + "Tool to securely store and share passwords, tokens and other short secrets" + }, + { + "name": "Passwerk", + "url": "https://github.com/rigelrozanski/passwerk", + "language": "Go", + "author": "Rigel Rozanski", + "description": "Encrypted storage web-utility backed by Tendermint" + }, + { + "name": "py-tendermint", + "url": "https://github.com/davebryson/py-tendermint", + "language": "Python", + "author": "Dave Bryson", + "description": + "A Python microframework for building blockchain applications with Tendermint" + }, + { + "name": "Stratumn SDK", + "url": "https://github.com/stratumn/sdk", + "language": "Go", + "author": "Stratumn", + "description": "SDK for Proof-of-Process networks" + }, + { + "name": "Lotion", + "url": "https://github.com/keppel/lotion", + "language": "Javascript", + "author": "Judd Keppel", + "description": + "A Javascript microframework for building blockchain applications with Tendermint" + }, + { + "name": "Tendermint Blockchain Chat App", + "url": "https://github.com/SaifRehman/tendermint-chat-app/", + "language": "Javascript", + "author": "Saif Rehman", + "description": + "This is a minimal chat application based on Tendermint using Lotion.js in 30 lines of code!. It also includes web/mobile application built using Ionic 3." + }, + { + "name": "BigchainDB", + "url": "https://github.com/bigchaindb/bigchaindb", + "language": "Python", + "author": "BigchainDB GmbH and the BigchainDB community", + "description": "Blockchain database" + }, + { + "name": "Mint", + "url": "https://github.com/Hashnode/mint", + "language": "Go", + "author": "Hashnode", + "description": "Build blockchain-powered social apps" + } + ], + "abciServers": [ + { + "name": "abci", + "url": "https://github.com/tendermint/abci", + "language": "Go", + "author": "Tendermint" + }, + { + "name": "js-abci", + "url": "https://github.com/tendermint/js-abci", + "language": "Javascript", + "author": "Tendermint" + }, + { + "name": "cpp-tmsp", + "url": "https://github.com/mdyring/cpp-tmsp", + "language": "C++", + "author": "Martin Dyring" + }, + { + "name": "jabci", + "url": "https://github.com/jTendermint/jabci", + "language": "Java", + "author": "jTendermint" + }, + { + "name": "ocaml-tmsp", + "url": "https://github.com/zballs/ocaml-tmsp", + "language": "Ocaml", + "author": "Zach Balder" + }, + { + "name": "abci_server", + "url": "https://github.com/KrzysiekJ/abci_server", + "language": "Erlang", + "author": "Krzysztof Jurewicz" + }, + { + "name": "py-abci", + "url": "https://github.com/davebryson/py-abci", + "language": "Python", + "author": "Dave Bryson" + }, + { + "name": "Spearmint", + "url": "https://github.com/dennismckinnon/spearmint", + "language": "Javascript", + "author": "Dennis McKinnon" + } + ], + "deploymentTools": [ + { + "name": "mintnet-kubernetes", + "url": "https://github.com/tendermint/tools", + "technology": "Docker and Kubernetes", + "author": "Tendermint", + "description": + "Deploy a Tendermint test network using Google's kubernetes" + }, + { + "name": "terraforce", + "url": "https://github.com/tendermint/tools", + "technology": "Terraform", + "author": "Tendermint", + "description": + "Terraform + our custom terraforce tool; deploy a production Tendermint network with load balancing over multiple AWS availability zones" + }, + { + "name": 
"ansible-tendermint", + "url": "https://github.com/tendermint/tools", + "technology": "Ansible", + "author": "Tendermint", + "description": "Ansible playbooks + Tendermint" + }, + { + "name": "brooklyn-tendermint", + "url": "https://github.com/cloudsoft/brooklyn-tendermint", + "technology": "Clocker for Apache Brooklyn ", + "author": "Cloudsoft", + "description": "Deploy a tendermint test network in docker containers " + } + ] +} diff --git a/docs/ecosystem.md b/docs/app-dev/ecosystem.md similarity index 83% rename from docs/ecosystem.md rename to docs/app-dev/ecosystem.md index 6b7f833a8..7960e6c0d 100644 --- a/docs/ecosystem.md +++ b/docs/app-dev/ecosystem.md @@ -3,11 +3,11 @@ The growing list of applications built using various pieces of the Tendermint stack can be found at: -- https://tendermint.com/ecosystem +- https://tendermint.com/ecosystem We thank the community for their contributions thus far and welcome the addition of new projects. A pull request can be submitted to [this -file](https://github.com/tendermint/aib-data/blob/master/json/ecosystem.json) +file](https://github.com/tendermint/tendermint/blob/master/docs/app-dev/ecosystem.json) to include your project. ## Other Tools diff --git a/docs/getting-started.md b/docs/app-dev/getting-started.md similarity index 67% rename from docs/getting-started.md rename to docs/app-dev/getting-started.md index 8294d1e9b..cfc614ddc 100644 --- a/docs/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -7,7 +7,7 @@ application you want to run. So, to run a complete blockchain that does something useful, you must start two programs: one is Tendermint Core, the other is your application, which can be written in any programming language. Recall from [the intro to -ABCI](introduction.html#ABCI-Overview) that Tendermint Core handles all +ABCI](./introduction.md#ABCI-Overview) that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the application when they need to be validated, or when they're ready to be committed to a block. @@ -25,11 +25,13 @@ more info. Then run - go get github.com/tendermint/tendermint - cd $GOPATH/src/github.com/tendermint/tendermint - make get_tools - make get_vendor_deps - make install_abci +``` +go get github.com/tendermint/tendermint +cd $GOPATH/src/github.com/tendermint/tendermint +make get_tools +make get_vendor_deps +make install_abci +``` Now you should have the `abci-cli` installed; you'll see a couple of commands (`counter` and `kvstore`) that are example applications written @@ -47,30 +49,38 @@ full transaction bytes are stored as the key and the value. Let's start a kvstore application. - abci-cli kvstore +``` +abci-cli kvstore +``` In another terminal, we can start Tendermint. If you have never run Tendermint before, use: - tendermint init - tendermint node +``` +tendermint init +tendermint node +``` If you have used Tendermint, you may want to reset the data for a new blockchain by running `tendermint unsafe_reset_all`. Then you can run `tendermint node` to start Tendermint, and connect to the app. For more -details, see [the guide on using Tendermint](./using-tendermint.html). +details, see [the guide on using Tendermint](./using-tendermint.md). You should see Tendermint making blocks! We can get the status of our Tendermint node as follows: - curl -s localhost:26657/status +``` +curl -s localhost:26657/status +``` The `-s` just silences `curl`. For nicer output, pipe the result into a tool like [jq](https://stedolan.github.io/jq/) or `json_pp`. 
Now let's send some transactions to the kvstore. - curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"' +``` +curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"' +``` Note the single quote (`'`) around the url, which ensures that the double quotes (`"`) are not escaped by bash. This command sent a @@ -78,66 +88,76 @@ transaction with bytes `abcd`, so `abcd` will be stored as both the key and the value in the Merkle tree. The response should look something like: - { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "fee": {} +``` +{ + "jsonrpc": "2.0", + "id": "", + "result": { + "check_tx": { + "fee": {} + }, + "deliver_tx": { + "tags": [ + { + "key": "YXBwLmNyZWF0b3I=", + "value": "amFl" }, - "deliver_tx": { - "tags": [ - { - "key": "YXBwLmNyZWF0b3I=", - "value": "amFl" - }, - { - "key": "YXBwLmtleQ==", - "value": "YWJjZA==" - } - ], - "fee": {} - }, - "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39", - "height": 14 - } - } + { + "key": "YXBwLmtleQ==", + "value": "YWJjZA==" + } + ], + "fee": {} + }, + "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39", + "height": 14 + } +} +``` We can confirm that our transaction worked and the value got stored by querying the app: - curl -s 'localhost:26657/abci_query?data="abcd"' +``` +curl -s 'localhost:26657/abci_query?data="abcd"' +``` The result should look like: - { - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "index": "-1", - "key": "YWJjZA==", - "value": "YWJjZA==" - } - } +``` +{ + "jsonrpc": "2.0", + "id": "", + "result": { + "response": { + "log": "exists", + "index": "-1", + "key": "YWJjZA==", + "value": "YWJjZA==" } + } +} +``` Note the `value` in the result (`YWJjZA==`); this is the base64-encoding of the ASCII of `abcd`. You can verify this in a python 2 shell by -running `"61626364".decode('base64')` or in python 3 shell by running -`import codecs; codecs.decode("61626364", 'base64').decode('ascii')`. +running `"YWJjZA==".decode('base64')` or in python 3 shell by running +`import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')`. Stay tuned for a future release that [makes this output more human-readable](https://github.com/tendermint/tendermint/issues/1794). Now let's try setting a different key and value: - curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"' +``` +curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"' +``` Now if we query for `name`, we should get `satoshi`, or `c2F0b3NoaQ==` in base64: - curl -s 'localhost:26657/abci_query?data="name"' +``` +curl -s 'localhost:26657/abci_query?data="name"' +``` Try some other transactions and queries to make sure everything is working! @@ -171,60 +191,70 @@ Let's kill the previous instance of `tendermint` and the `kvstore` application, and start the counter app. We can enable `serial=on` with a flag: - abci-cli counter --serial +``` +abci-cli counter --serial +``` In another window, reset then start Tendermint: - tendermint unsafe_reset_all - tendermint node +``` +tendermint unsafe_reset_all +tendermint node +``` Once again, you can see the blocks streaming by. Let's send some transactions. Since we have set `serial=on`, the first transaction must be the number `0`: - curl localhost:26657/broadcast_tx_commit?tx=0x00 +``` +curl localhost:26657/broadcast_tx_commit?tx=0x00 +``` Note the empty (hence successful) response. The next transaction must be the number `1`. 
If instead, we try to send a `5`, we get an error: - > curl localhost:26657/broadcast_tx_commit?tx=0x05 - { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "fee": {} - }, - "deliver_tx": { - "code": 2, - "log": "Invalid nonce. Expected 1, got 5", - "fee": {} - }, - "hash": "33B93DFF98749B0D6996A70F64071347060DC19C", - "height": 34 - } - } +``` +> curl localhost:26657/broadcast_tx_commit?tx=0x05 +{ + "jsonrpc": "2.0", + "id": "", + "result": { + "check_tx": { + "fee": {} + }, + "deliver_tx": { + "code": 2, + "log": "Invalid nonce. Expected 1, got 5", + "fee": {} + }, + "hash": "33B93DFF98749B0D6996A70F64071347060DC19C", + "height": 34 + } +} +``` But if we send a `1`, it works again: - > curl localhost:26657/broadcast_tx_commit?tx=0x01 - { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "fee": {} - }, - "deliver_tx": { - "fee": {} - }, - "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D", - "height": 60 - } - } +``` +> curl localhost:26657/broadcast_tx_commit?tx=0x01 +{ + "jsonrpc": "2.0", + "id": "", + "result": { + "check_tx": { + "fee": {} + }, + "deliver_tx": { + "fee": {} + }, + "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D", + "height": 60 + } +} +``` For more details on the `broadcast_tx` API, see [the guide on using -Tendermint](./using-tendermint.html). +Tendermint](./using-tendermint.md). ## CounterJS - Example in Another Language @@ -236,26 +266,34 @@ You'll also need to fetch the relevant repository, from [here](https://github.com/tendermint/js-abci) then install it. As go devs, we keep all our code under the `$GOPATH`, so run: - go get github.com/tendermint/js-abci &> /dev/null - cd $GOPATH/src/github.com/tendermint/js-abci/example - npm install - cd .. +``` +go get github.com/tendermint/js-abci &> /dev/null +cd $GOPATH/src/github.com/tendermint/js-abci/example +npm install +cd .. +``` Kill the previous `counter` and `tendermint` processes. Now run the app: - node example/app.js +``` +node example/counter.js +``` In another window, reset and start `tendermint`: - tendermint unsafe_reset_all - tendermint node +``` +tendermint unsafe_reset_all +tendermint node +``` Once again, you should see blocks streaming by - but now, our application is written in javascript! Try sending some transactions, and like before - the results should be the same: - curl localhost:26657/broadcast_tx_commit?tx=0x00 # ok - curl localhost:26657/broadcast_tx_commit?tx=0x05 # invalid nonce - curl localhost:26657/broadcast_tx_commit?tx=0x01 # ok +``` +curl localhost:26657/broadcast_tx_commit?tx=0x00 # ok +curl localhost:26657/broadcast_tx_commit?tx=0x05 # invalid nonce +curl localhost:26657/broadcast_tx_commit?tx=0x01 # ok +``` Neat, eh? diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md new file mode 100644 index 000000000..3bca10959 --- /dev/null +++ b/docs/app-dev/indexing-transactions.md @@ -0,0 +1,97 @@ +# Indexing Transactions + +Tendermint allows you to index transactions and later query or subscribe +to their results. + +Let's take a look at the `[tx_index]` config section: + +``` +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" (default) +# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is tx hash) +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. 
This, of course, depends on the indexer's DB and the volume of
+# transactions.
+index_tags = ""
+
+# When set to true, tells indexer to index all tags. Note this may not be
+# desirable (see the comment above). IndexTags has precedence over
+# IndexAllTags (i.e. when given both, IndexTags will be indexed).
+index_all_tags = false
+```
+
+By default, Tendermint will index all transactions by their respective
+hashes using an embedded simple indexer. Note that we are planning to add
+more options in the future (e.g., Postgresql indexer).
+
+## Adding tags
+
+In your application's `DeliverTx` method, add the `Tags` field with the
+pairs of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance":
+"100.0", "date": "2018-01-02").
+
+Example:
+
+```
+func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
+    ...
+    tags := []cmn.KVPair{
+        {[]byte("account.name"), []byte("igor")},
+        {[]byte("account.address"), []byte("0xdeadbeef")},
+        {[]byte("tx.amount"), []byte("7")},
+    }
+    return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
+}
+```
+
+If you want Tendermint to only index transactions by the "account.name" tag,
+set `tx_index.index_tags="account.name"` in the config. If you want to index
+all tags, set `index_all_tags=true`.
+
+Note that there are a few predefined tags:
+
+- `tm.event` (event type)
+- `tx.hash` (transaction's hash)
+- `tx.height` (height of the block transaction was committed in)
+
+Tendermint will throw a warning if you try to use any of the above keys.
+
+## Querying transactions
+
+You can query the transaction results by calling the `/tx_search` RPC
+endpoint:
+
+```
+curl "localhost:26657/tx_search?query=\"account.name='igor'\"&prove=true"
+```
+
+Check out [API docs](https://tendermint.github.io/slate/?shell#txsearch)
+for more information on query syntax and other options.
+
+## Subscribing to transactions
+
+Clients can subscribe to transactions with the given tags via Websocket
+by providing a query to the `/subscribe` RPC endpoint.
+
+```
+{
+  "jsonrpc": "2.0",
+  "method": "subscribe",
+  "id": "0",
+  "params": {
+    "query": "account.name='igor'"
+  }
+}
+```
+
+Check out [API docs](https://tendermint.github.io/slate/#subscribe) for
+more information on query syntax and other options.
diff --git a/docs/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md
similarity index 82%
rename from docs/subscribing-to-events-via-websocket.md
rename to docs/app-dev/subscribing-to-events-via-websocket.md
index 43d3f7767..9e7c642a0 100644
--- a/docs/subscribing-to-events-via-websocket.md
+++ b/docs/app-dev/subscribing-to-events-via-websocket.md
@@ -9,14 +9,16 @@ for third-party applications (for analysys) or inspecting state.
 You can subscribe to any of the events above by calling `subscribe` RPC
 method via Websocket.

-    {
-        "jsonrpc": "2.0",
-        "method": "subscribe",
-        "id": "0",
-        "params": {
-            "query": "tm.event='NewBlock'"
-        }
-    }
+```
+{
+  "jsonrpc": "2.0",
+  "method": "subscribe",
+  "id": "0",
+  "params": {
+    "query": "tm.event='NewBlock'"
+  }
+}
+```

 Check out [API docs](https://tendermint.github.io/slate/#subscribe) for
 more information on query syntax and other options.
diff --git a/docs/architecture/README.md b/docs/architecture/README.md
index 9e41d3064..1cfc7ddce 100644
--- a/docs/architecture/README.md
+++ b/docs/architecture/README.md
@@ -1,5 +1,22 @@
-# Architecture Decision Records
+# Architecture Decision Records (ADR)

-This is a location to record all high-level architecture decisions in the tendermint project.
Not the implementation details, but the reasoning that happened. This should be refered to for guidance of the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
+This is a location to record all high-level architecture decisions in the tendermint project.

-Read up on the concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
+You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
+
+An ADR should provide:
+
+- Context on the relevant goals and the current state
+- Proposed changes to achieve the goals
+- Summary of pros and cons
+- References
+- Changelog
+
+Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and
+justification for a change in architecture, or for the architecture of something
+new. The spec is a much more compressed and streamlined summary of everything as
+it stands today.
+
+If recorded decisions turn out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match.
+
+Note the context/background should be written in the present tense.
diff --git a/docs/architecture/adr-009-ABCI-design.md b/docs/architecture/adr-009-ABCI-design.md
new file mode 100644
index 000000000..8b85679b8
--- /dev/null
+++ b/docs/architecture/adr-009-ABCI-design.md
@@ -0,0 +1,273 @@
+# ADR 009: ABCI UX Improvements
+
+## Changelog
+
+23-06-2018: Some minor fixes from review
+07-06-2018: Some updates based on discussion with Jae
+07-06-2018: Initial draft to match what was released in ABCI v0.11
+
+## Context
+
+The ABCI was first introduced in late 2015. Its purpose is to be:
+
+- a generic interface between state machines and their replication engines
+- agnostic to the language the state machine is written in
+- agnostic to the replication engine that drives it
+
+This means ABCI should provide an interface for both pluggable applications and
+pluggable consensus engines.
+
+To achieve this, it uses Protocol Buffers (proto3) for message types. The dominant
+implementation is in Go.
+
+After some recent discussions with the community on github, the following were
+identified as pain points:
+
+- Amino encoded types
+- Managing validator sets
+- Imports in the protobuf file
+
+See the [references](#references) for more.
+
+### Imports
+
+The native proto library in Go generates inflexible and verbose code.
+Many in the Go community have adopted a fork called
+[gogoproto](https://github.com/gogo/protobuf) that provides a
+variety of features aimed at improving the developer experience.
+While `gogoproto` is nice, it creates an additional dependency, and compiling
+the protobuf types for other languages has been reported to fail when `gogoproto` is used.
+
+### Amino
+
+Amino is an encoding protocol designed to improve over insufficiencies of protobuf.
+Its goal is to be proto4.
+
+Many people are frustrated by incompatibility with protobuf,
+and with the requirement for Amino to be used at all within ABCI.
+
+We intend to make Amino successful enough that we can eventually use it for ABCI
+message types directly. By then it should be called proto4. In the meantime,
+we want it to be easy to use.
+
+### PubKey
+
+PubKeys are encoded using Amino (and before that, go-wire).
+
+Ideally, PubKeys are an interface type where we don't know all the
+implementation types, so it's unfitting to use `oneof` or `enum`.
+
+### Addresses
+
+The address for an ED25519 pubkey is the RIPEMD160 of the Amino
+encoded pubkey. This introduces an Amino dependency in the address generation,
+a functionality that is widely required and should be as easy to compute as
+possible.
+
+### Validators
+
+To change the validator set, applications can return a list of validator updates
+with ResponseEndBlock. In these updates, the public key *must* be included,
+because Tendermint requires the public key to verify validator signatures. This
+means ABCI developers have to work with PubKeys. That said, it would also be
+convenient to work with address information, and for it to be simple to do so.
+
+### AbsentValidators
+
+Tendermint also provides a list of validators in BeginBlock who did not sign the
+last block. This allows applications to reflect availability behaviour in the
+application, for instance by punishing validators for not having votes included
+in commits.
+
+### InitChain
+
+Tendermint passes in a list of validators here, and nothing else. It would
+benefit the application to be able to control the initial validator set. For
+instance the genesis file could include application-based information about the
+initial validator set that the application could process to determine the
+initial validator set. Additionally, InitChain would benefit from getting all
+the genesis information.
+
+### Header
+
+ABCI provides the Header in RequestBeginBlock so the application can have
+important information about the latest state of the blockchain.
+
+## Decision
+
+### Imports
+
+Move away from gogoproto. In the short term, we will just maintain a second
+protobuf file without the gogoproto annotations. In the medium term, we will
+make copies of all the structs in Golang and shuttle back and forth. In the long
+term, we will use Amino.
+
+### Amino
+
+To simplify ABCI application development in the short term,
+Amino will be completely removed from the ABCI:
+
+- It will not be required for PubKey encoding
+- It will not be required for computing PubKey addresses
+
+That said, we are working to make Amino a huge success, and to become proto4.
+To facilitate adoption and cross-language compatibility in the near-term, Amino
+v1 will:
+
+- be fully compatible with the subset of proto3 that excludes `oneof`
+- use the Amino prefix system to provide interface types, as opposed to `oneof`
+  style union types.
+
+That said, an Amino v2 will be worked on to improve the performance of the
+format and its usability in cryptographic applications.
+
+
+### PubKey
+
+Encoding schemes infect software. As a generic middleware, ABCI aims to have
+some cross scheme compatibility. For this it has no choice but to include opaque
+bytes from time to time. While we will not enforce Amino encoding for these
+bytes yet, we need to provide a type system. The simplest way to do this is to
+use a type string.
+
+PubKey will now look like:
+
+```
+message PubKey {
+    string type
+    bytes data
+}
+```
+
+where `type` can be:
+
+- "ed25519", with `data = <raw 32-byte pubkey>`
+- "secp256k1", with `data = <33-byte OpenSSL compressed pubkey>`
+
+
+As we want to retain flexibility here, and since ideally, PubKey would be an
+interface type, we do not use `enum` or `oneof`.
+
+### Addresses
+
+To simplify and improve computing addresses, we change it to the first 20 bytes of the SHA256
+of the raw 32-byte public key.
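+
+For illustration, a minimal Go sketch of this derivation (the helper name and
+example values are ours, not part of the ADR):
+
+```
+package main
+
+import (
+    "crypto/sha256"
+    "fmt"
+)
+
+// addressFromPubKey returns the first 20 bytes of the SHA256 of the
+// raw 32-byte public key, per the scheme described above.
+func addressFromPubKey(pubKey [32]byte) []byte {
+    sum := sha256.Sum256(pubKey[:])
+    return sum[:20]
+}
+
+func main() {
+    var pk [32]byte // a real key would come from the validator
+    fmt.Printf("%X\n", addressFromPubKey(pk))
+}
+```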
+
+We continue to use the Bitcoin address scheme for secp256k1 keys.
+
+### Validators
+
+Add a `bytes address` field:
+
+```
+message Validator {
+    bytes address
+    PubKey pub_key
+    int64 power
+}
+```
+
+### RequestBeginBlock and AbsentValidators
+
+To simplify this, RequestBeginBlock will include the complete validator set,
+including the address and voting power of each validator, along
+with a boolean for whether or not they voted:
+
+```
+message RequestBeginBlock {
+    bytes hash
+    Header header
+    LastCommitInfo last_commit_info
+    repeated Evidence byzantine_validators
+}
+
+message LastCommitInfo {
+    int32 CommitRound
+    repeated SigningValidator validators
+}
+
+message SigningValidator {
+    Validator validator
+    bool signed_last_block
+}
+```
+
+Note that for the Validators in RequestBeginBlock, we DO NOT include public keys. Public keys are
+larger than addresses and in the future, with quantum computers, will be much
+larger. The overhead of passing them, especially during fast-sync, is
+significant.
+
+Additionally, addresses are changing to be simpler to compute, further removing
+the need to include pubkeys here.
+
+In short, ABCI developers must be aware of both addresses and public keys.
+
+### ResponseEndBlock
+
+Since ResponseEndBlock includes Validator, it must now include their address.
+
+### InitChain
+
+Change RequestInitChain to give the app all the information from the genesis file:
+
+```
+message RequestInitChain {
+    int64 time
+    string chain_id
+    ConsensusParams consensus_params
+    repeated Validator validators
+    bytes app_state_bytes
+}
+```
+
+Change ResponseInitChain to allow the app to specify the initial validator set
+and consensus parameters.
+
+```
+message ResponseInitChain {
+    ConsensusParams consensus_params
+    repeated Validator validators
+}
+```
+
+### Header
+
+Now that Tendermint Amino will be compatible with proto3, the Header in ABCI
+should exactly match the Tendermint header - they will then be encoded
+identically in ABCI and in Tendermint Core.
+
+## Status
+
+Accepted.
+
+## Consequences
+
+### Positive
+
+- Easier for developers to build on the ABCI
+- ABCI and Tendermint headers are identically serialized
+
+### Negative
+
+- Maintenance overhead of alternative type encoding scheme
+- Performance overhead of passing all validator info every block (at least it's
+  only addresses, and not also pubkeys)
+- Maintenance overhead of duplicate types
+
+### Neutral
+
+- ABCI developers must know about validator addresses
+
+## References
+
+- [ABCI v0.10.3 Specification (before this
+  proposal)](https://github.com/tendermint/abci/blob/v0.10.3/specification.rst)
+- [ABCI v0.11.0 Specification (implementing first draft of this
+  proposal)](https://github.com/tendermint/abci/blob/v0.11.0/specification.md)
+- [Ed25519 addresses](https://github.com/tendermint/go-crypto/issues/103)
+- [InitChain contains the
+  Genesis](https://github.com/tendermint/abci/issues/216)
+- [PubKeys](https://github.com/tendermint/tendermint/issues/1524)
+- [Notes on
+  Header](https://github.com/tendermint/tendermint/issues/1605)
+- [Gogoproto issues](https://github.com/tendermint/abci/issues/256)
+- [Absent Validators](https://github.com/tendermint/abci/issues/231)
diff --git a/docs/architecture/adr-010-crypto-changes.md b/docs/architecture/adr-010-crypto-changes.md
new file mode 100644
index 000000000..cfe618421
--- /dev/null
+++ b/docs/architecture/adr-010-crypto-changes.md
@@ -0,0 +1,78 @@
+# ADR 010: Crypto Changes
+
+## Context
+
+Tendermint is a cryptographic protocol that uses and composes a variety of cryptographic primitives.
+
+After nearly 4 years of development, Tendermint has recently undergone multiple security reviews to search for vulnerabilities and to assess the use and composition of cryptographic primitives.
+
+### Hash Functions
+
+Tendermint uses RIPEMD160 universally as a hash function, most notably in its Merkle tree implementation.
+
+RIPEMD160 was chosen because it provides the shortest fingerprint that is long enough to be considered secure (i.e. birthday bound of 80 bits).
+It was also developed in the open academic community, unlike NSA-designed algorithms like SHA256.
+
+That said, the cryptographic community appears to unanimously agree on the security of SHA256. It has become a universal standard, especially now that SHA1 is broken, being required in TLS connections and having optimized support in hardware.
+
+### Merkle Trees
+
+Tendermint uses a simple Merkle tree to compute digests of large structures like transaction batches
+and even blockchain headers. The Merkle tree length prefixes byte arrays before concatenating and hashing them.
+It uses RIPEMD160.
+
+### Addresses
+
+ED25519 addresses are computed using the RIPEMD160 of the Amino encoding of the public key.
+RIPEMD160 is generally considered an outdated hash function, and is much slower
+than more modern functions like SHA256 or Blake2.
+
+### Authenticated Encryption
+
+Tendermint P2P connections use authenticated encryption to provide privacy and authentication in the communications.
+This is done using the simple Station-to-Station protocol with the NaCL Ed25519 library.
+ +While there have been no vulnerabilities found in the implementation, there are some concerns: + +- NaCL uses Salsa20, a not-widely used and relatively out-dated stream cipher that has been obsoleted by ChaCha20 +- Connections use RIPEMD160 to compute a value that is used for the encryption nonce with subtle requirements on how it's used + +## Decision + +### Hash Functions + +Use the first 20-bytes of the SHA256 hash instead of RIPEMD160 for everything + +### Merkle Trees + +TODO + +### Addresses + +Compute ED25519 addresses as the first 20-bytes of the SHA256 of the raw 32-byte public key + +### Authenticated Encryption + +Make the following changes: + +- Use xChaCha20 instead of xSalsa20 - https://github.com/tendermint/tendermint/issues/1124 +- Use an HKDF instead of RIPEMD160 to compute nonces - https://github.com/tendermint/tendermint/issues/1165 + +## Status + +## Consequences + +### Positive + +- More modern and standard cryptographic functions with wider adoption and hardware acceleration + + +### Negative + +- Exact authenticated encryption construction isn't already provided in a well-used library + + +### Neutral + +## References + diff --git a/docs/deploy-testnets.md b/docs/deploy-testnets.md deleted file mode 100644 index 0c74b2c5e..000000000 --- a/docs/deploy-testnets.md +++ /dev/null @@ -1,68 +0,0 @@ -# Deploy a Testnet - -Now that we've seen how ABCI works, and even played with a few -applications on a single validator node, it's time to deploy a test -network to four validator nodes. - -## Manual Deployments - -It's relatively easy to setup a Tendermint cluster manually. The only -requirements for a particular Tendermint node are a private key for the -validator, stored as `priv_validator.json`, a node key, stored as -`node_key.json` and a list of the public keys of all validators, stored -as `genesis.json`. These files should be stored in -`~/.tendermint/config`, or wherever the `$TMHOME` variable might be set -to. - -Here are the steps to setting up a testnet manually: - -1) Provision nodes on your cloud provider of choice -2) Install Tendermint and the application of interest on all nodes -3) Generate a private key and a node key for each validator using - `tendermint init` -4) Compile a list of public keys for each validator into a - `genesis.json` file and replace the existing file with it. -5) Run - `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >` - on each node, where `< peer addresses >` is a comma separated list - of the IP:PORT combination for each node. The default port for - Tendermint is `26656`. Thus, if the IP addresses of your nodes were - `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command - would look like: - - - tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656 - -After a few seconds, all the nodes should connect to each other and -start making blocks! For more information, see the Tendermint Networks -section of [the guide to using Tendermint](using-tendermint.html). - -But wait! Steps 3 and 4 are quite manual. Instead, use [this -script](https://github.com/tendermint/tendermint/blob/develop/docs/examples/init_testnet.sh), -which does the heavy lifting for you. And it gets better. 
- -Instead of the previously linked script to initialize the files required -for a testnet, we have the `tendermint testnet` command. By default, -running `tendermint testnet` will create all the required files, just -like the script. Of course, you'll still need to manually edit some -fields in the `config.toml`. Alternatively, see the available flags to -auto-populate the `config.toml` with the fields that would otherwise be -passed in via flags when running `tendermint node`. As you might -imagine, this command is useful for manual or automated deployments. - -## Automated Deployments - -The easiest and fastest way to get a testnet up in less than 5 minutes. - -### Local - -With `docker` and `docker-compose` installed, run the command: - - make localnet-start - -from the root of the tendermint repository. This will spin up a 4-node -local testnet. Review the target in the Makefile to debug any problems. - -### Cloud - -See the [next section](./terraform-and-ansible.html) for details. diff --git a/docs/examples/init_testnet.sh b/docs/examples/init_testnet.sh deleted file mode 100644 index 84e54674c..000000000 --- a/docs/examples/init_testnet.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash - -# make all the files -tendermint init --home ./tester/node0 -tendermint init --home ./tester/node1 -tendermint init --home ./tester/node2 -tendermint init --home ./tester/node3 - -file0=./tester/node0/config/genesis.json -file1=./tester/node1/config/genesis.json -file2=./tester/node2/config/genesis.json -file3=./tester/node3/config/genesis.json - -genesis_time=`cat $file0 | jq '.genesis_time'` -chain_id=`cat $file0 | jq '.chain_id'` - -value0=`cat $file0 | jq '.validators[0].pub_key.value'` -value1=`cat $file1 | jq '.validators[0].pub_key.value'` -value2=`cat $file2 | jq '.validators[0].pub_key.value'` -value3=`cat $file3 | jq '.validators[0].pub_key.value'` - -rm $file0 -rm $file1 -rm $file2 -rm $file3 - -echo "{ - \"genesis_time\": $genesis_time, - \"chain_id\": $chain_id, - \"validators\": [ - { - \"pub_key\": { - \"type\": \"tendermint/PubKeyEd25519\", - \"value\": $value0 - }, - \"power:\": 10, - \"name\":, \"\" - }, - { - \"pub_key\": { - \"type\": \"tendermint/PubKeyEd25519\", - \"value\": $value1 - }, - \"power:\": 10, - \"name\":, \"\" - }, - { - \"pub_key\": { - \"type\": \"tendermint/PubKeyEd25519\", - \"value\": $value2 - }, - \"power:\": 10, - \"name\":, \"\" - }, - { - \"pub_key\": { - \"type\": \"tendermint/PubKeyEd25519\", - \"value\": $value3 - }, - \"power:\": 10, - \"name\":, \"\" - } - ], - \"app_hash\": \"\" -}" >> $file0 - -cp $file0 $file1 -cp $file0 $file2 -cp $file2 $file3 diff --git a/docs/examples/install_tendermint.sh b/docs/examples/install_tendermint.sh deleted file mode 100644 index 5a9c49d78..000000000 --- a/docs/examples/install_tendermint.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -# XXX: this script is meant to be used only on a fresh Ubuntu 16.04 instance -# and has only been tested on Digital Ocean - -# get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz -tar -xvf go1.10.linux-amd64.tar.gz - -apt install make - -## move go and add binary to path -mv go /usr/local -echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile - -## create the GOPATH directory, set GOPATH and put on PATH -mkdir goApps -echo "export GOPATH=/root/goApps" >> ~/.profile -echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile - -source ~/.profile - -## get the code and move into it -REPO=github.com/tendermint/tendermint -go get 
$REPO -cd $GOPATH/src/$REPO - -## build -git checkout master -make get_tools -make get_vendor_deps -make install diff --git a/docs/examples/node0/config/config.toml b/docs/examples/node0/config/config.toml deleted file mode 100644 index d1ecf238a..000000000 --- a/docs/examples/node0/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "alpha" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node0/config/genesis.json b/docs/examples/node0/config/genesis.json deleted file mode 100644 index ac306bd2f..000000000 --- a/docs/examples/node0/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": "10", - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node0/config/node_key.json b/docs/examples/node0/config/node_key.json deleted file mode 100644 index dec1fd42d..000000000 --- a/docs/examples/node0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"7lY+k6EDllG8Q9gVbF5313t/ag2YGkBVKdVa0YHJ9xO5k0w3Q/hke0Z7UFT1KgVDGRUEKzwAwwjwFQUvgF0ZWg=="}} diff --git a/docs/examples/node0/config/priv_validator.json b/docs/examples/node0/config/priv_validator.json deleted file mode 100644 index ef2daccbb..000000000 --- a/docs/examples/node0/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "122A9414774A2FCAD026201DA477EF3F41970EF0", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "YLxp3ho+kySgAnzjBptbxDzSGw2ntGZLsIHQsaVxY/cP6TgB2Odg9ZsH3CZp3XfsF2mj+QC6U6hNFCsvL9BziQ==" - } -} diff --git a/docs/examples/node1/config/config.toml b/docs/examples/node1/config/config.toml deleted file mode 100644 index bc5a5bde1..000000000 --- a/docs/examples/node1/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "bravo" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node1/config/genesis.json b/docs/examples/node1/config/genesis.json deleted file mode 100644 index ac306bd2f..000000000 --- a/docs/examples/node1/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": "10", - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node1/config/node_key.json b/docs/examples/node1/config/node_key.json deleted file mode 100644 index 05f4ea846..000000000 --- a/docs/examples/node1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"H71dc/TIG7nTselfa9nG0WRArXLKYnm7P5eFCk2lk8ASKQ3sIHpbdxCSHQD/RcdHe7TiabJeuOssNPvPWiyQEQ=="}} diff --git a/docs/examples/node1/config/priv_validator.json b/docs/examples/node1/config/priv_validator.json deleted file mode 100644 index 3a38793e4..000000000 --- a/docs/examples/node1/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "BEA1B57F5806CF9AC4D54C8CF806DED5C0F102E1", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "o0IqrHSPtd5YqGefodWxpJuRzvuVBjgbH785vbMgk7Vvno3kYJHVp1xVG4Q2N8rD+aubZ2SFPvA1ldX9IOwqxQ==" - } -} diff --git a/docs/examples/node2/config/config.toml b/docs/examples/node2/config/config.toml deleted file mode 100644 index 1bf06286a..000000000 --- a/docs/examples/node2/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "charlie" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node2/config/genesis.json b/docs/examples/node2/config/genesis.json deleted file mode 100644 index ac306bd2f..000000000 --- a/docs/examples/node2/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": "10", - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node2/config/node_key.json b/docs/examples/node2/config/node_key.json deleted file mode 100644 index 6f15a541b..000000000 --- a/docs/examples/node2/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"COHZ/Y2cWGWxJNkRwtpQBt5sYvOnb6Gpz0lO46XERRJFBIdSWD5x1UMGRSTmnvW1ec5G4bMdg6zUZKOZD+vVPg=="}} diff --git a/docs/examples/node2/config/priv_validator.json b/docs/examples/node2/config/priv_validator.json deleted file mode 100644 index 2bcd31a76..000000000 --- a/docs/examples/node2/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "F0AA266949FB29ADA0B679C27889ED930BD1BDA1", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "khADeZ5K/8u/L99DFaZNRq8V5g+EHWbwfqFjhCrppaAiBkOkm8YDRMBqaJwDyKtzL5Ff8GRSWPoNfAzv3XLAhQ==" - } -} diff --git a/docs/examples/node3/config/config.toml b/docs/examples/node3/config/config.toml deleted file mode 100644 index 8c23f7d38..000000000 --- a/docs/examples/node3/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "delta" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node3/config/genesis.json b/docs/examples/node3/config/genesis.json deleted file mode 100644 index ac306bd2f..000000000 --- a/docs/examples/node3/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": "10", - "name": "" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": "10", - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node3/config/node_key.json b/docs/examples/node3/config/node_key.json deleted file mode 100644 index 01a14a3bf..000000000 --- a/docs/examples/node3/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"9Y9xp/tUJJ6pHTF5SUV0bGKYSdVbFtMHu+Lr8S0JBSZAwneaejnfOEU1LMKOnQ07skrDUaJcj5di3jAyjxJzqg=="}} diff --git a/docs/examples/node3/config/priv_validator.json b/docs/examples/node3/config/priv_validator.json deleted file mode 100644 index bc8a66513..000000000 --- a/docs/examples/node3/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "9A1A6914EB5F4FF0269C7EEEE627C27310CC64F9", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "jb52LZ5gp+eQ8nJlFK1z06nBMp1gD8ICmyzdM1icGOgoYBl/Fm8hntptt4hDzlTUQIbr4jrYpJ1ofy6VzT46JQ==" - } -} diff --git a/docs/how-to-read-logs.md b/docs/how-to-read-logs.md deleted file mode 100644 index dd59d1848..000000000 --- a/docs/how-to-read-logs.md +++ /dev/null @@ -1,130 +0,0 @@ -# How to read logs - -## Walkabout example - -We first create three connections (mempool, consensus and query) to the -application (running `kvstore` locally in this case). - - I[10-04|13:54:27.364] Starting multiAppConn module=proxy impl=multiAppConn - I[10-04|13:54:27.366] Starting localClient module=abci-client connection=query impl=localClient - I[10-04|13:54:27.366] Starting localClient module=abci-client connection=mempool impl=localClient - I[10-04|13:54:27.367] Starting localClient module=abci-client connection=consensus impl=localClient - -Then Tendermint Core and the application perform a handshake. - - I[10-04|13:54:27.367] ABCI Handshake module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - I[10-04|13:54:27.368] ABCI Replay Blocks module=consensus appHeight=90 storeHeight=90 stateHeight=90 - I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - -After that, we start a few more things like the event switch, reactors, -and perform UPNP discover in order to detect the IP address. 
- - I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch - I[10-04|13:54:27.375] This node is a validator module=consensus - I[10-04|13:54:27.379] Starting Node module=main impl=Node - I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656 - I[10-04|13:54:27.382] Getting UPNP external address module=p2p - I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout" - I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656) - I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch" - I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor - I[10-04|13:54:30.387] Starting BlockchainReactor module=blockchain impl=BlockchainReactor - I[10-04|13:54:30.387] Starting ConsensusReactor module=consensus impl=ConsensusReactor - I[10-04|13:54:30.387] ConsensusReactor module=consensus fastSync=false - I[10-04|13:54:30.387] Starting ConsensusState module=consensus impl=ConsensusState - I[10-04|13:54:30.387] Starting WAL module=consensus wal=/home/vagrant/.tendermint/data/cs.wal/wal impl=WAL - I[10-04|13:54:30.388] Starting TimeoutTicker module=consensus impl=TimeoutTicker - -Notice the second row where Tendermint Core reports that "This node is a -validator". It also could be just an observer (regular node). - -Next we replay all the messages from the WAL. - - I[10-04|13:54:30.390] Catchup by replaying consensus messages module=consensus height=91 - I[10-04|13:54:30.390] Replay: New Step module=consensus height=91 round=0 step=RoundStepNewHeight - I[10-04|13:54:30.390] Replay: Done module=consensus - -"Started node" message signals that everything is ready for work. - - I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:26657 module=rpc-server - I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:26656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:26657])}" - -Next follows a standard block creation cycle, where we enter a new -round, propose a block, receive more than 2/3 of prevotes, then -precommits and finally have a chance to commit a block. For details, -please refer to [Consensus -Overview](introduction.html#consensus-overview) or [Byzantine Consensus -Algorithm](specification.html). - - I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus - I[10-04|13:54:30.393] enterPropose(91/0). Current: 91/0/RoundStepNewRound module=consensus - I[10-04|13:54:30.393] enterPropose: Our turn to propose module=consensus proposer=125B0E3C5512F5C2B0E1109E31885C4511570C42 privValidator="PrivValidator{125B0E3C5512F5C2B0E1109E31885C4511570C42 LH:90, LR:0, LS:3}" - I[10-04|13:54:30.394] Signed proposal module=consensus height=91 round=0 proposal="Proposal{91/0 1:21B79872514F (-1,:0:000000000000) {/10EDEDD7C84E.../}}" - I[10-04|13:54:30.397] Received complete proposal block module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E - I[10-04|13:54:30.397] enterPrevote(91/0). 
Current: 91/0/RoundStepPropose module=consensus - I[10-04|13:54:30.397] enterPrevote: ProposalBlock is valid module=consensus height=91 round=0 - I[10-04|13:54:30.398] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" err=null - I[10-04|13:54:30.401] Added to prevote module=consensus vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" prevotes="VoteSet{H:91 R:0 T:1 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" - I[10-04|13:54:30.401] enterPrecommit(91/0). Current: 91/0/RoundStepPrevote module=consensus - I[10-04|13:54:30.401] enterPrecommit: +2/3 prevoted proposal block. Locking module=consensus hash=F671D562C7B9242900A286E1882EE64E5556FE9E - I[10-04|13:54:30.402] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" err=null - I[10-04|13:54:30.404] Added to precommit module=consensus vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" precommits="VoteSet{H:91 R:0 T:2 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" - I[10-04|13:54:30.404] enterCommit(91/0). Current: 91/0/RoundStepPrecommit module=consensus - I[10-04|13:54:30.405] Finalizing commit of block with 0 txs module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E root=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - I[10-04|13:54:30.405] Block{ - Header{ - ChainID: test-chain-3MNw2N - Height: 91 - Time: 2017-10-04 13:54:30.393 +0000 UTC - NumTxs: 0 - LastBlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 - LastCommit: 56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D - Data: - Validators: CE25FBFF2E10C0D51AA1A07C064A96931BC8B297 - App: E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - }#F671D562C7B9242900A286E1882EE64E5556FE9E - Data{ - - }# - Commit{ - BlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 - Precommits: Vote{0:125B0E3C5512 90/00/2(Precommit) F15AB8BEF9A6 {/FE98E2B956F0.../}} - }#56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D - }#F671D562C7B9242900A286E1882EE64E5556FE9E module=consensus - I[10-04|13:54:30.408] Executed block module=state height=91 validTxs=0 invalidTxs=0 - I[10-04|13:54:30.410] Committed state module=state height=91 txs=0 hash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - I[10-04|13:54:30.410] Recheck txs module=mempool numtxs=0 height=91 - -## List of modules - -Here is the list of modules you may encounter in Tendermint's log and a -little overview what they do. - -- `abci-client` As mentioned in [Application Development Guide](app-development.md#abci-design), Tendermint acts as an ABCI - client with respect to the application and maintains 3 connections: - mempool, consensus and query. The code used by Tendermint Core can - be found [here](https://github.com/tendermint/tendermint/tree/develop/abci/client). -- `blockchain` Provides storage, pool (a group of peers), and reactor - for both storing and exchanging blocks between peers. -- `consensus` The heart of Tendermint core, which is the - implementation of the consensus algorithm. Includes two - "submodules": `wal` (write-ahead logging) for ensuring data - integrity and `replay` to replay blocks and messages on recovery - from a crash. -- `events` Simple event notification system. The list of events can be - found - [here](https://github.com/tendermint/tendermint/blob/master/types/events.go). - You can subscribe to them by calling `subscribe` RPC method. 
Refer - to [RPC docs](specification/rpc.html) for additional information. -- `mempool` Mempool module handles all incoming transactions, whenever - they are coming from peers or the application. -- `p2p` Provides an abstraction around peer-to-peer communication. For - more details, please check out the - [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md). -- `rpc` [Tendermint's RPC](specification/rpc.html). -- `rpc-server` RPC server. For implementation details, please read the - [README](https://github.com/tendermint/tendermint/blob/master/rpc/lib/README.md). -- `state` Represents the latest state and execution submodule, which - executes blocks against the application. -- `types` A collection of the publicly exposed types and methods to - work with them. diff --git a/docs/images/tmint-logo-blue.png b/docs/images/tmint-logo-blue.png deleted file mode 100644 index cc4c8fb82..000000000 Binary files a/docs/images/tmint-logo-blue.png and /dev/null differ diff --git a/docs/index.rst b/docs/index.rst index e7d86bc2b..bafbec354 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,68 +6,10 @@ Welcome to Tendermint! ====================== +This location for our documentation has been deprecated, please see: -.. image:: assets/tmint-logo-blue.png - :height: 200px - :width: 200px - :align: center +- https://tendermint.com/docs/ -Introduction ------------- +The last version built by Read The Docs will still be available at: -.. toctree:: - :maxdepth: 1 - - introduction.md - install.md - getting-started.md - using-tendermint.md - deploy-testnets.md - ecosystem.md - -Tendermint Tools ----------------- - -.. the tools/ files are pulled in from the tools repo -.. see the bottom of conf.py -.. toctree:: - :maxdepth: 1 - - tools/docker.md - terraform-and-ansible.md - tools/benchmarking.md - tools/monitoring.md - -ABCI, Apps, Logging, Etc ------------------------- - -.. toctree:: - :maxdepth: 1 - - abci-cli.md - abci-spec.md - app-architecture.md - app-development.md - subscribing-to-events-via-websocket.md - indexing-transactions.md - how-to-read-logs.md - running-in-production.md - metrics.md - -Research & Specification ------------------------- - -.. toctree:: - :maxdepth: 1 - - determinism.md - transactional-semantics.md - -.. specification.md ## keep this file for legacy purpose. needs to be fixed though - -* For a deeper dive, see `this thesis `__. -* There is also the `original whitepaper `__, though it is now quite outdated. -* Readers might also be interested in the `Cosmos Whitepaper `__ which describes Tendermint, ABCI, and how to build a scalable, heterogeneous, cryptocurrency network. -* For example applications and related software built by the Tendermint team and other, see the `software ecosystem `__. - -Join the `community `__ to ask questions and discuss projects. +- https://tendermint.readthedocs.io/projects/tools/en/v0.21.0/ diff --git a/docs/indexing-transactions.md b/docs/indexing-transactions.md deleted file mode 100644 index 93a61fe6b..000000000 --- a/docs/indexing-transactions.md +++ /dev/null @@ -1,89 +0,0 @@ -# Indexing Transactions - -Tendermint allows you to index transactions and later query or subscribe -to their results. - -Let's take a look at the `[tx_index]` config section: - - ##### transactions indexer configuration options ##### - [tx_index] - - # What indexer to use for transactions - # - # Options: - # 1) "null" (default) - # 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). 
- indexer = "kv" - - # Comma-separated list of tags to index (by default the only tag is tx hash) - # - # It's recommended to index only a subset of tags due to possible memory - # bloat. This is, of course, depends on the indexer's DB and the volume of - # transactions. - index_tags = "" - - # When set to true, tells indexer to index all tags. Note this may be not - # desirable (see the comment above). IndexTags has a precedence over - # IndexAllTags (i.e. when given both, IndexTags will be indexed). - index_all_tags = false - -By default, Tendermint will index all transactions by their respective -hashes using an embedded simple indexer. Note, we are planning to add -more options in the future (e.g., Postgresql indexer). - -## Adding tags - -In your application's `DeliverTx` method, add the `Tags` field with the -pairs of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance": -"100.0", "date": "2018-01-02"). - -Example: - - func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result { - ... - tags := []cmn.KVPair{ - {[]byte("account.name"), []byte("igor")}, - {[]byte("account.address"), []byte("0xdeadbeef")}, - {[]byte("tx.amount"), []byte("7")}, - } - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} - } - -If you want Tendermint to only index transactions by "account.name" tag, -in the config set `tx_index.index_tags="account.name"`. If you to index -all tags, set `index_all_tags=true` - -Note, there are a few predefined tags: - -- `tm.event` (event type) -- `tx.hash` (transaction's hash) -- `tx.height` (height of the block transaction was committed in) - -Tendermint will throw a warning if you try to use any of the above keys. - -## Querying transactions - -You can query the transaction results by calling `/tx_search` RPC -endpoint: - - curl "localhost:26657/tx_search?query=\"account.name='igor'\"&prove=true" - -Check out [API docs](https://tendermint.github.io/slate/?shell#txsearch) -for more information on query syntax and other options. - -## Subscribing to transactions - -Clients can subscribe to transactions with the given tags via Websocket -by providing a query to `/subscribe` RPC endpoint. - - { - "jsonrpc": "2.0", - "method": "subscribe", - "id": "0", - "params": { - "query": "account.name='igor'" - } - } - -Check out [API docs](https://tendermint.github.io/slate/#subscribe) for -more information on query syntax and other options. diff --git a/docs/interviews/tendermint-bft.md b/docs/interviews/tendermint-bft.md new file mode 100644 index 000000000..8b3ad5743 --- /dev/null +++ b/docs/interviews/tendermint-bft.md @@ -0,0 +1,250 @@ +# Interview Transcript with Tendermint core researcher, Zarko Milosevic, by Chjango + +**ZM**: Regarding leader election, it's round robin, but a weighted one. You +take into account the amount of bonded tokens. Depending on how much weight +they have of voting power, they would be elected more frequently. So we do +rotate, but just the guys who are having more voting power would be elected +more frequently. We are having 4 validators, and 1 of them have 2 times more +voting power, they have 2 times more elected as a leader. + +**CC**: 2x more absolute voting power or probabilistic voting power? + +**ZM**: It's actually very deterministic. It's not probabilistic at all. See +[Tendermint proposal election specification][1]. In Tendermint, there is no +pseudorandom leader election. It's a deterministic protocol. 
So leader election
+is a built-in function in the code, so you know exactly—depending on the
+voting power in the validator set, you'd know exactly who would be the leader
+in round x, x + 1, and so on. There is nothing random there; we are not
+trying to hide who would be the leader. It's really well known. It's just
+that there is a function, a mathematical function, and it's just
+basically—it's kind of an implementation detail—it starts from the voting
+power, and when you are elected, you get decreased by some number, and in
+each round you keep increasing depending on your voting power, so that you
+are elected again after k rounds. But knowing the validator set and the
+voting power, it's a very simple function; you can calculate it yourself to
+know exactly who would be next. For each round, this function will return you
+the leader for that round. In every round, we do this computation. It's all
+part of the same flow. It enforces the properties, which are: you will be
+elected in proportion to your voting power, and we keep changing the leaders.
+So it can't happen that one guy is elected more often than the other guys if
+they have the same voting power. One time it will be guy B, and the next time
+it will be guy B1. So it's not random.
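+
+_A minimal Go sketch of the weighted round robin described above. The type,
+field names, and exact update rule are illustrative assumptions, not
+Tendermint's actual implementation; the authoritative procedure is the
+proposer-selection spec linked at [1]:_
+
+```go
+package main
+
+import "fmt"
+
+// validator is a stand-in type for illustration only.
+type validator struct {
+	name     string
+	power    int64 // voting power
+	priority int64 // accumulated priority, starts at 0
+}
+
+// nextProposer elects the validator with the highest accumulated
+// priority: every round each priority grows by that validator's power,
+// and the winner's priority is decreased by the total power, so a
+// validator with 2x the power is elected roughly twice as often.
+func nextProposer(vals []*validator) *validator {
+	var total int64
+	for _, v := range vals {
+		v.priority += v.power
+		total += v.power
+	}
+	proposer := vals[0]
+	for _, v := range vals[1:] {
+		if v.priority > proposer.priority {
+			proposer = v
+		}
+	}
+	proposer.priority -= total
+	return proposer
+}
+
+func main() {
+	vals := []*validator{
+		{name: "A", power: 2}, // 2x the power of B and C
+		{name: "B", power: 1},
+		{name: "C", power: 1},
+	}
+	// Deterministic: A proposes in 4 of every 8 rounds, B and C in 2 each.
+	for round := 0; round < 8; round++ {
+		fmt.Printf("round %d: proposer %s\n", round, nextProposer(vals).name)
+	}
+}
+```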
+
+**CC**: Assuming the validator set remains unchanged for a month, then if you
+run this function, are you able to know exactly who is going to propose for
+that entire month?
+
+**ZM**: Yes.
+
+**CC**: What're the attack scenarios for this?
+
+**ZM**: This is something which is easily attacked by people who argue that
+Tendermint is not decentralized enough. They say that by knowing the leader,
+you can DDoS the leader, and by DDoSing the leader, you are able to stop
+progress. Because it's true: if you were able to DDoS the leader, the leader
+would not be able to propose, and then effectively we would not be making
+progress. How we are addressing this is the Sentry Architecture. The
+validator—or at least a proper validator—will never be available: you don't
+know the IP address of the validator, and you are never able to open a
+connection to the validator. The validator spawns sentry nodes inside its
+single administration domain, and the only connections from the validator are
+to its sentry nodes. The IP address of the validator is not shared in the p2p
+network; it's completely private. This is our answer to the DDoS attack. By
+playing it clever with this sentry node architecture, and because your sentry
+nodes are public, you can spawn additional sentry nodes in case your existing
+sentry nodes are being DDoS'd, and then peers will be able to connect to the
+new sentry nodes. We expect the validator to be clever enough that, in case
+they are DDoS'd at the sentry level, they will spawn a different sentry node
+and communicate through it. We are in a sense pushing the responsibility onto
+the validator.
+
+**CC**: So if I understand this correctly, the public identity of the
+validator doesn't even matter, because that entity can obfuscate where their
+real full nodes reside via a proxy through this sentry architecture.
+
+**ZM**: Exactly. So you do know what the address or identity of the validator
+is, but you don't know its network address; you're not able to attack it
+because you don't know where it is. It is completely obfuscated by the sentry
+nodes. Now, if you really want to figure it out… The structure of the
+Tendermint protocol is not fully decentralized, in the sense that the flow of
+information is going from the round proposer, or the round coordinator, to
+other nodes, and then after they receive this it's basically like [inaudible:
+"O to 1"]. So by tracking where this information is coming from, you might be
+able to identify who the sentry nodes behind it are. So if you are doing some
+network analysis, you might be able to deduce something. If things were
+completely static, where the validator would never change their sentry nodes
+or the IP addresses of the sentry nodes, it could be possible to deduce
+something. This is where the economic game comes into play. We are doing an
+economics game there. We say that it's the validator's business: if they are
+not able to hide themselves well enough, they'll be DDoS'd and they will be
+kicked out of the active validator set. So it's in their interest.
+
+[Proposer Selection Procedure in Tendermint][1]. This is how it should work
+no matter what implementation.
+
+**CC**: Going back to the proposer, let's say the validator does get DDoS'd,
+and then the proposer goes down. What happens?
+
+**ZM**: How the proposal mechanism works—there's nothing special there—it
+goes through a sequence of rounds. Normal execution of Tendermint is that for
+each height, we are going through a sequence of rounds, starting from round 0,
+and then we are incrementing through the rounds. The nodes are moving through
+the rounds as part of the normal procedure until they decide to commit. In
+case you have one proposer—the proposer of a single round—being DDoS'd, we
+will probably not decide in that round, because he will not be able to send
+his proposal. So we will go to the next round, and hopefully the next
+proposer will be able to communicate with the validators, and then we'll
+decide in the next round.
+
+**CC**: Are there timeouts between one round and another, if a round gets
+skipped?
+
+**ZM**: There are timeouts. It's a bit more complex; I think we have 5
+timeouts. We may be able to simplify this a bit. What is important to
+understand is: the only condition which needs to be satisfied so we can go to
+the next round is that your validator is able to communicate with more than
+2/3 of the voting power. To be able to move to the next round, you need to
+receive pre-commit messages equivalent to more than 2/3 of the voting power.
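+
+_The "more than 2/3 of voting power" condition is a tally weighted by power,
+not a count of messages. A minimal sketch, with names that are illustrative
+assumptions rather than Tendermint's actual types:_
+
+```go
+// hasTwoThirdsMajority reports whether the validators heard from so far
+// carry more than 2/3 of the total voting power, which is the condition
+// for advancing between steps described above.
+func hasTwoThirdsMajority(voted map[string]bool, powers map[string]int64) bool {
+	var received, total int64
+	for val, power := range powers {
+		total += power
+		if voted[val] {
+			received += power
+		}
+	}
+	return received*3 > total*2
+}
+```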
+
+We have two kinds of messages. 1) Proposal: where the current round proposer
+is suggesting what the next block should look like. This is the first one;
+every round starts with the proposer sending a proposal. And then there are
+two more rounds of voting, where the validators are trying to agree whether
+they will commit the proposal or not. The first of these vote messages is
+called `pre-vote` and the second one is `pre-commit`. Now, to be able to move
+between steps, between the `pre-vote` and `pre-commit` steps, you need to
+receive a sufficient number of messages, where a message sent by validator A
+carries a weight, or voting power, equal to the voting power of that
+validator. Before you receive messages carrying more than 2/3 of the voting
+power, you are not able to move to the higher round. Only when you receive
+more than 2/3 of the messages do you actually start the timeout. The timeout
+happens only after you receive enough messages, and it exists because of the
+asynchrony of the message communication: with this timeout, you give the
+other guys more time to receive messages which may be delayed.
+
+**CC**: So this process you just described, via the whole network gossiping
+before we commit a block, is what makes Tendermint BFT deterministic in a
+partially synchronous setting, vs Bitcoin, which has synchrony assumptions
+whereby blocks are first mined and then gossiped to the network.
+
+**ZM**: It's true that in Bitcoin, this is where the synchrony assumption
+comes into play, because if they're not able to communicate in a timely way,
+they are not able to converge to a single longest chain. Why are they not
+able to decrease the timeout in Bitcoin? Because if they would decrease it,
+there would be so many forks that they won't be able to converge to a single
+chain. By increasing this complexity and the block time, they're able to have
+not so many forks. This is effectively the timing assumption—the block
+duration, in a sense, because it's enough time so that the decided block is
+propagated through the network before someone else starts deciding on the
+same block and creating forks. It's very different from the consensus
+algorithms in a distributed computing setup where Tendermint fits. In
+Tendermint, when we talk about the timing dependency, it is really part of
+this 3-communication-step protocol I just explained. We have the following
+assumption: if the good guys are not able to communicate timely and reliably,
+without having message loss within a round, then Tendermint will not make
+progress—it will not be making blocks. So if you are in a completely
+asynchronous network where messages get lost or delayed unpredictably,
+Tendermint will not make progress; it will not create forks, but it will not
+decide, it will not tell you what the next block is. Termination is a
+liveness property of consensus, a guarantee to decide, and for that we do
+need timing assumptions: within a round, correct validators must be able to
+communicate to each other the consensus messages, not the transactions, but
+the consensus messages. They need to communicate in a timely and reliable
+fashion. But this doesn't need to hold forever. When we say it's a partially
+synchronous system, we assume that the system will be going through periods
+of asynchrony, where we don't have this guarantee; the messages will be
+delayed or some will be lost, and then we will not make progress for some
+period of time, or we're not guaranteed to make progress. And there are
+periods of synchrony where these guarantees hold. And if we think about the
+internet, the internet is best described using such a model. Sometimes when
+we send a message from SF to Belgrade, it takes 100 ms, sometimes it takes
+300 ms, sometimes it takes 1 s. But in most cases, it takes 100 ms or less
+than this.
+
+There is one thing which would be really nice if you understand it. In a
+global wide area network, we can't make assumptions about the communication
+unless we are very conservative about this. If you want to be very fast, then
+we can't make an assumption and say we'll for sure be communicating with 1 ms
+communication delay. Because of the complexity and various congestion issues
+on the network, it might happen that during a short period of time, this
+doesn't hold. If this doesn't hold and you depend on it for the correctness
+of your protocol, you will have a fork.
+So the partially synchronous protocols, most of them, like Tendermint, don't
+depend on timing assumptions about the internet for correctness. This is
+where we state: safety always. So we never make a fork, no matter how bad our
+estimates about the internet communication delays are. We'll never make a
+fork, but we do make some assumptions, and these assumptions are built into
+the timeouts in our protocol, which are actually adaptive. So we are adapting
+to the current conditions, and this is where we're saying: we do assume some
+properties, or some communication delays, to eventually hold on the network.
+During those periods, we guarantee that we will be deciding and committing
+blocks, and we will be doing this very fast; we will basically run at the
+speed of the current network.
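+
+_A minimal sketch of what "adaptive timeouts" can look like, mirroring the
+`timeout_propose`/`timeout_propose_delta` pattern visible in the `config.toml`
+files elsewhere in this changeset; the linear growth rule and values here are
+illustrative assumptions, not the exact implementation:_
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// roundTimeout grows the waiting time with each failed round, so the
+// protocol adapts when the network is slower than expected.
+func roundTimeout(base, delta time.Duration, round int) time.Duration {
+	return base + time.Duration(round)*delta
+}
+
+func main() {
+	base := 3000 * time.Millisecond // cf. timeout_propose
+	delta := 500 * time.Millisecond // cf. timeout_propose_delta
+	for round := 0; round < 4; round++ {
+		fmt.Printf("round %d: propose timeout %v\n", round, roundTimeout(base, delta, round))
+	}
+}
+```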
+
+**CC**: We make liveness assumptions based on the integrity of the validator
+businesses, assuming they're up and running fine.
+
+**ZM**: This is where we are saying the protocol will be live if we have at
+most 1/3, or a bit less than 1/3, of faulty validators, which means that all
+the other guys should be online and available. This is also for liveness, and
+it is related to the condition that we are not able to make progress in
+rounds if we don't receive enough messages. If half of our voting power, or
+half of our validators, are down, we don't have enough messages, so the
+protocol is completely blocked. It doesn't make progress in a round, which
+means nothing is able to be signed. So it's completely critical for
+Tendermint that we make progress in rounds. It's like breathing. Tendermint
+is breathing. If there is no progress, it's dead; it's blocked; we're not
+able to breathe; that's why we're not able to make progress.
+
+**CC**: How does Tendermint compare to other consensus algos?
+
+**ZM**: Tendermint is a very interesting protocol. From an academic point of
+view, I'm convinced that there is value there. Hopefully, we prove it by
+publishing it at some good conference. What is novel? If we first compare
+Tendermint to the existing BFT problem, it's a continuation of academic
+research on BFT consensus. What is novel in Tendermint is that it somehow
+merges the consensus protocol with gossip. This is a completely novel idea.
+Originally, in BFT, people were assuming a single administration domain, a
+small number of nodes, a local area network, 4-7 nodes max. If you look at
+the research papers, 99% of them have this kind of setup. Wide area was
+studied, but there is significantly less work on wide area networks. No one
+studied how to scale those protocols to hundreds or thousands of nodes before
+blockchain. It was always a single administration domain. So in Tendermint
+now, you are able to reach consensus among different administration domains,
+of which there are potentially hundreds, in a wide area network. The system
+model is potentially harder because we have more nodes and a wide area
+network. The second thing is that, normally, BFT protocols are designed in a
+way that has two phases, or two parts. The first is called the normal case,
+which is normally quite simple. But for some failures which are part of the
+normal execution of the protocol, like for example the leader crashing or
+being DDoS'd, they need to go through a quite complex protocol, which is
+called view change or leader election or whatever. These two parts of the
+same protocol have quite different complexity, and most of the people only
+understand the normal case. In Tendermint, there is no such difference. We
+have only one protocol, not two. It's always the same steps, and they are
+much closer to the normal case than to this complex view change protocol.
+
+_This is a bit too technical, but on a high level these are the things to
+remember: the system it addresses is harder than the others, and the
+algorithm complexity in Tendermint is simpler._ The initial goal of Jae and
+Bucky, which is inspired by Raft, is that it's simple, so normal engineers
+could understand it.
+
+**CC**: Can you expand on the termination requirement?
+
+_Important point about Liveness in Tendermint_
+
+**ZM**: In Tendermint, we are saying that, for termination, we are making the
+assumption that the system is partially synchronous. And in a partially
+synchronous system model, we are able to mathematically prove that the
+protocol will make decisions; it will decide.
+
+**CC**: What is a persistent peer?
+
+**ZM**: It's a list of peer identities which you will try to establish
+connections to; in case a connection is broken, Tendermint will automatically
+try to reestablish it. These are important peers; you will really try
+persistently to establish connections to them. Other peers you just drop, and
+you try to connect to someone else from your address book. The address book
+is a list of peers which you have discovered exist, because we are talking
+about a very dynamic network—so the nodes are coming and going away—and the
+gossiping protocol is discovering new nodes and gossiping them around. So
+every node will keep a list of the new nodes it discovers, and when you need
+to establish a connection to a peer, you'll look in the address book and get
+some addresses from there. There's a categorization/ranking of nodes there.
+
+[1]: https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/consensus/proposer-selection.md
diff --git a/docs/install.md b/docs/introduction/install.md
similarity index 99%
rename from docs/install.md
rename to docs/introduction/install.md
index ff101715b..d02691102 100644
--- a/docs/install.md
+++ b/docs/introduction/install.md
@@ -57,7 +57,7 @@ cd $GOPATH/src/github.com/tendermint/tendermint
make install
```
-To upgrade, run 
+To upgrade, run
```
cd $GOPATH/src/github.com/tendermint/tendermint
diff --git a/docs/introduction.md b/docs/introduction/introduction.md
similarity index 92%
rename from docs/introduction.md
rename to docs/introduction/introduction.md
index 4503e195e..d43fa9b2d 100644
--- a/docs/introduction.md
+++ b/docs/introduction/introduction.md
@@ -61,13 +61,14 @@ providing basic services to distributed systems, such as dynamic
configuration, service discovery, locking, leader-election, and so on.
Tendermint is in essence similar software, but with two key differences:
+
- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a
-1/3 of failures, but those failures can include arbitrary behaviour -
-including hacking and malicious attacks. - It does not specify a
-particular application, like a fancy key-value store. Instead, it
-focuses on arbitrary state machine replication, so developers can build
-the application logic that's right for them, from key-value store to
-cryptocurrency to e-voting platform and beyond.
+  1/3 of failures, but those failures can include arbitrary behaviour -
+  including hacking and malicious attacks. - It does not specify a
+  particular application, like a fancy key-value store.
Instead, it + focuses on arbitrary state machine replication, so developers can build + the application logic that's right for them, from key-value store to + cryptocurrency to e-voting platform and beyond. The layout of this Tendermint website content is also ripped directly and without shame from [consul.io](https://www.consul.io/) and the other @@ -167,16 +168,16 @@ maintains a fully audited Unspent Transaction Output (UTXO) database. If one wanted to create a Bitcoin-like system on top of ABCI, Tendermint Core would be responsible for -- Sharing blocks and transactions between nodes -- Establishing a canonical/immutable order of transactions - (the blockchain) +- Sharing blocks and transactions between nodes +- Establishing a canonical/immutable order of transactions + (the blockchain) The application will be responsible for -- Maintaining the UTXO database -- Validating cryptographic signatures of transactions -- Preventing transactions from spending non-existent transactions -- Allowing clients to query the UTXO database. +- Maintaining the UTXO database +- Validating cryptographic signatures of transactions +- Preventing transactions from spending non-existent transactions +- Allowing clients to query the UTXO database. Tendermint is able to decompose the blockchain design by offering a very simple API (ie. the ABCI) between the application process and consensus @@ -242,14 +243,14 @@ Java, C++, Python, or Go. Game programmers and blockchain developers are already familiar with creating deterministic programs by avoiding sources of non-determinism such as: -- random number generators (without deterministic seeding) -- race conditions on threads (or avoiding threads altogether) -- the system clock -- uninitialized memory (in unsafe programming languages like C - or C++) -- [floating point - arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/) -- language features that are random (e.g. map iteration in Go) +- random number generators (without deterministic seeding) +- race conditions on threads (or avoiding threads altogether) +- the system clock +- uninitialized memory (in unsafe programming languages like C + or C++) +- [floating point + arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/) +- language features that are random (e.g. map iteration in Go) While programmers can avoid non-determinism by being careful, it is also possible to create a special linter or static analyzer for each language @@ -298,8 +299,8 @@ introduces a few **locking** rules which modulate which paths can be followed in the flow diagram. Once a validator precommits a block, it is locked on that block. Then, -1) it must prevote for the block it is locked on -2) it can only unlock, and precommit for a new block, if there is a +1. it must prevote for the block it is locked on +2. it can only unlock, and precommit for a new block, if there is a polka for that block in a later round ## Stake diff --git a/docs/examples/getting-started.md b/docs/introduction/quick-start.md similarity index 66% rename from docs/examples/getting-started.md rename to docs/introduction/quick-start.md index ed61f9998..8e4908784 100644 --- a/docs/examples/getting-started.md +++ b/docs/introduction/quick-start.md @@ -3,17 +3,16 @@ ## Overview This is a quick start guide. If you have a vague idea about how Tendermint -works and want to get started right away, continue. Otherwise, [review the -documentation](http://tendermint.readthedocs.io/en/master/). 
+works and want to get started right away, continue.

## Install

### Quick Install

-On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vpgEI), like so:
+Installation on a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/fFfOR), like so:

```
-curl -L https://git.io/vpgEI | bash
+curl -L https://git.io/fFfOR | bash
source ~/.profile
```

@@ -24,6 +23,7 @@ The script is also used to facilitate cluster deployment below.

### Manual Install

Requires:
+
- `go` minimum version 1.10
- `$GOPATH` environment variable must be set
- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
@@ -43,7 +43,7 @@ Confirm installation:

```
$ tendermint version
-0.18.0-XXXXXXX
+0.23.0-dev
```

## Initialization

@@ -122,22 +122,22 @@ First create four Ubuntu cloud machines. The following was tested on Digital
Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective
IP addresses below as IP1, IP2, IP3, IP4.

-Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
+Then, `ssh` into each machine, and execute [this script](https://git.io/fFfOR):

```
-curl -L https://git.io/vpgEI | bash
+curl -L https://git.io/fFfOR | bash
source ~/.profile
```

This will install `go` and other dependencies, get the Tendermint source code, then compile the `tendermint` binary.

-Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
+Next, use the `tendermint testnet` command to create four directories of config files (found in `./mytestnet`) and copy each directory to the relevant machine in the cloud, so that each machine has a `$HOME/mytestnet/node[0-3]` directory. Then from each machine, run:

```
-tendermint node --home ./node0 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
-tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
-tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
-tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
+tendermint node --home ./mytestnet/node0 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
+tendermint node --home ./mytestnet/node1 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
+tendermint node --home ./mytestnet/node2 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
+tendermint node --home ./mytestnet/node3 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
```

Note that after the third node is started, blocks will start to stream in
diff --git
a/docs/networks/deploy-testnets.md b/docs/networks/deploy-testnets.md
new file mode 100644
index 000000000..88e5c6f72
--- /dev/null
+++ b/docs/networks/deploy-testnets.md
@@ -0,0 +1,74 @@
+# Deploy a Testnet
+
+Now that we've seen how ABCI works, and even played with a few
+applications on a single validator node, it's time to deploy a test
+network to four validator nodes.
+
+## Manual Deployments
+
+It's relatively easy to set up a Tendermint cluster manually. The only
+requirements for a particular Tendermint node are a private key for the
+validator, stored as `priv_validator.json`, a node key, stored as
+`node_key.json`, and a list of the public keys of all validators, stored
+as `genesis.json`. These files should be stored in
+`~/.tendermint/config`, or wherever the `$TMHOME` variable might be set
+to.
+
+Here are the steps for setting up a testnet manually:
+
+1. Provision nodes on your cloud provider of choice
+2. Install Tendermint and the application of interest on all nodes
+3. Generate a private key and a node key for each validator using
+   `tendermint init`
+4. Compile a list of public keys for each validator into a
+   `genesis.json` file and replace the existing file with it.
+5. Run
+   `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >` on each node, where `< peer addresses >` is a comma-separated
+   list of ID@IP:PORT combinations, one for each node. The default port for
+   Tendermint is `26656`. The ID of a node can be obtained by running the
+   `tendermint show_node_id` command. Thus, if the IP addresses of your nodes
+   were `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command
+   would look like (note there must be no spaces within the peer list):
+
+```
+tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656,429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656,f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656
+```
+
+After a few seconds, all the nodes should connect to each other and
+start making blocks! For more information, see the Tendermint Networks
+section of [the guide to using Tendermint](./using-tendermint.md).
+
+But wait! Steps 3, 4, and 5 are quite manual. Instead, use the `tendermint testnet` command. By default, running `tendermint testnet` will create all the
+required files, but it won't populate the list of persistent peers. It will do
+so, however, if you provide the `--populate-persistent-peers` flag and the
+optional `--starting-ip-address` flag. Run `tendermint testnet --help` for
+more details on the available flags.
+
+```
+tendermint testnet --populate-persistent-peers --starting-ip-address 192.168.0.1
+```
+
+This command will generate four folders, prefixed with "node", and put them
+into the "./mytestnet" directory by default.
+
+As you might imagine, this command is useful for manual or automated
+deployments.
+
+## Automated Deployments
+
+This is the easiest and fastest way to get a testnet up, in less than 5
+minutes.
+
+### Local
+
+With `docker` and `docker-compose` installed, run the command:
+
+```
+make localnet-start
+```
+
+from the root of the tendermint repository. This will spin up a 4-node
+local testnet. Review the target in the Makefile to debug any problems.
+
+### Cloud
+
+See the [next section](./terraform-and-ansible.html) for details.
diff --git a/docs/specification/fast-sync.rst b/docs/networks/fast-sync.md similarity index 62% rename from docs/specification/fast-sync.rst rename to docs/networks/fast-sync.md index c98ec43a3..e92d82394 100644 --- a/docs/specification/fast-sync.rst +++ b/docs/networks/fast-sync.md @@ -1,8 +1,4 @@ -Fast Sync -========= - -Background ----------- +# Fast Sync In a proof of work blockchain, syncing with the chain is the same process as staying up-to-date with the consensus: download blocks, and @@ -14,21 +10,19 @@ scratch can take a very long time. It's much faster to just download blocks and check the merkle tree of validators than to run the real-time consensus gossip protocol. -Fast Sync ---------- +## Using Fast Sync -To support faster syncing, tendermint offers a ``fast-sync`` mode, which -is enabled by default, and can be toggled in the ``config.toml`` or via -``--fast_sync=false``. +To support faster syncing, tendermint offers a `fast-sync` mode, which +is enabled by default, and can be toggled in the `config.toml` or via +`--fast_sync=false`. In this mode, the tendermint daemon will sync hundreds of times faster than if it used the real-time consensus process. Once caught up, the daemon will switch out of fast sync and into the normal consensus mode. -After running for some time, the node is considered ``caught up`` if it +After running for some time, the node is considered `caught up` if it has at least one peer and it's height is at least as high as the max -reported peer height. See `the IsCaughtUp -method `__. +reported peer height. See [the IsCaughtUp +method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128). If we're lagging sufficiently, we should go back to fast syncing, but -this is an open issue: -https://github.com/tendermint/tendermint/issues/129 +this is an [open issue](https://github.com/tendermint/tendermint/issues/129). diff --git a/docs/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md similarity index 92% rename from docs/terraform-and-ansible.md rename to docs/networks/terraform-and-ansible.md index 55c38cef7..5a4b9c53b 100644 --- a/docs/terraform-and-ansible.md +++ b/docs/networks/terraform-and-ansible.md @@ -12,17 +12,17 @@ script](https://github.com/tendermint/tendermint/blob/develop/networks/remote/in that can be run on a fresh DO droplet and will automatically spin up a 4 node testnet. The script more or less does everything described below. -- Install [Terraform](https://www.terraform.io/downloads.html) and - [Ansible](http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) - on a Linux machine. -- Create a [DigitalOcean API - token](https://cloud.digitalocean.com/settings/api/tokens) with read - and write capability. -- Install the python dopy package (`pip install dopy`) -- Create SSH keys (`ssh-keygen`) -- Set environment variables: - -``` +- Install [Terraform](https://www.terraform.io/downloads.html) and + [Ansible](http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) + on a Linux machine. +- Create a [DigitalOcean API + token](https://cloud.digitalocean.com/settings/api/tokens) with read + and write capability. 
+- Install the python dopy package (`pip install dopy`) +- Create SSH keys (`ssh-keygen`) +- Set environment variables: + +``` export DO_API_TOKEN="abcdef01234567890abcdef01234567890" export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub" ``` @@ -44,6 +44,7 @@ then: terraform init terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" ``` + and you will get a list of IP addresses that belong to your droplets. With the droplets created and running, let's setup Ansible. @@ -154,7 +155,7 @@ page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then: ``` yum install systemd-devel || echo "This will only work on RHEL-based systems." apt-get install libsystemd-dev || echo "This will only work on Debian-based systems." - + go get github.com/mheese/journalbeat ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 ``` diff --git a/docs/package.json b/docs/package.json new file mode 100644 index 000000000..c76bb37c4 --- /dev/null +++ b/docs/package.json @@ -0,0 +1,37 @@ +{ + "dependencies": { + "prettier": "^1.13.7", + "remark-cli": "^5.0.0", + "remark-lint-no-dead-urls": "^0.3.0", + "textlint": "^10.2.1" + }, + "name": "tendermint", + "description": "Tendermint Core Documentation", + "version": "0.0.1", + "main": "README.md", + "devDependencies": {}, + "scripts": { + "lint:json": "prettier \"**/*.json\" --write", + "lint:md": "prettier \"**/*.md\" --write && remark . && textlint \"md/**\"", + "lint": "yarn lint:json && yarn lint:md" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/tendermint/tendermint.git" + }, + "keywords": [ + "tendermint", + "blockchain" + ], + "author": "Tendermint", + "license": "ISC", + "bugs": { + "url": "https://github.com/tendermint/tendermint/issues" + }, + "homepage": "https://tendermint.com/docs/", + "remarkConfig": { + "plugins": [ + "remark-lint-no-dead-urls" + ] + } +} diff --git a/docs/determinism.md b/docs/research/determinism.md similarity index 100% rename from docs/determinism.md rename to docs/research/determinism.md diff --git a/docs/transactional-semantics.md b/docs/research/transactional-semantics.md similarity index 100% rename from docs/transactional-semantics.md rename to docs/research/transactional-semantics.md diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index e59d3c8ce..49c88475b 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -2,8 +2,8 @@ ## Amino -Tendermint uses the Protobuf3 derivative [Amino](https://github.com/tendermint/go-amino) for all data structures. -Think of Amino as an object-oriented Protobuf3 with native JSON support. +Tendermint uses the proto3 derivative [Amino](https://github.com/tendermint/go-amino) for all data structures. +Think of Amino as an object-oriented proto3 with native JSON support. The goal of the Amino encoding protocol is to bring parity between application logic objects and persistence objects. @@ -21,7 +21,7 @@ arbitrary object and return the Amino encoded bytes. ## Byte Arrays The encoding of a byte array is simply the raw-bytes prefixed with the length of -the array as a `UVarint` (what Protobuf calls a `Varint`). +the array as a `UVarint` (what proto calls a `Varint`). For details on varints, see the [protobuf spec](https://developers.google.com/protocol-buffers/docs/encoding#varints). @@ -54,17 +54,15 @@ familiar with amino encoding. 
You can simply use below table and concatenate Prefix || Length (of raw bytes) || raw bytes
( while || stands for byte concatenation here).

-| Type | Name | Prefix | Length |
-| ---- | ---- | ------ | ----- |
-| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE62 | 0x20 |
-| PubKeyLedgerEd25519 | tendermint/PubKeyLedgerEd25519 | 0x5C3453B2 | 0x20 |
-| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE982 | 0x21 |
-| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288912 | 0x40 |
-| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79A | 0x20 |
-| PrivKeyLedgerSecp256k1 | tendermint/PrivKeyLedgerSecp256k1 | 0x10CAB393 | variable |
-| PrivKeyLedgerEd25519 | tendermint/PrivKeyLedgerEd25519 | 0x0CFEEF9B | variable |
-| SignatureEd25519 | tendermint/SignatureKeyEd25519 | 0x3DA1DB2A | 0x40 |
-| SignatureSecp256k1 | tendermint/SignatureKeySecp256k1 | 0x16E1FEEA | variable |
+| Type | Name | Prefix | Length | Notes |
+| ---- | ---- | ------ | ----- | ------ |
+| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
+| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
+| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
+| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
+| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | |
+| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | |

 ### Examples

@@ -84,14 +82,13 @@ Addresses for each public key types are computed as follows:

 #### Ed25519

-RIPEMD160 hash of the Amino encoded public key:
+First 20 bytes of the SHA256 hash of the raw 32-byte public key:

 ```
-address = RIPEMD160(AMINO(pubkey))
+address = SHA256(pubkey)[:20]
 ```

-NOTE: this will soon change to the truncated 20-bytes of the SHA256 of the raw
-public key
+NOTE: before v0.22.0, this was the RIPEMD160 of the Amino encoded public key.
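+
+As a concrete sketch of this rule (standard library Go only, not code
+from the repository):
+
+```
+import "crypto/sha256"
+
+// address = SHA256(pubkey)[:20]
+func address(pubkey [32]byte) []byte {
+  h := sha256.Sum256(pubkey[:])
+  return h[:20]
+}
+```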

 #### Secp256k1

@@ -152,9 +149,43 @@ func MakeParts(obj interface{}, partSize int) []Part

 ## Merkle Trees

-Simple Merkle trees are used in numerous places in Tendermint to compute a cryptographic digest of a data structure.
+For an overview of Merkle trees, see
+[wikipedia](https://en.wikipedia.org/wiki/Merkle_tree).

-RIPEMD160 is always used as the hashing function.
+
+A Simple Tree is a compact binary tree for a static list of items. Simple
+Merkle trees are used in numerous places in Tendermint to compute a
+cryptographic digest of a data structure. For example, the transactions
+and validation signatures of a block are hashed using this Simple Tree
+logic.
+
+If the number of items is not a power of two, the tree will not be full
+and some leaf nodes will be at different levels. Simple Tree tries to
+keep both sides of the tree the same size, but the left side may be one
+greater, for example:
+
+```
+     Simple Tree with 6 items         Simple Tree with 7 items
+
+               *                                 *
+              / \                               / \
+            /     \                           /     \
+          /         \                       /         \
+        /             \                   /             \
+       *               *                 *               *
+      / \             / \               / \             / \
+     /   \           /   \             /   \           /   \
+    /     \         /     \           /     \         /     \
+   *       h2      *       h5        *       *       *       h6
+  / \             / \               / \     / \     / \
+ h0  h1          h3  h4            h0  h1  h2  h3  h4  h5
+```
+
+Tendermint always uses the `TMHASH` hash function, which is the first 20
+bytes of the SHA256 hash:
+
+```
+func TMHASH(bz []byte) []byte {
+  shasum := SHA256(bz)
+  return shasum[:20]
+}
+```

 ### Simple Merkle Root

@@ -177,7 +208,7 @@ func SimpleMerkleRoot(hashes [][]byte) []byte{
 func SimpleConcatHash(left, right []byte) []byte{
   left = encodeByteSlice(left)
   right = encodeByteSlice(right)
-  return RIPEMD160 (append(left, right))
+  return TMHASH(append(left, right))
 }
 ```

@@ -185,8 +216,8 @@ Note that the leaves are Amino encoded as byte-arrays (ie. simple Uvarint length
 prefix) before being concatenated together and hashed.

 Note: we will abuse notion and invoke `SimpleMerkleRoot` with arguments of type `struct` or type `[]struct`.
-For `struct` arguments, we compute a `[][]byte` by sorting elements of the `struct` according to
-field name and then hashing them.
+For `struct` arguments, we compute a `[][]byte` containing the hash of each
+field in the struct sorted by the hash of the field name.
 For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `struct` elements.

 ### Simple Merkle Proof

@@ -230,6 +261,18 @@ func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byt
 }
 ```

+### Simple Tree with Dictionaries
+
+The Simple Tree is used to merkelize a list of items, so to merkelize a
+(short) dictionary of key-value pairs, encode the dictionary as an
+ordered list of `KVPair` structs. The block hash is one such hash,
+derived from all the fields of the block `Header`. The state hash is
+similarly derived.
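+
+A rough sketch of merkelizing such a dictionary, using `TMHASH` and
+`SimpleMerkleRoot` as defined above (the helper name is invented, and
+the per-pair encoding is simplified to plain concatenation; the real
+encoding is Amino):
+
+```
+type KVPair struct {
+  Key   string
+  Value []byte
+}
+
+// kvPairsMerkleRoot hashes each KVPair in order and feeds the list of
+// hashes to SimpleMerkleRoot.
+func kvPairsMerkleRoot(pairs []KVPair) []byte {
+  hashes := make([][]byte, len(pairs))
+  for i, kv := range pairs {
+    hashes[i] = TMHASH(append([]byte(kv.Key), kv.Value...))
+  }
+  return SimpleMerkleRoot(hashes)
+}
+```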
+
+### IAVL+ Tree
+
+Because Tendermint only uses a Simple Merkle Tree, application developers
+are expected to use their own Merkle tree in their applications. For
+example, the IAVL+ Tree, an immutable self-balancing binary tree for
+persisting application state, is used by the
+[Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/core/multistore.md).
+
 ## JSON

 ### Amino
diff --git a/docs/spec/consensus/consensus.md b/docs/spec/consensus/consensus.md
index 1bf075773..d6804779c 100644
--- a/docs/spec/consensus/consensus.md
+++ b/docs/spec/consensus/consensus.md
@@ -1,9 +1,329 @@
-We are working to finalize an updated Tendermint specification with formal
-proofs of safety and liveness.
+# Byzantine Consensus Algorithm

-In the meantime, see the [description in the
-docs](http://tendermint.readthedocs.io/en/master/specification/byzantine-consensus-algorithm.html).
+## Terms

-There are also relevant but somewhat outdated descriptions in Jae Kwon's [original
-whitepaper](https://tendermint.com/static/docs/tendermint.pdf) and Ethan Buchman's [master's
-thesis](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769).
+- The network is composed of optionally connected *nodes*. Nodes
+  directly connected to a particular node are called *peers*.
+- The consensus process in deciding the next block (at some *height*
+  `H`) is composed of one or many *rounds*.
+- `NewHeight`, `Propose`, `Prevote`, `Precommit`, and `Commit`
+  represent state machine states of a round. (aka `RoundStep` or
+  just "step").
+- A node is said to be *at* a given height, round, and step, or at
+  `(H,R,S)`, or at `(H,R)` in short to omit the step.
+- To *prevote* or *precommit* something means to broadcast a [prevote
+  vote](https://godoc.org/github.com/tendermint/tendermint/types#Vote)
+  or [first precommit
+  vote](https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit)
+  for something.
+- A vote *at* `(H,R)` is a vote signed with the bytes for `H` and `R`
+  included in its [sign-bytes](block-structure.html#vote-sign-bytes).
+- *+2/3* is short for "more than 2/3"
+- *1/3+* is short for "1/3 or more"
+- A set of +2/3 of prevotes for a particular block or `<nil>` at
+  `(H,R)` is called a *proof-of-lock-change* or *PoLC* for short.
+
+## State Machine Overview
+
+At each height of the blockchain a round-based protocol is run to
+determine the next block. Each round is composed of three *steps*
+(`Propose`, `Prevote`, and `Precommit`), along with two special steps
+`Commit` and `NewHeight`.
+
+In the optimal scenario, the order of steps is:
+
+```
+NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->...
+```
+
+The sequence `(Propose -> Prevote -> Precommit)` is called a *round*.
+There may be more than one round required to commit a block at a given
+height. Examples for why more rounds may be required include:
+
+- The designated proposer was not online.
+- The block proposed by the designated proposer was not valid.
+- The block proposed by the designated proposer did not propagate
+  in time.
+- The block proposed was valid, but +2/3 of prevotes for the proposed
+  block were not received in time for enough validator nodes by the
+  time they reached the `Precommit` step. Even though +2/3 of prevotes
+  are necessary to progress to the next step, at least one validator
+  may have voted `<nil>` or maliciously voted for something else.
+- The block proposed was valid, and +2/3 of prevotes were received for
+  enough nodes, but +2/3 of precommits for the proposed block were not
+  received for enough validator nodes.
+
+Some of these problems are resolved by moving on to the next round &
+proposer. Others are resolved by increasing certain round timeout
+parameters over each successive round.
+
+## State Machine Diagram
+
+```
+                         +-------------------------------------+
+                         v                                     |(Wait til `CommitTime+timeoutCommit`)
+                   +-----------+                         +-----+-----+
+      +----------> |  Propose  +--------------+          | NewHeight |
+      |            +-----------+              |          +-----------+
+      |                                       |                ^
+      |(Else, after timeoutPrecommit)         v                |
++-----+-----+                           +-----------+          |
+| Precommit |  <------------------------+  Prevote  |          |
++-----+-----+                           +-----------+          |
+      |(When +2/3 Precommits for block found)                  |
+      v                                                        |
++--------------------------------------------------------------------+
+|  Commit                                                             |
+|                                                                     |
+|  * Set CommitTime = now;                                            |
+|  * Wait for block, then stage/save/commit block;                    |
++--------------------------------------------------------------------+
+```
+
+## Background Gossip
+
+A node may not have a corresponding validator private key, but it
+nevertheless plays an active role in the consensus process by relaying
+relevant meta-data, proposals, blocks, and votes to its peers. A node
+that has the private keys of an active validator and is engaged in
+signing votes is called a *validator-node*. All nodes (not just
+validator-nodes) have an associated state (the current height, round,
+and step) and work to make progress.
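+
+Since every node tracks its position in the protocol as the `(H,R,S)`
+triple defined in the Terms above, a minimal sketch of that state record
+(illustrative names only, not the actual types from the `consensus`
+package) might look like:
+
+```
+// RoundStep enumerates the step states of a round.
+type RoundStep int
+
+const (
+  StepNewHeight RoundStep = iota
+  StepPropose
+  StepPrevote
+  StepPrecommit
+  StepCommit
+)
+
+// NodeState is the (height, round, step) triple that every node,
+// validator or not, maintains and broadcasts to its peers.
+type NodeState struct {
+  Height int64     // H: the height currently being decided
+  Round  int       // R: the current round at height H
+  Step   RoundStep // S: the current step within round R
+}
+```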
+
+Between two nodes there exists a `Connection`, and multiplexed on top of
+this connection are fairly throttled `Channel`s of information. An
+epidemic gossip protocol is implemented among some of these channels to
+bring peers up to speed on the most recent state of consensus. For
+example:
+
+- Nodes gossip `PartSet` parts of the current round's proposer's
+  proposed block. A LibSwift inspired algorithm is used to quickly
+  broadcast blocks across the gossip network.
+- Nodes gossip prevote/precommit votes. A node `NODE_A` that is ahead
+  of `NODE_B` can send `NODE_B` prevotes or precommits for `NODE_B`'s
+  current (or future) round to enable it to progress forward.
+- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change)
+  round if one is proposed.
+- Nodes gossip to nodes lagging in blockchain height with block
+  [commits](https://godoc.org/github.com/tendermint/tendermint/types#Commit)
+  for older blocks.
+- Nodes opportunistically gossip `HasVote` messages to hint peers about
+  which votes they already have.
+- Nodes broadcast their current state to all neighboring peers (but
+  this is not gossiped further).
+
+There's more, but let's not get ahead of ourselves here.
+
+## Proposals
+
+A proposal is signed and published by the designated proposer at each
+round. The proposer is chosen by a deterministic and non-choking round
+robin selection algorithm that selects proposers in proportion to their
+voting power (see
+[implementation](https://github.com/tendermint/tendermint/blob/develop/types/validator_set.go)).
+
+A proposal at `(H,R)` is composed of a block and an optional latest
+`PoLC-Round < R` which is included iff the proposer knows of one. This
+hints the network to allow nodes to unlock (when safe) to ensure the
+liveness property.
+
+## State Machine Spec
+
+### Propose Step (height:H,round:R)
+
+Upon entering `Propose`:
+
+- The designated proposer proposes a block at `(H,R)`.
+
+The `Propose` step ends:
+
+- After `timeoutProposeR` after entering `Propose`. --> goto
+  `Prevote(H,R)`
+- After receiving proposal block and all prevotes at `PoLC-Round`. -->
+  goto `Prevote(H,R)`
+- After [common exit conditions](#common-exit-conditions)
+
+### Prevote Step (height:H,round:R)
+
+Upon entering `Prevote`, each validator broadcasts its prevote vote.
+
+- First, if the validator is locked on a block since `LastLockRound`
+  but now has a PoLC for something else at round `PoLC-Round` where
+  `LastLockRound < PoLC-Round < R`, then it unlocks.
+- If the validator is still locked on a block, it prevotes that.
+- Else, if the proposed block from `Propose(H,R)` is good, it
+  prevotes that.
+- Else, if the proposal is invalid or wasn't received on time, it
+  prevotes `<nil>`.
+
+The `Prevote` step ends:
+
+- After +2/3 prevotes for a particular block or `<nil>`. --> goto
+  `Precommit(H,R)`
+- After `timeoutPrevote` after receiving any +2/3 prevotes. --> goto
+  `Precommit(H,R)`
+- After [common exit conditions](#common-exit-conditions)
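+
+As a rough illustration of the prevote rule just described (a sketch
+with invented names, not the actual implementation; a nil return stands
+for a `<nil>` prevote):
+
+```
+// Block stands in for a proposed block; Valid marks whether this
+// validator considers it good.
+type Block struct{ Valid bool }
+
+// prevoteTarget returns the block to prevote, or nil for <nil>.
+func prevoteTarget(locked, proposed, polcBlock *Block,
+  lastLockRound, polcRound, round int) *Block {
+  // First, unlock if there is a newer PoLC for something else.
+  if locked != nil && polcBlock != nil && polcBlock != locked &&
+    lastLockRound < polcRound && polcRound < round {
+    locked = nil
+  }
+  // If still locked on a block, prevote it.
+  if locked != nil {
+    return locked
+  }
+  // Else, prevote a good proposal...
+  if proposed != nil && proposed.Valid {
+    return proposed
+  }
+  // ...or <nil> if the proposal is invalid or wasn't received on time.
+  return nil
+}
+```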
+
+### Precommit Step (height:H,round:R)
+
+Upon entering `Precommit`, each validator broadcasts its precommit vote.
+
+- If the validator has a PoLC at `(H,R)` for a particular block `B`, it
+  (re)locks (or changes lock to) and precommits `B` and sets
+  `LastLockRound = R`.
+- Else, if the validator has a PoLC at `(H,R)` for `<nil>`, it unlocks
+  and precommits `<nil>`.
+- Else, it keeps the lock unchanged and precommits `<nil>`.
+
+A precommit for `<nil>` means "I didn’t see a PoLC for this round, but I
+did get +2/3 prevotes and waited a bit".
+
+The `Precommit` step ends:
+
+- After +2/3 precommits for `<nil>`. --> goto `Propose(H,R+1)`
+- After `timeoutPrecommit` after receiving any +2/3 precommits. -->
+  goto `Propose(H,R+1)`
+- After [common exit conditions](#common-exit-conditions)
+
+### Common exit conditions
+
+- After +2/3 precommits for a particular block. --> goto
+  `Commit(H)`
+- After any +2/3 prevotes received at `(H,R+x)`. --> goto
+  `Prevote(H,R+x)`
+- After any +2/3 precommits received at `(H,R+x)`. --> goto
+  `Precommit(H,R+x)`
+
+### Commit Step (height:H)
+
+- Set `CommitTime = now()`
+- Wait until block is received. --> goto `NewHeight(H+1)`
+
+### NewHeight Step (height:H)
+
+- Move `Precommits` to `LastCommit` and increment height.
+- Set `StartTime = CommitTime+timeoutCommit`
+- Wait until `StartTime` to receive straggler commits. --> goto
+  `Propose(H,0)`
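+
+Throughout these steps, "+2/3" and "1/3+" refer to fractions of the
+total voting power, not to counts of nodes. A minimal sketch of the
+threshold checks (invented helper names):
+
+```
+// moreThanTwoThirds reports whether votedPower is +2/3 of totalPower,
+// i.e. strictly more than two thirds of the total voting power.
+func moreThanTwoThirds(votedPower, totalPower int64) bool {
+  return 3*votedPower > 2*totalPower
+}
+
+// oneThirdOrMore reports whether power is 1/3+ of totalPower,
+// i.e. one third or more of the total voting power.
+func oneThirdOrMore(power, totalPower int64) bool {
+  return 3*power >= totalPower
+}
+```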
+
+## Proofs
+
+### Proof of Safety
+
+Assume that at most -1/3 of the voting power of validators is byzantine.
+If a validator commits block `B` at round `R`, it's because it saw +2/3
+of precommits at round `R`. This implies that 1/3+ of honest nodes are
+still locked at round `R' > R`. These locked validators will remain
+locked until they see a PoLC at `R' > R`, but this won't happen because
+1/3+ are locked and honest, so at most -2/3 are available to vote for
+anything other than `B`.
+
+### Proof of Liveness
+
+If 1/3+ honest validators are locked on two different blocks from
+different rounds, a proposer's `PoLC-Round` will eventually cause nodes
+locked from the earlier round to unlock. Eventually, the designated
+proposer will be one that is aware of a PoLC at the later round. Also,
+`timeoutProposeR` increments with round `R`, while the size of a
+proposal is capped, so eventually the network is able to "fully gossip"
+the whole proposal (e.g. the block & PoLC).
+
+### Proof of Fork Accountability
+
+Define the JSet (justification-vote-set) at height `H` of a validator
+`V1` to be all the votes signed by the validator at `H` along with
+justification PoLC prevotes for each lock change. For example, if `V1`
+signed the following precommits: `Precommit(B1 @ round 0)`,
+`Precommit(<nil> @ round 1)`, `Precommit(B2 @ round 4)` (note that no
+precommits were signed for rounds 2 and 3, and that's ok),
+`Precommit(B1 @ round 0)` must be justified by a PoLC at round 0, and
+`Precommit(B2 @ round 4)` must be justified by a PoLC at round 4; but
+the precommit for `<nil>` at round 1 is not a lock-change by definition
+so the JSet for `V1` need not include any prevotes at round 1, 2, or 3
+(unless `V1` happened to have prevoted for those rounds).
+
+Further, define the JSet at height `H` of a set of validators `VSet` to
+be the union of the JSets for each validator in `VSet`. For a given
+commit by honest validators at round `R` for block `B` we can construct
+a JSet to justify the commit for `B` at `R`. We say that a JSet
+*justifies* a commit at `(H,R)` if all the committers (validators in the
+commit-set) are each justified in the JSet with no duplicitous vote
+signatures (by the committers).
+
+- **Lemma**: When a fork is detected by the existence of two
+  conflicting [commits](./validators.html#commiting-a-block), the
+  union of the JSets for both commits (if they can be compiled) must
+  include double-signing by at least 1/3+ of the validator set.
+  **Proof**: The commit cannot be at the same round, because that
+  would immediately imply double-signing by 1/3+. Take the union of
+  the JSets of both commits. If there is no double-signing by at least
+  1/3+ of the validator set in the union, then no honest validator
+  could have precommitted any different block after the first commit.
+  Yet, +2/3 did. Reductio ad absurdum.
+
+As a corollary, when there is a fork, an external process can determine
+the blame by requiring each validator to justify all of its round votes.
+Either we will find 1/3+ who cannot justify at least one of their votes,
+and/or, we will find 1/3+ who had double-signed.
+
+### Alternative algorithm
+
+Alternatively, we can take the JSet of a commit to be the "full commit".
+That is, if light clients and validators do not consider a block to be
+committed unless the JSet of the commit is also known, then we get the
+desirable property that if there ever is a fork (e.g. there are two
+conflicting "full commits"), then 1/3+ of the validators are immediately
+punishable for double-signing.
+
+There are many ways to ensure that the gossip network efficiently shares
+the JSet of a commit. One solution is to add a new message type that
+tells peers that this node has (or does not have) a +2/3 majority for B
+(or `<nil>`) at (H,R), and a bitarray of which votes contributed towards
+that majority. Peers can react by responding with appropriate votes.
+
+We will implement such an algorithm for the next iteration of the
+Tendermint consensus protocol.
+
+Other potential improvements include adding more data in votes such as
+the last known PoLC round that caused a lock change, and the last voted
+round/step (or, we may require that validators not skip any votes). This
+may make JSet verification/gossip logic easier to implement.
+
+### Censorship Attacks
+
+Due to the definition of a block
+[commit](../../tendermint-core/validator.md#commiting-a-block), any 1/3+ coalition of
+validators can halt the blockchain by not broadcasting their votes. Such
+a coalition can also censor particular transactions by rejecting blocks
+that include these transactions, though this would result in a
+significant proportion of block proposals being rejected, which would
+slow down the rate of block commits of the blockchain, reducing its
+utility and value. The malicious coalition might also broadcast votes in
+a trickle so as to grind blockchain block commits to a near halt, or
+engage in any combination of these attacks.
+
+If a global active adversary were also involved, it could partition the
+network in such a way that it may appear that the wrong subset of
+validators were responsible for the slowdown. This is not just a
+limitation of Tendermint, but rather a limitation of all consensus
+protocols whose network is potentially controlled by an active
+adversary.
+
+### Overcoming Forks and Censorship Attacks
+
+For these types of attacks, a subset of the validators should
+coordinate, through external means, to sign a reorg-proposal that
+chooses a fork (and any evidence thereof) and the initial subset of
+validators with their signatures. Validators who sign such a
+reorg-proposal forego their collateral on all other forks. Clients
+should verify the signatures on the reorg-proposal, verify any evidence,
+and make a judgement or prompt the end-user for a decision. For example,
+a phone wallet app may prompt the user with a security warning, while a
+refrigerator may accept any reorg-proposal signed by +1/2 of the
+original validators.
+ +No non-synchronous Byzantine fault-tolerant algorithm can come to +consensus when 1/3+ of validators are dishonest, yet a fork assumes that +1/3+ of validators have already been dishonest by double-signing or +lock-changing without justification. So, signing the reorg-proposal is a +coordination problem that cannot be solved by any non-synchronous +protocol (i.e. automatically, and without making assumptions about the +reliability of the underlying network). It must be provided by means +external to the weakly-synchronous Tendermint consensus algorithm. For +now, we leave the problem of reorg-proposal coordination to human +coordination via internet media. Validators must take care to ensure +that there are no significant network partitions, to avoid situations +where two conflicting reorg-proposals are signed. + +Assuming that the external coordination medium and protocol is robust, +it follows that forks are less of a concern than [censorship +attacks](#censorship-attacks). diff --git a/docs/spec/scripts/crypto.go b/docs/spec/scripts/crypto.go index aeca07eeb..e040f0e6a 100644 --- a/docs/spec/scripts/crypto.go +++ b/docs/spec/scripts/crypto.go @@ -2,132 +2,15 @@ package main import ( "fmt" + "os" - "github.com/tendermint/tendermint/crypto" + amino "github.com/tendermint/go-amino" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) -// SECRET -var SECRET = []byte("some secret") - -func printEd() { - priv := crypto.GenPrivKeyEd25519FromSecret(SECRET) - pub := priv.PubKey().(crypto.PubKeyEd25519) - sigV, err := priv.Sign([]byte("hello")) - if err != nil { - fmt.Println("Unexpected error:", err) - } - sig := sigV.(crypto.SignatureEd25519) - - name := "tendermint/PubKeyEd25519" - length := len(pub[:]) - - fmt.Println("### PubKeyEd25519") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", pub.Bytes()[:4]) - fmt.Printf("// Length: 0x%X \n", length) - fmt.Println("// Notes: raw 32-byte Ed25519 pubkey") - fmt.Println("type PubKeyEd25519 [32]byte") - fmt.Println("") - fmt.Println(`func (pubkey PubKeyEd25519) Address() []byte { - // NOTE: hash of the Amino encoded bytes! 
- return RIPEMD160(AminoEncode(pubkey)) -}`) - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the 32-byte Ed25519 pubkey `%X` would be encoded as `%X`.\n\n", pub[:], pub.Bytes()) - fmt.Printf("The address would then be `RIPEMD160(0x%X)` or `%X`\n", pub.Bytes(), pub.Address()) - fmt.Println("") - - name = "tendermint/SignatureKeyEd25519" - length = len(sig[:]) - - fmt.Println("### SignatureEd25519") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", sig.Bytes()[:4]) - fmt.Printf("// Length: 0x%X \n", length) - fmt.Println("// Notes: raw 64-byte Ed25519 signature") - fmt.Println("type SignatureEd25519 [64]byte") - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the 64-byte Ed25519 signature `%X` would be encoded as `%X`\n", sig[:], sig.Bytes()) - fmt.Println("") - - name = "tendermint/PrivKeyEd25519" - - fmt.Println("### PrivKeyEd25519") - fmt.Println("") - fmt.Println("```") - fmt.Println("// Name:", name) - fmt.Println("// Notes: raw 32-byte priv key concatenated to raw 32-byte pub key") - fmt.Println("type PrivKeyEd25519 [64]byte") - fmt.Println("```") -} - -func printSecp() { - priv := crypto.GenPrivKeySecp256k1FromSecret(SECRET) - pub := priv.PubKey().(crypto.PubKeySecp256k1) - sigV, err := priv.Sign([]byte("hello")) - if err != nil { - fmt.Println("Unexpected error:", err) - } - sig := sigV.(crypto.SignatureSecp256k1) - - name := "tendermint/PubKeySecp256k1" - length := len(pub[:]) - - fmt.Println("### PubKeySecp256k1") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", pub.Bytes()[:4]) - fmt.Printf("// Length: 0x%X \n", length) - fmt.Println("// Notes: OpenSSL compressed pubkey prefixed with 0x02 or 0x03") - fmt.Println("type PubKeySecp256k1 [33]byte") - fmt.Println("") - fmt.Println(`func (pubkey PubKeySecp256k1) Address() []byte { - // NOTE: hash of the raw pubkey bytes (not Amino encoded!). - // Compatible with Bitcoin addresses. 
- return RIPEMD160(SHA256(pubkey[:])) -}`) - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the 33-byte Secp256k1 pubkey `%X` would be encoded as `%X`\n\n", pub[:], pub.Bytes()) - fmt.Printf("The address would then be `RIPEMD160(SHA256(0x%X))` or `%X`\n", pub[:], pub.Address()) - fmt.Println("") - - name = "tendermint/SignatureKeySecp256k1" - - fmt.Println("### SignatureSecp256k1") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", sig.Bytes()[:4]) - fmt.Printf("// Length: Variable\n") - fmt.Printf("// Encoding prefix: Variable\n") - fmt.Println("// Notes: raw bytes of the Secp256k1 signature") - fmt.Println("type SignatureSecp256k1 []byte") - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the Secp256k1 signature `%X` would be encoded as `%X`\n", []byte(sig[:]), sig.Bytes()) - fmt.Println("") - - name = "tendermint/PrivKeySecp256k1" - - fmt.Println("### PrivKeySecp256k1") - fmt.Println("") - fmt.Println("```") - fmt.Println("// Name:", name) - fmt.Println("// Notes: raw 32-byte priv key") - fmt.Println("type PrivKeySecp256k1 [32]byte") - fmt.Println("```") -} - func main() { - printEd() + cdc := amino.NewCodec() + cryptoAmino.RegisterAmino(cdc) + cdc.PrintTypes(os.Stdout) fmt.Println("") - printSecp() } diff --git a/docs/specification.rst b/docs/specification.rst deleted file mode 100644 index 70ebf633f..000000000 --- a/docs/specification.rst +++ /dev/null @@ -1,21 +0,0 @@ -############# -Specification -############# - -Here you'll find details of the Tendermint specification. Tendermint's types are produced by `godoc `__. - -.. toctree:: - :maxdepth: 2 - - specification/block-structure.rst - specification/byzantine-consensus-algorithm.rst - specification/configuration.rst - specification/corruption.rst - specification/fast-sync.rst - specification/genesis.rst - specification/light-client-protocol.rst - specification/merkle.rst - specification/rpc.rst - specification/secure-p2p.rst - specification/validators.rst - specification/wire-protocol.rst diff --git a/docs/specification/block-structure.rst b/docs/specification/block-structure.rst deleted file mode 100644 index 7d8f3464c..000000000 --- a/docs/specification/block-structure.rst +++ /dev/null @@ -1,218 +0,0 @@ -Block Structure -=============== - -The tendermint consensus engine records all agreements by a -supermajority of nodes into a blockchain, which is replicated among all -nodes. This blockchain is accessible via various rpc endpoints, mainly -``/block?height=`` to get the full block, as well as -``/blockchain?minHeight=_&maxHeight=_`` to get a list of headers. But -what exactly is stored in these blocks? - -Block -~~~~~ - -A -`Block `__ -contains: - -- a `Header <#header>`__ contains merkle hashes for various chain - states -- the - `Data `__ - is all transactions which are to be processed -- the `LastCommit <#commit>`__ > 2/3 signatures for the last block - -The signatures returned along with block ``H`` are those validating -block ``H-1``. This can be a little confusing, but we must also consider -that the ``Header`` also contains the ``LastCommitHash``. It would be -impossible for a Header to include the commits that sign it, as it would -cause an infinite loop here. But when we get block ``H``, we find -``Header.LastCommitHash``, which must match the hash of ``LastCommit``. - -Header -~~~~~~ - -The -`Header `__ -contains lots of information (follow link for up-to-date info). 
Notably, -it maintains the ``Height``, the ``LastBlockID`` (to make it a chain), -and hashes of the data, the app state, and the validator set. This is -important as the only item that is signed by the validators is the -``Header``, and all other data must be validated against one of the -merkle hashes in the ``Header``. - -The ``DataHash`` can provide a nice check on the -`Data `__ -returned in this same block. If you are subscribed to new blocks, via -tendermint RPC, in order to display or process the new transactions you -should at least validate that the ``DataHash`` is valid. If it is -important to verify autheniticity, you must wait for the ``LastCommit`` -from the next block to make sure the block header (including -``DataHash``) was properly signed. - -The ``ValidatorHash`` contains a hash of the current -`Validators `__. -Tracking all changes in the validator set is complex, but a client can -quickly compare this hash with the `hash of the currently known -validators `__ -to see if there have been changes. - -The ``AppHash`` serves as the basis for validating any merkle proofs -that come from the ABCI application. It represents the -state of the actual application, rather that the state of the blockchain -itself. This means it's necessary in order to perform any business -logic, such as verifying an account balance. - -**Note** After the transactions are committed to a block, they still -need to be processed in a separate step, which happens between the -blocks. If you find a given transaction in the block at height ``H``, -the effects of running that transaction will be first visible in the -``AppHash`` from the block header at height ``H+1``. - -Like the ``LastCommit`` issue, this is a requirement of the immutability -of the block chain, as the application only applies transactions *after* -they are commited to the chain. - -Commit -~~~~~~ - -The -`Commit `__ -contains a set of -`Votes `__ -that were made by the validator set to reach consensus on this block. -This is the key to the security in any PoS system, and actually no data -that cannot be traced back to a block header with a valid set of Votes -can be trusted. Thus, getting the Commit data and verifying the votes is -extremely important. - -As mentioned above, in order to find the ``precommit votes`` for block -header ``H``, we need to query block ``H+1``. Then we need to check the -votes, make sure they really are for that block, and properly formatted. -Much of this code is implemented in Go in the -`light-client `__ package. -If you look at the code, you will notice that we need to provide the -``chainID`` of the blockchain in order to properly calculate the votes. -This is to protect anyone from swapping votes between chains to fake (or -frame) a validator. Also note that this ``chainID`` is in the -``genesis.json`` from *Tendermint*, not the ``genesis.json`` from the -basecoin app (`that is a different -chainID... `__). - -Once we have those votes, and we calculated the proper `sign -bytes `__ -using the chainID and a `nice helper -function `__, -we can verify them. The light client is responsible for maintaining a -set of validators that we trust. Each vote only stores the validators -``Address``, as well as the ``Signature``. Assuming we have a local copy -of the trusted validator set, we can look up the ``Public Key`` of the -validator given its ``Address``, then verify that the ``Signature`` -matches the ``SignBytes`` and ``Public Key``. 
Then we sum up the total -voting power of all validators, whose votes fulfilled all these -stringent requirements. If the total number of voting power for a single -block is greater than 2/3 of all voting power, then we can finally trust -the block header, the AppHash, and the proof we got from the ABCI -application. - -Vote Sign Bytes -^^^^^^^^^^^^^^^ - -The ``sign-bytes`` of a vote is produced by taking a -`stable-json `__-like -deterministic JSON `wire <./wire-protocol.html>`__ encoding of -the vote (excluding the ``Signature`` field), and wrapping it with -``{"chain_id":"my_chain","vote":...}``. - -For example, a precommit vote might have the following ``sign-bytes``: - -.. code:: json - - {"chain_id":"my_chain","vote":{"block_hash":"611801F57B4CE378DF1A3FFF1216656E89209A99","block_parts_header":{"hash":"B46697379DBE0774CC2C3B656083F07CA7E0F9CE","total":123},"height":1234,"round":1,"type":2}} - -Block Hash -~~~~~~~~~~ - -The `block -hash `__ -is the `Simple Tree hash <./merkle.html#simple-tree-with-dictionaries>`__ -of the fields of the block ``Header`` encoded as a list of -``KVPair``\ s. - -Transaction -~~~~~~~~~~~ - -A transaction is any sequence of bytes. It is up to your -ABCI application to accept or reject transactions. - -BlockID -~~~~~~~ - -Many of these data structures refer to the -`BlockID `__, -which is the ``BlockHash`` (hash of the block header, also referred to -by the next block) along with the ``PartSetHeader``. The -``PartSetHeader`` is explained below and is used internally to -orchestrate the p2p propogation. For clients, it is basically opaque -bytes, but they must match for all votes. - -PartSetHeader -~~~~~~~~~~~~~ - -The -`PartSetHeader `__ -contains the total number of pieces in a -`PartSet `__, -and the Merkle root hash of those pieces. - -PartSet -~~~~~~~ - -PartSet is used to split a byteslice of data into parts (pieces) for -transmission. By splitting data into smaller parts and computing a -Merkle root hash on the list, you can verify that a part is legitimately -part of the complete data, and the part can be forwarded to other peers -before all the parts are known. In short, it's a fast way to securely -propagate a large chunk of data (like a block) over a gossip network. - -PartSet was inspired by the LibSwift project. - -Usage: - -.. code:: go - - data := RandBytes(2 << 20) // Something large - - partSet := NewPartSetFromData(data) - partSet.Total() // Total number of 4KB parts - partSet.Count() // Equal to the Total, since we already have all the parts - partSet.Hash() // The Merkle root hash - partSet.BitArray() // A BitArray of partSet.Total() 1's - - header := partSet.Header() // Send this to the peer - header.Total // Total number of parts - header.Hash // The merkle root hash - - // Now we'll reconstruct the data from the parts - partSet2 := NewPartSetFromHeader(header) - partSet2.Total() // Same total as partSet.Total() - partSet2.Count() // Zero, since this PartSet doesn't have any parts yet. - partSet2.Hash() // Same hash as in partSet.Hash() - partSet2.BitArray() // A BitArray of partSet.Total() 0's - - // In a gossip network the parts would arrive in arbitrary order, perhaps - // in response to explicit requests for parts, or optimistically in response - // to the receiving peer's partSet.BitArray(). 
- for !partSet2.IsComplete() { - part := receivePartFromGossipNetwork() - added, err := partSet2.AddPart(part) - if err != nil { - // A wrong part, - // the merkle trail does not hash to partSet2.Hash() - } else if !added { - // A duplicate part already received - } - } - - data2, _ := ioutil.ReadAll(partSet2.GetReader()) - bytes.Equal(data, data2) // true diff --git a/docs/specification/byzantine-consensus-algorithm.rst b/docs/specification/byzantine-consensus-algorithm.rst deleted file mode 100644 index 15eab32d7..000000000 --- a/docs/specification/byzantine-consensus-algorithm.rst +++ /dev/null @@ -1,349 +0,0 @@ -Byzantine Consensus Algorithm -============================= - -Terms ------ - -- The network is composed of optionally connected *nodes*. Nodes - directly connected to a particular node are called *peers*. -- The consensus process in deciding the next block (at some *height* - ``H``) is composed of one or many *rounds*. -- ``NewHeight``, ``Propose``, ``Prevote``, ``Precommit``, and - ``Commit`` represent state machine states of a round. (aka - ``RoundStep`` or just "step"). -- A node is said to be *at* a given height, round, and step, or at - ``(H,R,S)``, or at ``(H,R)`` in short to omit the step. -- To *prevote* or *precommit* something means to broadcast a `prevote - vote `__ - or `first precommit - vote `__ - for something. -- A vote *at* ``(H,R)`` is a vote signed with the bytes for ``H`` and - ``R`` included in its - `sign-bytes `__. -- *+2/3* is short for "more than 2/3" -- *1/3+* is short for "1/3 or more" -- A set of +2/3 of prevotes for a particular block or ```` at - ``(H,R)`` is called a *proof-of-lock-change* or *PoLC* for short. - -State Machine Overview ----------------------- - -At each height of the blockchain a round-based protocol is run to -determine the next block. Each round is composed of three *steps* -(``Propose``, ``Prevote``, and ``Precommit``), along with two special -steps ``Commit`` and ``NewHeight``. - -In the optimal scenario, the order of steps is: - -:: - - NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->... - -The sequence ``(Propose -> Prevote -> Precommit)`` is called a *round*. -There may be more than one round required to commit a block at a given -height. Examples for why more rounds may be required include: - -- The designated proposer was not online. -- The block proposed by the designated proposer was not valid. -- The block proposed by the designated proposer did not propagate in - time. -- The block proposed was valid, but +2/3 of prevotes for the proposed - block were not received in time for enough validator nodes by the - time they reached the ``Precommit`` step. Even though +2/3 of - prevotes are necessary to progress to the next step, at least one - validator may have voted ```` or maliciously voted for something - else. -- The block proposed was valid, and +2/3 of prevotes were received for - enough nodes, but +2/3 of precommits for the proposed block were not - received for enough validator nodes. - -Some of these problems are resolved by moving onto the next round & -proposer. Others are resolved by increasing certain round timeout -parameters over each successive round. 
- -State Machine Diagram ---------------------- - -:: - - +-------------------------------------+ - v |(Wait til `CommmitTime+timeoutCommit`) - +-----------+ +-----+-----+ - +----------> | Propose +--------------+ | NewHeight | - | +-----------+ | +-----------+ - | | ^ - |(Else, after timeoutPrecommit) v | - +-----+-----+ +-----------+ | - | Precommit | <------------------------+ Prevote | | - +-----+-----+ +-----------+ | - |(When +2/3 Precommits for block found) | - v | - +--------------------------------------------------------------------+ - | Commit | - | | - | * Set CommitTime = now; | - | * Wait for block, then stage/save/commit block; | - +--------------------------------------------------------------------+ - -Background Gossip ------------------ - -A node may not have a corresponding validator private key, but it -nevertheless plays an active role in the consensus process by relaying -relevant meta-data, proposals, blocks, and votes to its peers. A node -that has the private keys of an active validator and is engaged in -signing votes is called a *validator-node*. All nodes (not just -validator-nodes) have an associated state (the current height, round, -and step) and work to make progress. - -Between two nodes there exists a ``Connection``, and multiplexed on top -of this connection are fairly throttled ``Channel``\ s of information. -An epidemic gossip protocol is implemented among some of these channels -to bring peers up to speed on the most recent state of consensus. For -example, - -- Nodes gossip ``PartSet`` parts of the current round's proposer's - proposed block. A LibSwift inspired algorithm is used to quickly - broadcast blocks across the gossip network. -- Nodes gossip prevote/precommit votes. A node NODE\_A that is ahead of - NODE\_B can send NODE\_B prevotes or precommits for NODE\_B's current - (or future) round to enable it to progress forward. -- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change) - round if one is proposed. -- Nodes gossip to nodes lagging in blockchain height with block - `commits `__ - for older blocks. -- Nodes opportunistically gossip ``HasVote`` messages to hint peers - what votes it already has. -- Nodes broadcast their current state to all neighboring peers. (but is - not gossiped further) - -There's more, but let's not get ahead of ourselves here. - -Proposals ---------- - -A proposal is signed and published by the designated proposer at each -round. The proposer is chosen by a deterministic and non-choking round -robin selection algorithm that selects proposers in proportion to their -voting power. (see -`implementation `__) - -A proposal at ``(H,R)`` is composed of a block and an optional latest -``PoLC-Round < R`` which is included iff the proposer knows of one. This -hints the network to allow nodes to unlock (when safe) to ensure the -liveness property. - -State Machine Spec ------------------- - -Propose Step (height:H,round:R) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Upon entering ``Propose``: - The designated proposer proposes a block at -``(H,R)``. - -The ``Propose`` step ends: - After ``timeoutProposeR`` after entering -``Propose``. --> goto ``Prevote(H,R)`` - After receiving proposal block -and all prevotes at ``PoLC-Round``. --> goto ``Prevote(H,R)`` - After -`common exit conditions <#common-exit-conditions>`__ - -Prevote Step (height:H,round:R) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Upon entering ``Prevote``, each validator broadcasts its prevote vote. 
- -- First, if the validator is locked on a block since ``LastLockRound`` - but now has a PoLC for something else at round ``PoLC-Round`` where - ``LastLockRound < PoLC-Round < R``, then it unlocks. -- If the validator is still locked on a block, it prevotes that. -- Else, if the proposed block from ``Propose(H,R)`` is good, it - prevotes that. -- Else, if the proposal is invalid or wasn't received on time, it - prevotes ````. - -The ``Prevote`` step ends: - After +2/3 prevotes for a particular block -or ````. --> goto ``Precommit(H,R)`` - After ``timeoutPrevote`` -after receiving any +2/3 prevotes. --> goto ``Precommit(H,R)`` - After -`common exit conditions <#common-exit-conditions>`__ - -Precommit Step (height:H,round:R) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Upon entering ``Precommit``, each validator broadcasts its precommit -vote. - If the validator has a PoLC at ``(H,R)`` for a particular block -``B``, it (re)locks (or changes lock to) and precommits ``B`` and sets -``LastLockRound = R``. - Else, if the validator has a PoLC at ``(H,R)`` -for ````, it unlocks and precommits ````. - Else, it keeps the -lock unchanged and precommits ````. - -A precommit for ```` means "I didn’t see a PoLC for this round, but -I did get +2/3 prevotes and waited a bit". - -The Precommit step ends: - After +2/3 precommits for ````. --> goto -``Propose(H,R+1)`` - After ``timeoutPrecommit`` after receiving any +2/3 -precommits. --> goto ``Propose(H,R+1)`` - After `common exit -conditions <#common-exit-conditions>`__ - -common exit conditions -^^^^^^^^^^^^^^^^^^^^^^ - -- After +2/3 precommits for a particular block. --> goto ``Commit(H)`` -- After any +2/3 prevotes received at ``(H,R+x)``. --> goto - ``Prevote(H,R+x)`` -- After any +2/3 precommits received at ``(H,R+x)``. --> goto - ``Precommit(H,R+x)`` - -Commit Step (height:H) -~~~~~~~~~~~~~~~~~~~~~~ - -- Set ``CommitTime = now()`` -- Wait until block is received. --> goto ``NewHeight(H+1)`` - -NewHeight Step (height:H) -~~~~~~~~~~~~~~~~~~~~~~~~~ - -- Move ``Precommits`` to ``LastCommit`` and increment height. -- Set ``StartTime = CommitTime+timeoutCommit`` -- Wait until ``StartTime`` to receive straggler commits. --> goto - ``Propose(H,0)`` - -Proofs ------- - -Proof of Safety -~~~~~~~~~~~~~~~ - -Assume that at most -1/3 of the voting power of validators is byzantine. -If a validator commits block ``B`` at round ``R``, it's because it saw -+2/3 of precommits at round ``R``. This implies that 1/3+ of honest -nodes are still locked at round ``R' > R``. These locked validators will -remain locked until they see a PoLC at ``R' > R``, but this won't happen -because 1/3+ are locked and honest, so at most -2/3 are available to -vote for anything other than ``B``. - -Proof of Liveness -~~~~~~~~~~~~~~~~~ - -If 1/3+ honest validators are locked on two different blocks from -different rounds, a proposers' ``PoLC-Round`` will eventually cause -nodes locked from the earlier round to unlock. Eventually, the -designated proposer will be one that is aware of a PoLC at the later -round. Also, ``timeoutProposalR`` increments with round ``R``, while the -size of a proposal are capped, so eventually the network is able to -"fully gossip" the whole proposal (e.g. the block & PoLC). - -Proof of Fork Accountability -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Define the JSet (justification-vote-set) at height ``H`` of a validator -``V1`` to be all the votes signed by the validator at ``H`` along with -justification PoLC prevotes for each lock change. 
For example, if ``V1`` -signed the following precommits: ``Precommit(B1 @ round 0)``, -``Precommit( @ round 1)``, ``Precommit(B2 @ round 4)`` (note that -no precommits were signed for rounds 2 and 3, and that's ok), -``Precommit(B1 @ round 0)`` must be justified by a PoLC at round 0, and -``Precommit(B2 @ round 4)`` must be justified by a PoLC at round 4; but -the precommit for ```` at round 1 is not a lock-change by -definition so the JSet for ``V1`` need not include any prevotes at round -1, 2, or 3 (unless ``V1`` happened to have prevoted for those rounds). - -Further, define the JSet at height ``H`` of a set of validators ``VSet`` -to be the union of the JSets for each validator in ``VSet``. For a given -commit by honest validators at round ``R`` for block ``B`` we can -construct a JSet to justify the commit for ``B`` at ``R``. We say that a -JSet *justifies* a commit at ``(H,R)`` if all the committers (validators -in the commit-set) are each justified in the JSet with no duplicitous -vote signatures (by the committers). - -- **Lemma**: When a fork is detected by the existence of two - conflicting `commits <./validators.html#commiting-a-block>`__, - the union of the JSets for both commits (if they can be compiled) - must include double-signing by at least 1/3+ of the validator set. - **Proof**: The commit cannot be at the same round, because that would - immediately imply double-signing by 1/3+. Take the union of the JSets - of both commits. If there is no double-signing by at least 1/3+ of - the validator set in the union, then no honest validator could have - precommitted any different block after the first commit. Yet, +2/3 - did. Reductio ad absurdum. - -As a corollary, when there is a fork, an external process can determine -the blame by requiring each validator to justify all of its round votes. -Either we will find 1/3+ who cannot justify at least one of their votes, -and/or, we will find 1/3+ who had double-signed. - -Alternative algorithm -~~~~~~~~~~~~~~~~~~~~~ - -Alternatively, we can take the JSet of a commit to be the "full commit". -That is, if light clients and validators do not consider a block to be -committed unless the JSet of the commit is also known, then we get the -desirable property that if there ever is a fork (e.g. there are two -conflicting "full commits"), then 1/3+ of the validators are immediately -punishable for double-signing. - -There are many ways to ensure that the gossip network efficiently share -the JSet of a commit. One solution is to add a new message type that -tells peers that this node has (or does not have) a +2/3 majority for B -(or ) at (H,R), and a bitarray of which votes contributed towards that -majority. Peers can react by responding with appropriate votes. - -We will implement such an algorithm for the next iteration of the -Tendermint consensus protocol. - -Other potential improvements include adding more data in votes such as -the last known PoLC round that caused a lock change, and the last voted -round/step (or, we may require that validators not skip any votes). This -may make JSet verification/gossip logic easier to implement. - -Censorship Attacks -~~~~~~~~~~~~~~~~~~ - -Due to the definition of a block -`commit `__, any 1/3+ -coalition of validators can halt the blockchain by not broadcasting -their votes. 
Such a coalition can also censor particular transactions by -rejecting blocks that include these transactions, though this would -result in a significant proportion of block proposals to be rejected, -which would slow down the rate of block commits of the blockchain, -reducing its utility and value. The malicious coalition might also -broadcast votes in a trickle so as to grind blockchain block commits to -a near halt, or engage in any combination of these attacks. - -If a global active adversary were also involved, it can partition the -network in such a way that it may appear that the wrong subset of -validators were responsible for the slowdown. This is not just a -limitation of Tendermint, but rather a limitation of all consensus -protocols whose network is potentially controlled by an active -adversary. - -Overcoming Forks and Censorship Attacks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For these types of attacks, a subset of the validators through external -means should coordinate to sign a reorg-proposal that chooses a fork -(and any evidence thereof) and the initial subset of validators with -their signatures. Validators who sign such a reorg-proposal forego its -collateral on all other forks. Clients should verify the signatures on -the reorg-proposal, verify any evidence, and make a judgement or prompt -the end-user for a decision. For example, a phone wallet app may prompt -the user with a security warning, while a refrigerator may accept any -reorg-proposal signed by +1/2 of the original validators. - -No non-synchronous Byzantine fault-tolerant algorithm can come to -consensus when 1/3+ of validators are dishonest, yet a fork assumes that -1/3+ of validators have already been dishonest by double-signing or -lock-changing without justification. So, signing the reorg-proposal is a -coordination problem that cannot be solved by any non-synchronous -protocol (i.e. automatically, and without making assumptions about the -reliability of the underlying network). It must be provided by means -external to the weakly-synchronous Tendermint consensus algorithm. For -now, we leave the problem of reorg-proposal coordination to human -coordination via internet media. Validators must take care to ensure -that there are no significant network partitions, to avoid situations -where two conflicting reorg-proposals are signed. - -Assuming that the external coordination medium and protocol is robust, -it follows that forks are less of a concern than `censorship -attacks <#censorship-attacks>`__. diff --git a/docs/specification/corruption.rst b/docs/specification/corruption.rst deleted file mode 100644 index 6ae19fb18..000000000 --- a/docs/specification/corruption.rst +++ /dev/null @@ -1,70 +0,0 @@ -Corruption -========== - -Important step --------------- - -Make sure you have a backup of the Tendermint data directory. - -Possible causes ---------------- - -Remember that most corruption is caused by hardware issues: - -- RAID controllers with faulty / worn out battery backup, and an unexpected power loss -- Hard disk drives with write-back cache enabled, and an unexpected power loss -- Cheap SSDs with insufficient power-loss protection, and an unexpected power-loss -- Defective RAM -- Defective or overheating CPU(s) - -Other causes can be: - -- Database systems configured with fsync=off and an OS crash or power loss -- Filesystems configured to use write barriers plus a storage layer that ignores write barriers. LVM is a particular culprit. 
-- Tendermint bugs -- Operating system bugs -- Admin error - - directly modifying Tendermint data-directory contents - -(Source: https://wiki.postgresql.org/wiki/Corruption) - -WAL Corruption --------------- - -If consensus WAL is corrupted at the lastest height and you are trying to start -Tendermint, replay will fail with panic. - -Recovering from data corruption can be hard and time-consuming. Here are two approaches you can take: - -1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers. -2) Try to repair the WAL file manually: - - 1. Create a backup of the corrupted WAL file: - - .. code:: bash - - cp "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal_backup - - 2. Use ./scripts/wal2json to create a human-readable version - - .. code:: bash - - ./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal - - 3. Search for a "CORRUPTED MESSAGE" line. - 4. By looking at the previous message and the message after the corrupted one - and looking at the logs, try to rebuild the message. If the consequent - messages are marked as corrupted too (this may happen if length header - got corrupted or some writes did not make it to the WAL ~ truncation), - then remove all the lines starting from the corrupted one and restart - Tendermint. - - .. code:: bash - - $EDITOR /tmp/corrupted_wal - - 5. After editing, convert this file back into binary form by running: - - .. code:: bash - - ./scripts/json2wal/json2wal /tmp/corrupted_wal > "$TMHOME/data/cs.wal/wal" diff --git a/docs/specification/genesis.rst b/docs/specification/genesis.rst deleted file mode 100644 index 427c88bb2..000000000 --- a/docs/specification/genesis.rst +++ /dev/null @@ -1,71 +0,0 @@ -Genesis -======= - -The genesis.json file in ``$TMHOME/config`` defines the initial TendermintCore -state upon genesis of the blockchain (`see -definition `__). - -Fields -~~~~~~ - -- ``genesis_time``: Official time of blockchain start. -- ``chain_id``: ID of the blockchain. This must be unique for every - blockchain. If your testnet blockchains do not have unique chain IDs, - you will have a bad time. -- ``validators``: -- ``pub_key``: The first element specifies the pub\_key type. 1 == - Ed25519. The second element are the pubkey bytes. -- ``power``: The validator's voting power. -- ``name``: Name of the validator (optional). -- ``app_hash``: The expected application hash (as returned by the - ``ResponseInfo`` ABCI message) upon genesis. If the app's hash does not - match, Tendermint will panic. -- ``app_state``: The application state (e.g. initial distribution of tokens). - -Sample genesis.json -~~~~~~~~~~~~~~~~~~~ - -.. 
code:: json - - { - "genesis_time": "2016-02-05T06:02:31.526Z", - "chain_id": "chain-tTH4mi", - "validators": [ - { - "pub_key": [ - 1, - "9BC5112CB9614D91CE423FA8744885126CD9D08D9FC9D1F42E552D662BAA411E" - ], - "power": 1, - "name": "mach1" - }, - { - "pub_key": [ - 1, - "F46A5543D51F31660D9F59653B4F96061A740FF7433E0DC1ECBC30BE8494DE06" - ], - "power": 1, - "name": "mach2" - }, - { - "pub_key": [ - 1, - "0E7B423C1635FD07C0FC3603B736D5D27953C1C6CA865BB9392CD79DE1A682BB" - ], - "power": 1, - "name": "mach3" - }, - { - "pub_key": [ - 1, - "4F49237B9A32EB50682EDD83C48CE9CDB1D02A7CFDADCFF6EC8C1FAADB358879" - ], - "power": 1, - "name": "mach4" - } - ], - "app_hash": "15005165891224E721CB664D15CB972240F5703F", - "app_state": { - {"account": "Bob", "coins": 5000} - } - } diff --git a/docs/specification/light-client-protocol.rst b/docs/specification/light-client-protocol.rst deleted file mode 100644 index 6c6083b45..000000000 --- a/docs/specification/light-client-protocol.rst +++ /dev/null @@ -1,33 +0,0 @@ -Light Client Protocol -===================== - -Light clients are an important part of the complete blockchain system -for most applications. Tendermint provides unique speed and security -properties for light client applications. - -See our `lite package -`__. - -Overview --------- - -The objective of the light client protocol is to get a -`commit <./validators.html#committing-a-block>`__ for a recent -`block hash <./block-structure.html#block-hash>`__ where the commit -includes a majority of signatures from the last known validator set. -From there, all the application state is verifiable with `merkle -proofs <./merkle.html#iavl-tree>`__. - -Properties ----------- - -- You get the full collateralized security benefits of Tendermint; No - need to wait for confirmations. -- You get the full speed benefits of Tendermint; transactions commit - instantly. -- You can get the most recent version of the application state - non-interactively (without committing anything to the blockchain). - For example, this means that you can get the most recent value of a - name from the name-registry without worrying about fork censorship - attacks, without posting a commit and waiting for confirmations. It's - fast, secure, and free! diff --git a/docs/specification/merkle.rst b/docs/specification/merkle.rst deleted file mode 100644 index 588f24a98..000000000 --- a/docs/specification/merkle.rst +++ /dev/null @@ -1,88 +0,0 @@ -Merkle -====== - -For an overview of Merkle trees, see -`wikipedia `__. - -There are two types of Merkle trees used in Tendermint. - -- **IAVL+ Tree**: An immutable self-balancing binary - tree for persistent application state -- **Simple Tree**: A simple compact binary tree for - a static list of items - -IAVL+ Tree ----------- - -The purpose of this data structure is to provide persistent storage for -key-value pairs (e.g. account state, name-registrar data, and -per-contract data) such that a deterministic merkle root hash can be -computed. The tree is balanced using a variant of the `AVL -algorithm `__ so all operations -are O(log(n)). - -Nodes of this tree are immutable and indexed by its hash. Thus any node -serves as an immutable snapshot which lets us stage uncommitted -transactions from the mempool cheaply, and we can instantly roll back to -the last committed state to process transactions of a newly committed -block (which may not be the same set of transactions as those from the -mempool). - -In an AVL tree, the heights of the two child subtrees of any node differ -by at most one. 
Whenever this condition is violated upon an update, the -tree is rebalanced by creating O(log(n)) new nodes that point to -unmodified nodes of the old tree. In the original AVL algorithm, inner -nodes can also hold key-value pairs. The AVL+ algorithm (note the plus) -modifies the AVL algorithm to keep all values on leaf nodes, while only -using branch-nodes to store keys. This simplifies the algorithm while -minimizing the size of merkle proofs - -In Ethereum, the analog is the `Patricia -trie `__. There are tradeoffs. -Keys do not need to be hashed prior to insertion in IAVL+ trees, so this -provides faster iteration in the key space which may benefit some -applications. The logic is simpler to implement, requiring only two -types of nodes -- inner nodes and leaf nodes. The IAVL+ tree is a binary -tree, so merkle proofs are much shorter than the base 16 Patricia trie. -On the other hand, while IAVL+ trees provide a deterministic merkle root -hash, it depends on the order of updates. In practice this shouldn't be -a problem, since you can efficiently encode the tree structure when -serializing the tree contents. - -Simple Tree ------------ - -For merkelizing smaller static lists, use the Simple Tree. The -transactions and validation signatures of a block are hashed using this -simple merkle tree logic. - -If the number of items is not a power of two, the tree will not be full -and some leaf nodes will be at different levels. Simple Tree tries to -keep both sides of the tree the same size, but the left side may be one -greater. - -:: - - Simple Tree with 6 items Simple Tree with 7 items - - * * - / \ / \ - / \ / \ - / \ / \ - / \ / \ - * * * * - / \ / \ / \ / \ - / \ / \ / \ / \ - / \ / \ / \ / \ - * h2 * h5 * * * h6 - / \ / \ / \ / \ / \ - h0 h1 h3 h4 h0 h1 h2 h3 h4 h5 - -Simple Tree with Dictionaries -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Simple Tree is used to merkelize a list of items, so to merkelize a -(short) dictionary of key-value pairs, encode the dictionary as an -ordered list of ``KVPair`` structs. The block hash is such a hash -derived from all the fields of the block ``Header``. The state hash is -similarly derived. diff --git a/docs/specification/new-spec/README.md b/docs/specification/new-spec/README.md deleted file mode 100644 index 907ddd945..000000000 --- a/docs/specification/new-spec/README.md +++ /dev/null @@ -1 +0,0 @@ -Spec moved to [docs/spec](https://github.com/tendermint/tendermint/tree/master/docs/spec). diff --git a/docs/specification/wire-protocol.rst b/docs/specification/wire-protocol.rst deleted file mode 100644 index c0bf3b0ef..000000000 --- a/docs/specification/wire-protocol.rst +++ /dev/null @@ -1,172 +0,0 @@ -Wire Protocol -============= - -The `Tendermint wire protocol `__ -encodes data in `c-style binary <#binary>`__ and `JSON <#json>`__ form. - -Supported types ---------------- - -- Primitive types -- ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64`` -- ``int8``, ``int16``, ``int32``, ``int64`` -- ``uint``, ``int``: variable length (un)signed integers -- ``string``, ``[]byte`` -- ``time`` -- Derived types -- structs -- var-length arrays of a particular type -- fixed-length arrays of a particular type -- interfaces: registered union types preceded by a ``type byte`` -- pointers - -Binary ------- - -**Fixed-length primitive types** are encoded with 1,2,3, or 4 big-endian -bytes. 
- ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64``: takes
-  1,2,3, and 4 bytes respectively
-- ``int8``, ``int16``, ``int32``, ``int64``: takes 1,2,3, and 4 bytes
-  respectively
-- ``time``: ``int64`` representation of nanoseconds since epoch
-
-**Variable-length integers** are encoded with a single leading byte
-representing the length of the following big-endian bytes. For signed
-negative integers, the most significant bit of the leading byte is a 1.
-
-- ``uint``: 1-byte length prefixed variable-size (0 ~ 255 bytes)
-  unsigned integers
-- ``int``: 1-byte length prefixed variable-size (0 ~ 127 bytes) signed
-  integers
-
-NOTE: While the number 0 (zero) is encoded with a single byte ``x00``,
-the number 1 (one) takes two bytes to represent: ``x0101``. This isn't
-the most efficient representation, but the rules are easier to remember.
-
-+---------------+----------------+----------------+
-| number        | binary ``uint``| binary ``int`` |
-+===============+================+================+
-| 0             | ``x00``        | ``x00``        |
-+---------------+----------------+----------------+
-| 1             | ``x0101``      | ``x0101``      |
-+---------------+----------------+----------------+
-| 2             | ``x0102``      | ``x0102``      |
-+---------------+----------------+----------------+
-| 256           | ``x020100``    | ``x020100``    |
-+---------------+----------------+----------------+
-| 2^(127*8)-1   | ``x7FFFFF...`` | ``x7FFFFF...`` |
-+---------------+----------------+----------------+
-| 2^(127*8)     | ``x800100...`` | overflow       |
-+---------------+----------------+----------------+
-| 2^(255*8)-1   | ``xFFFFFF...`` | overflow       |
-+---------------+----------------+----------------+
-| -1            | n/a            | ``x8101``      |
-+---------------+----------------+----------------+
-| -2            | n/a            | ``x8102``      |
-+---------------+----------------+----------------+
-| -256          | n/a            | ``x820100``    |
-+---------------+----------------+----------------+
-
-**Structures** are encoded by encoding the field values in order of
-declaration.
-
-.. code:: go
-
-    type Foo struct {
-        MyString string
-        MyUint32 uint32
-    }
-    var foo = Foo{"626172", math.MaxUint32}
-
-    /* The binary representation of foo:
-    0103626172FFFFFFFF
-    0103: `int` encoded length of string, here 3
-    626172: 3 bytes of string "bar"
-    FFFFFFFF: 4 bytes of uint32 MaxUint32
-    */
-
-**Variable-length arrays** are encoded with a leading ``int`` denoting
-the length of the array followed by the binary representation of the
-items. **Fixed-length arrays** are similar but aren't preceded by the
-leading ``int``.
-
-.. code:: go
-
-    foos := []Foo{foo, foo}
-
-    /* The binary representation of foos:
-    01020103626172FFFFFFFF0103626172FFFFFFFF
-    0102: `int` encoded length of array, here 2
-    0103626172FFFFFFFF: the first `foo`
-    0103626172FFFFFFFF: the second `foo`
-    */
-
-    foos := [2]Foo{foo, foo} // fixed-length array
-
-    /* The binary representation of foos:
-    0103626172FFFFFFFF0103626172FFFFFFFF
-    0103626172FFFFFFFF: the first `foo`
-    0103626172FFFFFFFF: the second `foo`
-    */
-
-**Interfaces** can represent one of any number of concrete types. The
-concrete types of an interface must first be declared with their
-corresponding ``type byte``. An interface is then encoded with the
-leading ``type byte``, then the binary encoding of the underlying
-concrete type.
-
-NOTE: The byte ``x00`` is reserved for the ``nil`` interface value and
-``nil`` pointer values.
-
-..
code:: go
-
-    type Animal interface{}
-    type Dog uint32
-    type Cat string
-
-    RegisterInterface(
-        struct{ Animal }{},          // Convenience for referencing the 'Animal' interface
-        ConcreteType{Dog(0), 0x01},  // Register the byte 0x01 to denote a Dog
-        ConcreteType{Cat(""), 0x02}, // Register the byte 0x02 to denote a Cat
-    )
-
-    var animal Animal = Dog(02)
-
-    /* The binary representation of animal:
-    010102
-    01: the type byte for a `Dog`
-    0102: the bytes of Dog(02)
-    */
-
-**Pointers** are encoded with a single leading byte ``x00`` for ``nil``
-pointers, otherwise encoded with a leading byte ``x01`` followed by the
-binary encoding of the value pointed to.
-
-NOTE: It's easy to convert pointer types into interface types, since the
-``type byte`` ``x00`` is always ``nil``.
-
-JSON
-----
-
-The JSON codec is compatible with the ```binary`` <#binary>`__ codec,
-and is fairly intuitive if you're already familiar with golang's JSON
-encoding. Some quirks are noted below:
-
-- variable-length and fixed-length bytes are encoded as uppercase
-  hexadecimal strings
-- interface values are encoded as an array of two items:
-  ``[type_byte, concrete_value]``
-- times are encoded as rfc2822 strings
diff --git a/docs/tendermint-core/block-structure.md b/docs/tendermint-core/block-structure.md
new file mode 100644
index 000000000..803805529
--- /dev/null
+++ b/docs/tendermint-core/block-structure.md
@@ -0,0 +1,206 @@
+# Block Structure
+
+The tendermint consensus engine records all agreements by a
+supermajority of nodes into a blockchain, which is replicated among all
+nodes. This blockchain is accessible via various rpc endpoints, mainly
+`/block?height=` to get the full block, as well as
+`/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what
+exactly is stored in these blocks?
+
+## Block
+
+A
+[Block](https://godoc.org/github.com/tendermint/tendermint/types#Block)
+contains:
+
+- a [Header](#header), which contains merkle hashes for various chain
+  states
+- the
+  [Data](https://godoc.org/github.com/tendermint/tendermint/types#Data),
+  which is all the transactions to be processed
+- the [LastCommit](#commit), with more than 2/3 of the validator
+  signatures for the last block
+
+The signatures returned along with block `H` are those validating block
+`H-1`. This can be a little confusing, but consider that
+the `Header` also contains the `LastCommitHash`. It would be impossible
+for a Header to include the commits that sign it, as that would create
+an infinite loop. But when we get block `H`, we find
+`Header.LastCommitHash`, which must match the hash of `LastCommit`.
+
+## Header
+
+The
+[Header](https://godoc.org/github.com/tendermint/tendermint/types#Header)
+contains lots of information (follow link for up-to-date info). Notably,
+it maintains the `Height`, the `LastBlockID` (to make it a chain), and
+hashes of the data, the app state, and the validator set. This is
+important as the only item that is signed by the validators is the
+`Header`, and all other data must be validated against one of the merkle
+hashes in the `Header`.
+
+The `DataHash` can provide a nice check on the
+[Data](https://godoc.org/github.com/tendermint/tendermint/types#Data)
+returned in this same block. If you are subscribed to new blocks via the
+tendermint RPC, then in order to display or process the new transactions
+you should at least validate that the `DataHash` is valid.
If it is
+important to verify authenticity, you must wait for the `LastCommit`
+from the next block to make sure the block header (including `DataHash`)
+was properly signed.
+
+The `ValidatorHash` contains a hash of the current
+[Validators](https://godoc.org/github.com/tendermint/tendermint/types#Validator).
+Tracking all changes in the validator set is complex, but a client can
+quickly compare this hash with the [hash of the currently known
+validators](https://godoc.org/github.com/tendermint/tendermint/types#ValidatorSet.Hash)
+to see if there have been changes.
+
+The `AppHash` serves as the basis for validating any merkle proofs that
+come from the ABCI application. It represents the state of the actual
+application, rather than the state of the blockchain itself. This means
+it's necessary in order to perform any business logic, such as verifying
+an account balance.
+
+**Note** After the transactions are committed to a block, they still
+need to be processed in a separate step, which happens between the
+blocks. If you find a given transaction in the block at height `H`, the
+effects of running that transaction will be first visible in the
+`AppHash` from the block header at height `H+1`.
+
+Like the `LastCommit` issue, this is a requirement of the immutability
+of the blockchain, as the application only applies transactions *after*
+they are committed to the chain.
+
+## Commit
+
+The
+[Commit](https://godoc.org/github.com/tendermint/tendermint/types#Commit)
+contains a set of
+[Votes](https://godoc.org/github.com/tendermint/tendermint/types#Vote)
+that were made by the validator set to reach consensus on this block.
+This is the key to the security in any PoS system; in fact, no data
+that cannot be traced back to a block header with a valid set of Votes
+can be trusted. Thus, getting the Commit data and verifying the votes is
+extremely important.
+
+As mentioned above, in order to find the `precommit votes` for block
+header `H`, we need to query block `H+1`. Then we need to check the
+votes, making sure they really are for that block and are properly
+formatted. Much of this code is implemented in Go in the
+[light-client](https://github.com/tendermint/light-client) package. If
+you look at the code, you will notice that we need to provide the
+`chainID` of the blockchain in order to properly calculate the votes.
+This prevents anyone from swapping votes between chains to fake (or
+frame) a validator. Also note that this `chainID` is in the
+`genesis.json` from *Tendermint*, not the `genesis.json` from the
+basecoin app ([that is a different
+chainID...](https://github.com/cosmos/cosmos-sdk/issues/32)).
+
+Once we have those votes and have calculated the proper [sign
+bytes](https://godoc.org/github.com/tendermint/tendermint/types#Vote.WriteSignBytes)
+using the chainID and a [nice helper
+function](https://godoc.org/github.com/tendermint/tendermint/types#SignBytes),
+we can verify them. The light client is responsible for maintaining a
+set of validators that we trust. Each vote stores only the validator's
+`Address` and the `Signature`. Assuming we have a local copy of
+the trusted validator set, we can look up the `Public Key` of the
+validator given its `Address`, then verify that the `Signature` matches
+the `SignBytes` and `Public Key`. Then we sum up the voting power
+of all validators whose votes fulfilled these stringent
+requirements.
If the total voting power behind a single block is
+greater than 2/3 of all voting power, then we can finally trust the
+block header, the AppHash, and the proof we got from the ABCI
+application.
+
+### Vote Sign Bytes
+
+The `sign-bytes` of a vote is produced by taking a
+[stable-json](https://github.com/substack/json-stable-stringify)-like
+deterministic JSON [wire](./wire-protocol.html) encoding of the vote
+(excluding the `Signature` field), and wrapping it with
+`{"chain_id":"my_chain","vote":...}`.
+
+For example, a precommit vote might have the following `sign-bytes`:
+
+```
+{"chain_id":"my_chain","vote":{"block_hash":"611801F57B4CE378DF1A3FFF1216656E89209A99","block_parts_header":{"hash":"B46697379DBE0774CC2C3B656083F07CA7E0F9CE","total":123},"height":1234,"round":1,"type":2}}
+```
+
+## Block Hash
+
+The [block
+hash](https://godoc.org/github.com/tendermint/tendermint/types#Block.Hash)
+is the [Simple Tree hash](./merkle.html#simple-tree-with-dictionaries)
+of the fields of the block `Header` encoded as a list of `KVPair`s.
+
+## Transaction
+
+A transaction is any sequence of bytes. It is up to your ABCI
+application to accept or reject transactions.
+
+## BlockID
+
+Many of these data structures refer to the
+[BlockID](https://godoc.org/github.com/tendermint/tendermint/types#BlockID),
+which is the `BlockHash` (hash of the block header, also referred to by
+the next block) along with the `PartSetHeader`. The `PartSetHeader` is
+explained below and is used internally to orchestrate the p2p
+propagation. For clients, it is basically opaque bytes, but they must
+match for all votes.
+
+## PartSetHeader
+
+The
+[PartSetHeader](https://godoc.org/github.com/tendermint/tendermint/types#PartSetHeader)
+contains the total number of pieces in a
+[PartSet](https://godoc.org/github.com/tendermint/tendermint/types#PartSet),
+and the Merkle root hash of those pieces.
+
+## PartSet
+
+PartSet is used to split a byteslice of data into parts (pieces) for
+transmission. By splitting data into smaller parts and computing a
+Merkle root hash on the list, you can verify that a part is legitimately
+part of the complete data, and the part can be forwarded to other peers
+before all the parts are known. In short, it's a fast way to securely
+propagate a large chunk of data (like a block) over a gossip network.
+
+PartSet was inspired by the LibSwift project.
+
+Usage:
+
+```
+data := RandBytes(2 << 20) // Something large
+
+partSet := NewPartSetFromData(data)
+partSet.Total()     // Total number of 4KB parts
+partSet.Count()     // Equal to the Total, since we already have all the parts
+partSet.Hash()      // The Merkle root hash
+partSet.BitArray()  // A BitArray of partSet.Total() 1's
+
+header := partSet.Header() // Send this to the peer
+header.Total        // Total number of parts
+header.Hash         // The merkle root hash
+
+// Now we'll reconstruct the data from the parts
+partSet2 := NewPartSetFromHeader(header)
+partSet2.Total()    // Same total as partSet.Total()
+partSet2.Count()    // Zero, since this PartSet doesn't have any parts yet.
+partSet2.Hash()     // Same hash as in partSet.Hash()
+partSet2.BitArray() // A BitArray of partSet.Total() 0's
+
+// In a gossip network the parts would arrive in arbitrary order, perhaps
+// in response to explicit requests for parts, or optimistically in response
+// to the receiving peer's partSet.BitArray().
+for !partSet2.IsComplete() {
+  part := receivePartFromGossipNetwork()
+  added, err := partSet2.AddPart(part)
+  if err != nil {
+    // A wrong part,
+    // the merkle trail does not hash to partSet2.Hash()
+  } else if !added {
+    // A duplicate part already received
+  }
+}
+
+data2, _ := ioutil.ReadAll(partSet2.GetReader())
+bytes.Equal(data, data2) // true
+```
diff --git a/docs/specification/configuration.md b/docs/tendermint-core/configuration.md
similarity index 94%
rename from docs/specification/configuration.md
rename to docs/tendermint-core/configuration.md
index 59de9767b..ab2d7cc17 100644
--- a/docs/specification/configuration.md
+++ b/docs/tendermint-core/configuration.md
@@ -2,8 +2,7 @@
 Tendermint Core can be configured via a TOML file in
 `$TMHOME/config/config.toml`. Some of these parameters can be overridden by
-command-line flags. For most users, the options in the `##### main
-base configuration options #####` are intended to be modified while
+command-line flags. For most users, the options in the `##### main base configuration options #####` are intended to be modified, while
 config options further below are intended for advanced power users.
 
 ## Options
 
@@ -100,9 +99,11 @@ laddr = "tcp://0.0.0.0:26656"
 seeds = ""
 
 # Comma separated list of nodes to keep persistent connections to
-# Do not add private peers to this list if you don't want them advertised
 persistent_peers = ""
 
+# UPNP port forwarding
+upnp = false
+
 # Path to address book
 addr_book_file = "addrbook.json"
 
@@ -116,13 +117,13 @@ flush_throttle_timeout = 100
 max_num_peers = 50
 
 # Maximum size of a message packet payload, in bytes
-max_msg_packet_payload_size = 1024
+max_packet_msg_payload_size = 1024
 
 # Rate at which packets can be sent, in bytes/second
-send_rate = 512000
+send_rate = 5120000
 
 # Rate at which packets can be received, in bytes/second
-recv_rate = 512000
+recv_rate = 5120000
 
 # Set true to enable the peer-exchange reactor
 pex = true
 
@@ -167,10 +168,6 @@ timeout_commit = 1000
 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 skip_timeout_commit = false
 
-# BlockSize
-max_block_size_txs = 10000
-max_block_size_bytes = 1
-
 # EmptyBlocks mode and possible interval between empty blocks in seconds
 create_empty_blocks = true
 create_empty_blocks_interval = 0
 
@@ -211,4 +208,10 @@ prometheus = false
 # Address to listen for Prometheus collector(s) connections
 prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3
 ```
diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md
new file mode 100644
index 000000000..83dab3870
--- /dev/null
+++ b/docs/tendermint-core/how-to-read-logs.md
@@ -0,0 +1,142 @@
+# How to read logs
+
+## Walkabout example
+
+We first create three connections (mempool, consensus and query) to the
+application (running `kvstore` locally in this case).
+
+```
+I[10-04|13:54:27.364] Starting multiAppConn module=proxy impl=multiAppConn
+I[10-04|13:54:27.366] Starting localClient module=abci-client connection=query impl=localClient
+I[10-04|13:54:27.366] Starting localClient module=abci-client connection=mempool impl=localClient
+I[10-04|13:54:27.367] Starting localClient module=abci-client connection=consensus impl=localClient
+```
+
+Then Tendermint Core and the application perform a handshake.
+ +``` +I[10-04|13:54:27.367] ABCI Handshake module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +I[10-04|13:54:27.368] ABCI Replay Blocks module=consensus appHeight=90 storeHeight=90 stateHeight=90 +I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +``` + +After that, we start a few more things like the event switch, reactors, +and perform UPNP discover in order to detect the IP address. + +``` +I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch +I[10-04|13:54:27.375] This node is a validator module=consensus +I[10-04|13:54:27.379] Starting Node module=main impl=Node +I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656 +I[10-04|13:54:27.382] Getting UPNP external address module=p2p +I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout" +I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656) +I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch" +I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor +I[10-04|13:54:30.387] Starting BlockchainReactor module=blockchain impl=BlockchainReactor +I[10-04|13:54:30.387] Starting ConsensusReactor module=consensus impl=ConsensusReactor +I[10-04|13:54:30.387] ConsensusReactor module=consensus fastSync=false +I[10-04|13:54:30.387] Starting ConsensusState module=consensus impl=ConsensusState +I[10-04|13:54:30.387] Starting WAL module=consensus wal=/home/vagrant/.tendermint/data/cs.wal/wal impl=WAL +I[10-04|13:54:30.388] Starting TimeoutTicker module=consensus impl=TimeoutTicker +``` + +Notice the second row where Tendermint Core reports that "This node is a +validator". It also could be just an observer (regular node). + +Next we replay all the messages from the WAL. + +``` +I[10-04|13:54:30.390] Catchup by replaying consensus messages module=consensus height=91 +I[10-04|13:54:30.390] Replay: New Step module=consensus height=91 round=0 step=RoundStepNewHeight +I[10-04|13:54:30.390] Replay: Done module=consensus +``` + +"Started node" message signals that everything is ready for work. + +``` +I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:26657 module=rpc-server +I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:26656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:26657])}" +``` + +Next follows a standard block creation cycle, where we enter a new +round, propose a block, receive more than 2/3 of prevotes, then +precommits and finally have a chance to commit a block. For details, +please refer to [Consensus +Overview](./introduction.md#consensus-overview) or [Byzantine Consensus +Algorithm](./spec/consensus). + +``` +I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus +I[10-04|13:54:30.393] enterPropose(91/0). 
Current: 91/0/RoundStepNewRound module=consensus +I[10-04|13:54:30.393] enterPropose: Our turn to propose module=consensus proposer=125B0E3C5512F5C2B0E1109E31885C4511570C42 privValidator="PrivValidator{125B0E3C5512F5C2B0E1109E31885C4511570C42 LH:90, LR:0, LS:3}" +I[10-04|13:54:30.394] Signed proposal module=consensus height=91 round=0 proposal="Proposal{91/0 1:21B79872514F (-1,:0:000000000000) {/10EDEDD7C84E.../}}" +I[10-04|13:54:30.397] Received complete proposal block module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E +I[10-04|13:54:30.397] enterPrevote(91/0). Current: 91/0/RoundStepPropose module=consensus +I[10-04|13:54:30.397] enterPrevote: ProposalBlock is valid module=consensus height=91 round=0 +I[10-04|13:54:30.398] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" err=null +I[10-04|13:54:30.401] Added to prevote module=consensus vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" prevotes="VoteSet{H:91 R:0 T:1 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" +I[10-04|13:54:30.401] enterPrecommit(91/0). Current: 91/0/RoundStepPrevote module=consensus +I[10-04|13:54:30.401] enterPrecommit: +2/3 prevoted proposal block. Locking module=consensus hash=F671D562C7B9242900A286E1882EE64E5556FE9E +I[10-04|13:54:30.402] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" err=null +I[10-04|13:54:30.404] Added to precommit module=consensus vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" precommits="VoteSet{H:91 R:0 T:2 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" +I[10-04|13:54:30.404] enterCommit(91/0). Current: 91/0/RoundStepPrecommit module=consensus +I[10-04|13:54:30.405] Finalizing commit of block with 0 txs module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E root=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +I[10-04|13:54:30.405] Block{ + Header{ + ChainID: test-chain-3MNw2N + Height: 91 + Time: 2017-10-04 13:54:30.393 +0000 UTC + NumTxs: 0 + LastBlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 + LastCommit: 56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D + Data: + Validators: CE25FBFF2E10C0D51AA1A07C064A96931BC8B297 + App: E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD + }#F671D562C7B9242900A286E1882EE64E5556FE9E + Data{ + + }# + Commit{ + BlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 + Precommits: Vote{0:125B0E3C5512 90/00/2(Precommit) F15AB8BEF9A6 {/FE98E2B956F0.../}} + }#56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D +}#F671D562C7B9242900A286E1882EE64E5556FE9E module=consensus +I[10-04|13:54:30.408] Executed block module=state height=91 validTxs=0 invalidTxs=0 +I[10-04|13:54:30.410] Committed state module=state height=91 txs=0 hash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +I[10-04|13:54:30.410] Recheck txs module=mempool numtxs=0 height=91 +``` + +## List of modules + +Here is the list of modules you may encounter in Tendermint's log and a +little overview what they do. + +- `abci-client` As mentioned in [Application Development Guide](./app-development.md), Tendermint acts as an ABCI + client with respect to the application and maintains 3 connections: + mempool, consensus and query. The code used by Tendermint Core can + be found [here](https://github.com/tendermint/tendermint/tree/develop/abci/client). 
+- `blockchain` Provides storage, pool (a group of peers), and reactor
+  for both storing and exchanging blocks between peers.
+- `consensus` The heart of Tendermint core, which is the
+  implementation of the consensus algorithm. Includes two
+  "submodules": `wal` (write-ahead logging) for ensuring data
+  integrity and `replay` to replay blocks and messages on recovery
+  from a crash.
+- `events` Simple event notification system. The list of events can be
+  found
+  [here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
+  You can subscribe to them by calling the `subscribe` RPC method. Refer
+  to [RPC docs](./rpc.md) for additional information.
+- `mempool` Mempool module handles all incoming transactions, whether
+  they come from peers or the application.
+- `p2p` Provides an abstraction around peer-to-peer communication. For
+  more details, please check out the
+  [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
+- `rpc` [Tendermint's RPC](./rpc.md).
+- `rpc-server` RPC server. For implementation details, please read the
+  [README](https://github.com/tendermint/tendermint/blob/master/rpc/lib/README.md).
+- `state` Represents the latest state and execution submodule, which
+  executes blocks against the application.
+- `types` A collection of the publicly exposed types and methods to
+  work with them.
diff --git a/docs/tendermint-core/light-client-protocol.md b/docs/tendermint-core/light-client-protocol.md
new file mode 100644
index 000000000..6d905be32
--- /dev/null
+++ b/docs/tendermint-core/light-client-protocol.md
@@ -0,0 +1,30 @@
+# Light Client Protocol
+
+Light clients are an important part of the complete blockchain system
+for most applications. Tendermint provides unique speed and security
+properties for light client applications.
+
+See our [lite
+package](https://godoc.org/github.com/tendermint/tendermint/lite).
+
+## Overview
+
+The objective of the light client protocol is to get a
+[commit](./validators.md#committing-a-block) for a recent [block
+hash](../spec/consensus/consensus.md#block-hash) where the commit includes a
+majority of signatures from the last known validator set. From there,
+all the application state is verifiable with [merkle
+proofs](./merkle.md#iavl-tree). A sketch of this verification step
+appears after the list below.
+
+## Properties
+
+- You get the full collateralized security benefits of Tendermint; no
+  need to wait for confirmations.
+- You get the full speed benefits of Tendermint; transactions
+  commit instantly.
+- You can get the most recent version of the application state
+  non-interactively (without committing anything to the blockchain).
+  For example, this means that you can get the most recent value of a
+  name from the name-registry without worrying about fork censorship
+  attacks, without posting a commit and waiting for confirmations.
+  It's fast, secure, and free!
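+
+The following is a minimal sketch of the voting-power check described
+above. The types and the `SigOK` field are invented for illustration
+(they are not the real `lite` package API); the sketch only
+demonstrates the more-than-2/3 arithmetic:
+
+```
+// Package lightsketch is a toy illustration, not the real light client.
+package lightsketch
+
+// Validator is a simplified stand-in for the real validator type.
+type Validator struct {
+	Address string
+	Power   int64
+}
+
+// Vote is a simplified stand-in for a precommit vote; SigOK stands in
+// for real signature verification against the validator's public key.
+type Vote struct {
+	ValidatorAddress string
+	SigOK            bool
+}
+
+// TrustCommit reports whether validators holding strictly more than
+// 2/3 of the total voting power cast a correctly signed vote.
+func TrustCommit(vals []Validator, votes []Vote) bool {
+	total, signed := int64(0), int64(0)
+	byAddr := make(map[string]Validator)
+	for _, v := range vals {
+		total += v.Power
+		byAddr[v.Address] = v
+	}
+	counted := make(map[string]bool) // count each validator at most once
+	for _, vote := range votes {
+		v, ok := byAddr[vote.ValidatorAddress]
+		if ok && vote.SigOK && !counted[v.Address] {
+			counted[v.Address] = true
+			signed += v.Power
+		}
+	}
+	return 3*signed > 2*total
+}
+```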
diff --git a/docs/metrics.md b/docs/tendermint-core/metrics.md
similarity index 100%
rename from docs/metrics.md
rename to docs/tendermint-core/metrics.md
diff --git a/docs/specification/rpc.md b/docs/tendermint-core/rpc.md
similarity index 100%
rename from docs/specification/rpc.md
rename to docs/tendermint-core/rpc.md
diff --git a/docs/running-in-production.md b/docs/tendermint-core/running-in-production.md
similarity index 72%
rename from docs/running-in-production.md
rename to docs/tendermint-core/running-in-production.md
index 3ceded499..094734320 100644
--- a/docs/running-in-production.md
+++ b/docs/tendermint-core/running-in-production.md
@@ -16,12 +16,12 @@ logging level, you can do so by running tendermint with
 
 Validators are supposed to set up [Sentry Node
 Architecture](https://blog.cosmos.network/tendermint-explained-bringing-bft-based-pos-to-the-public-blockchain-domain-f22e274a0fdb)
 to prevent Denial-of-service attacks. You can read more about it
-[here](https://github.com/tendermint/aib-data/blob/develop/medium/TendermintBFT.md).
+[here](../interviews/tendermint-bft.md).
 
 ### P2P
 
 The core of the Tendermint peer-to-peer system is `MConnection`. Each
-connection has `MaxPacketMsgSize`, which is the maximum packet
+connection has `MaxPacketMsgPayloadSize`, which is the maximum packet
 size and bounded send & receive queues. One can impose restrictions on
 send & receive rate per connection (`SendRate`, `RecvRate`).
 
@@ -49,21 +49,25 @@ second TODO is to query the /status RPC endpoint. It provides the
 necessary info: whether the node is syncing or not, what height it is
 on, etc.
 
-    $ curl http(s)://{ip}:{rpcPort}/status
+```
+curl http(s)://{ip}:{rpcPort}/status
+```
 
 `dump_consensus_state` will give you a detailed overview of the
 consensus state (proposer, latest validators, peers' states). From it,
 you should be able to figure out why, for example, the network
 had halted.
 
-    $ curl http(s)://{ip}:{rpcPort}/dump_consensus_state
+```
+curl http(s)://{ip}:{rpcPort}/dump_consensus_state
+```
 
 There is a reduced version of this endpoint - `consensus_state`, which
 returns just the votes seen at the current height.
 
-- [Github Issues](https://github.com/tendermint/tendermint/issues)
-- [StackOverflow
-  questions](https://stackoverflow.com/questions/tagged/tendermint)
+- [Github Issues](https://github.com/tendermint/tendermint/issues)
+- [StackOverflow
+  questions](https://stackoverflow.com/questions/tagged/tendermint)
 
 ## Monitoring Tendermint
 
@@ -100,6 +104,69 @@ signals we use the default behaviour in Go: [Default behavior of signals
 in Go
 programs](https://golang.org/pkg/os/signal/#hdr-Default_behavior_of_signals_in_Go_programs).
 
+## Corruption
+
+**NOTE:** Make sure you have a backup of the Tendermint data directory.
+
+### Possible causes
+
+Remember that most corruption is caused by hardware issues:
+
+- RAID controllers with faulty / worn out battery backup, and an unexpected power loss
+- Hard disk drives with write-back cache enabled, and an unexpected power loss
+- Cheap SSDs with insufficient power-loss protection, and an unexpected power-loss
+- Defective RAM
+- Defective or overheating CPU(s)
+
+Other causes can be:
+
+- Database systems configured with fsync=off and an OS crash or power loss
+- Filesystems configured to use write barriers plus a storage layer that ignores write barriers. LVM is a particular culprit.
+- Tendermint bugs
+- Operating system bugs
+- Admin error (e.g., directly modifying Tendermint data-directory contents)
+
+(Source: https://wiki.postgresql.org/wiki/Corruption)
+
+### WAL Corruption
+
+If the consensus WAL is corrupted at the latest height and you are trying to start
+Tendermint, replay will fail with a panic.
+
+Recovering from data corruption can be hard and time-consuming. Here are two approaches you can take:
+
+1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers.
+2) Try to repair the WAL file manually:
+
+ 1. Create a backup of the corrupted WAL file:
+
+```
+cp "$TMHOME/data/cs.wal/wal" /tmp/corrupted_wal_backup
+```
+
+ 2. Use `./scripts/wal2json` to create a human-readable version
+
+```
+./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal
+```
+
+ 3. Search for a "CORRUPTED MESSAGE" line.
+ 4. By looking at the previous message, the message after the corrupted one,
+    and the logs, try to rebuild the message. If the subsequent
+    messages are marked as corrupted too (this may happen if the length header
+    got corrupted or some writes did not make it to the WAL ~ truncation),
+    then remove all the lines starting from the corrupted one and restart
+    Tendermint.
+
+```
+$EDITOR /tmp/corrupted_wal
+```
+
+ 5. After editing, convert this file back into binary form by running:
+
+```
+./scripts/json2wal/json2wal /tmp/corrupted_wal > "$TMHOME/data/cs.wal/wal"
+```
+
 ## Hardware
 
 ### Processor and Memory
 
@@ -107,18 +174,18 @@ While actual specs vary depending on the load and validators count,
 minimal requirements are:
 
-- 1GB RAM
-- 25GB of disk space
-- 1.4 GHz CPU
+- 1GB RAM
+- 25GB of disk space
+- 1.4 GHz CPU
 
 SSD disks are preferable for applications with high transaction
 throughput.
 
 Recommended:
 
-- 2GB RAM
-- 100GB SSD
-- x64 2.0 GHz 2v CPU
+- 2GB RAM
+- 100GB SSD
+- x64 2.0 GHz 2v CPU
 
 While for now, Tendermint stores all the history and it may require
 significant disk space over time, we are planning to implement state
 
@@ -145,21 +212,23 @@ Cosmos network.
 
 ## Configuration parameters
 
-- `p2p.flush_throttle_timeout` `p2p.max_packet_msg_payload_size`
-  `p2p.send_rate` `p2p.recv_rate`
+- `p2p.flush_throttle_timeout` `p2p.max_packet_msg_payload_size`
+  `p2p.send_rate` `p2p.recv_rate`
 
 If you are going to use Tendermint in a private domain and you have a
 private high-speed network among your peers, it makes sense to lower
 the flush throttle timeout and increase the other params.
 
-    [p2p]
+```
+[p2p]
 
-    send_rate=20000000 # 2MB/s
-    recv_rate=20000000 # 2MB/s
-    flush_throttle_timeout=10
-    max_packet_msg_payload_size=10240 # 10KB
+send_rate=20000000 # 2MB/s
+recv_rate=20000000 # 2MB/s
+flush_throttle_timeout=10
+max_packet_msg_payload_size=10240 # 10KB
+```
 
-- `mempool.recheck`
+- `mempool.recheck`
 
 After every block, Tendermint rechecks every transaction left in the
 mempool to see if transactions committed in that block affected the
 application state, so some of the transactions left may become invalid.
 If that does not apply to your application, you can disable it by
 setting `mempool.recheck=false`.
 
-- `mempool.broadcast`
+- `mempool.broadcast`
 
 Setting this to false will stop the mempool from relaying transactions
 to other peers until they are included in a block. It means only the
 peer you send the tx to will see it until it is included in a block.
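+
+As a sketch, for local development or other private setups both of
+these behaviours can be switched off in the `[mempool]` section of
+`config.toml` (keep the defaults for public deployments):
+
+```
+[mempool]
+
+# Don't recheck leftover mempool txs after every block
+recheck = false
+
+# Don't relay txs to peers before they are included in a block
+broadcast = false
+```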
-- `consensus.skip_timeout_commit`
+- `consensus.skip_timeout_commit`
 
 We want `skip_timeout_commit=false` when there is economics on the line
 because proposers should wait to hear more votes. But if you don't
@@ -182,22 +251,17 @@ be kept false by default for public deployments (e.g. [Cosmos
 Hub](https://cosmos.network/intro/hub)) while for enterprise
 applications, setting it to true is not a problem.
 
-- `consensus.peer_gossip_sleep_duration`
+- `consensus.peer_gossip_sleep_duration`
 
 You can try to reduce the time your node sleeps before checking if
 there's something to send to its peers.
 
-- `consensus.timeout_commit`
+- `consensus.timeout_commit`
 
 You can also try lowering `timeout_commit` (the time we sleep before
 proposing the next block).
 
-- `consensus.max_block_size_txs`
-
-By default, the maximum number of transactions per block is 10_000.
-Feel free to change it to suit your needs.
-
-- `p2p.addr_book_strict`
+- `p2p.addr_book_strict`
 
 By default, Tendermint checks whether a peer's address is routable before
 saving it to the address book. The address is considered routable if the IP
diff --git a/docs/specification/secure-p2p.rst b/docs/tendermint-core/secure-p2p.md
similarity index 72%
rename from docs/specification/secure-p2p.rst
rename to docs/tendermint-core/secure-p2p.md
index de95f0cf0..aad5eac41 100644
--- a/docs/specification/secure-p2p.rst
+++ b/docs/tendermint-core/secure-p2p.md
@@ -1,12 +1,11 @@
-Secure P2P
-==========
+# Secure P2P
 
 The Tendermint p2p protocol uses an authenticated encryption scheme
-based on the `Station-to-Station
-Protocol <https://en.wikipedia.org/wiki/Station-to-Station_protocol>`__.
+based on the [Station-to-Station
+Protocol](https://en.wikipedia.org/wiki/Station-to-Station_protocol).
 The implementation uses
-`golang's <https://godoc.org/golang.org/x/crypto/nacl/box>`__ `nacl
-box <http://nacl.cr.yp.to/box.html>`__ for the actual authenticated
+[golang's](https://godoc.org/golang.org/x/crypto/nacl/box) [nacl
+box](http://nacl.cr.yp.to/box.html) for the actual authenticated
 encryption algorithm.
 
 Each peer generates an ED25519 key-pair to use as a persistent
@@ -19,10 +18,9 @@ their respective ephemeral public keys. This happens in the clear.
 
 They then each compute the shared secret. The shared secret is the
 multiplication of the peer's ephemeral private key by the other peer's
 ephemeral public key. The result is the same for both peers by the magic
-of `elliptic
-curves <https://en.wikipedia.org/wiki/Elliptic_curve_cryptography>`__.
-The shared secret is used as the symmetric key for the encryption
-algorithm.
+of [elliptic
+curves](https://en.wikipedia.org/wiki/Elliptic_curve_cryptography). The
+shared secret is used as the symmetric key for the encryption algorithm.
 
 The two ephemeral public keys are sorted to establish a canonical order.
 Then a 24-byte nonce is generated by concatenating the public keys and
@@ -52,8 +50,7 @@ time it is used. The communications maintain Perfect Forward Secrecy, as
 the persistent key pair was not used for generating secrets - only for
 authenticating.
 
-Caveat
-------
+## Caveat
 
 This system is still vulnerable to a Man-In-The-Middle attack if the
 persistent public key of the remote node is not known in advance. The
@@ -62,17 +59,15 @@ such as the Web-of-Trust or Certificate Authorities. In our case, we
 can use the blockchain itself as a certificate authority to ensure that
 we are connected to at least one validator.
 
-Config
-------
+## Config
 
 Authenticated encryption is enabled by default.
 
-Additional Reading
-------------------
+## Additional Reading
 
-- `Implementation <https://github.com/tendermint/tendermint/blob/64bae01d007b5bee0d0827ab53259ffd5910b4e6/p2p/conn/secret_connection.go#L47>`__
-- `Original STS paper by Whitfield Diffie, Paul C. van Oorschot and
-  Michael J.
- Wiener `__ -- `Further work on secret - handshakes `__ +- [Implementation](https://github.com/tendermint/tendermint/blob/64bae01d007b5bee0d0827ab53259ffd5910b4e6/p2p/conn/secret_connection.go#L47) +- [Original STS paper by Whitfield Diffie, Paul C. van Oorschot and + Michael J. + Wiener](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.6107&rep=rep1&type=pdf) +- [Further work on secret + handshakes](https://dominictarr.github.io/secret-handshake-paper/shs.pdf) diff --git a/docs/using-tendermint.md b/docs/tendermint-core/using-tendermint.md similarity index 69% rename from docs/using-tendermint.md rename to docs/tendermint-core/using-tendermint.md index 9e83683be..11949c798 100644 --- a/docs/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -16,7 +16,9 @@ this by setting the `TMHOME` environment variable. Initialize the root directory by running: - tendermint init +``` +tendermint init +``` This will create a new private key (`priv_validator.json`), and a genesis file (`genesis.json`) containing the associated public key, in @@ -25,24 +27,97 @@ with one validator. For more elaborate initialization, see the tesnet command: - tendermint testnet --help +``` +tendermint testnet --help +``` + +### Genesis + +The `genesis.json` file in `$TMHOME/config/` defines the initial +TendermintCore state upon genesis of the blockchain ([see +definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.go)). + +#### Fields + +- `genesis_time`: Official time of blockchain start. +- `chain_id`: ID of the blockchain. This must be unique for + every blockchain. If your testnet blockchains do not have unique + chain IDs, you will have a bad time. +- `validators`: +- `pub_key`: The first element specifies the `pub_key` type. 1 + == Ed25519. The second element are the pubkey bytes. +- `power`: The validator's voting power. +- `name`: Name of the validator (optional). +- `app_hash`: The expected application hash (as returned by the + `ResponseInfo` ABCI message) upon genesis. If the app's hash does + not match, Tendermint will panic. +- `app_state`: The application state (e.g. initial distribution + of tokens). + +#### Sample genesis.json + +``` +{ + "genesis_time": "2018-07-09T22:43:06.255718641Z", + "chain_id": "chain-IAkWsK", + "validators": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "oX8HhKsErMluxI0QWNSR8djQMSupDvHdAYrHwP7n73k=" + }, + "power": "1", + "name": "node0" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "UZNSJA9zmeFQj36Rs296lY+WFQ4Rt6s7snPpuKypl5I=" + }, + "power": "1", + "name": "node1" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "i9GrM6/MHB4zjCelMZBUYHNXYIzl4n0RkDCVmmLhS/o=" + }, + "power": "1", + "name": "node2" + }, + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "0qq7954l87trEqbQV9c7d1gurnjTGMxreXc848ZZ5aw=" + }, + "power": "1", + "name": "node3" + } + ], + "app_hash": "" +} +``` ## Run To run a Tendermint node, use - tendermint node +``` +tendermint node +``` By default, Tendermint will try to connect to an ABCI application on [127.0.0.1:26658](127.0.0.1:26658). If you have the `kvstore` ABCI app installed, run it in another window. If you don't, kill Tendermint and run an in-process version of the `kvstore` app: - tendermint node --proxy_app=kvstore +``` +tendermint node --proxy_app=kvstore +``` After a few seconds you should see blocks start streaming in. Note that blocks are produced regularly, even if there are no transactions. 
See
-*No Empty Blocks*, below, to modify this setting.
+_No Empty Blocks_, below, to modify this setting.
 
 Tendermint supports in-process versions of the `counter`, `kvstore` and
 `nil` apps that ship as examples with `abci-cli`. It's easy to compile
@@ -51,22 +126,30 @@ app is not written in Go, simply run it in another process, and use the
 `--proxy_app` flag to specify the address of the socket it is listening
 on, for instance:
 
-    tendermint node --proxy_app=/var/run/abci.sock
+```
+tendermint node --proxy_app=/var/run/abci.sock
+```
 
 ## Transactions
 
 To send a transaction, use `curl` to make requests to the Tendermint RPC
 server, for example:
 
-    curl http://localhost:26657/broadcast_tx_commit?tx=\"abcd\"
+```
+curl http://localhost:26657/broadcast_tx_commit?tx=\"abcd\"
+```
 
 We can see the chain's status at the `/status` end-point:
 
-    curl http://localhost:26657/status | json_pp
+```
+curl http://localhost:26657/status | json_pp
+```
 
 and the `latest_app_hash` in particular:
 
-    curl http://localhost:26657/status | json_pp | grep latest_app_hash
+```
+curl http://localhost:26657/status | json_pp | grep latest_app_hash
+```
 
 Visit http://localhost:26657 in your browser to see the list of other
 endpoints. Some take no arguments (like `/status`), while others specify
@@ -81,30 +164,40 @@ With `GET`:
 
 To send a UTF8 string byte array, quote the value of the tx parameter:
 
-    curl 'http://localhost:26657/broadcast_tx_commit?tx="hello"'
+```
+curl 'http://localhost:26657/broadcast_tx_commit?tx="hello"'
+```
 
 which sends a 5 byte transaction: "h e l l o" \[68 65 6c 6c 6f\]. Note
 the URL must be wrapped with single quotes, else bash will ignore the
 double quotes. To avoid the single quotes, escape the double quotes:
 
-    curl http://localhost:26657/broadcast_tx_commit?tx=\"hello\"
+```
+curl http://localhost:26657/broadcast_tx_commit?tx=\"hello\"
+```
 
 Using a special character:
 
-    curl 'http://localhost:26657/broadcast_tx_commit?tx="€5"'
+```
+curl 'http://localhost:26657/broadcast_tx_commit?tx="€5"'
+```
 
 sends a 4 byte transaction: "€5" (UTF8) \[e2 82 ac 35\]. To send as raw
 hex, omit quotes AND prefix the hex string with `0x`:
 
-    curl http://localhost:26657/broadcast_tx_commit?tx=0x01020304
+```
+curl http://localhost:26657/broadcast_tx_commit?tx=0x01020304
+```
 
 which sends a 4 byte transaction: \[01 02 03 04\].
 
 With `POST` (using `json`), the raw hex must be `base64` encoded:
 
-    curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657
+```
+curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657
+```
 
 which sends the same 4 byte transaction: \[01 02 03 04\].
 
@@ -118,7 +211,9 @@ afford to lose all blockchain data!
 
 To reset a blockchain, stop the node, remove the `~/.tendermint/data`
 directory and run
 
-    tendermint unsafe_reset_priv_validator
+```
+tendermint unsafe_reset_priv_validator
+```
 
 This final step is necessary to reset the `priv_validator.json`, which
 otherwise prevents you from making conflicting votes in the consensus
@@ -129,7 +224,7 @@ new blockchain will not make any blocks.
 
 ## Configuration
 
 Tendermint uses a `config.toml` for configuration. For details, see [the
-config specification](./specification/configuration.html).
+config specification](./configuration.md).
Notable options include the socket address of the application (`proxy_app`), the listening address of the Tendermint peer @@ -150,21 +245,27 @@ To configure Tendermint to not produce empty blocks unless there are transactions or the app hash changes, run Tendermint with this additional flag: - tendermint node --consensus.create_empty_blocks=false +``` +tendermint node --consensus.create_empty_blocks=false +``` or set the configuration via the `config.toml` file: - [consensus] - create_empty_blocks = false +``` +[consensus] +create_empty_blocks = false +``` -Remember: because the default is to *create empty blocks*, avoiding +Remember: because the default is to _create empty blocks_, avoiding empty blocks requires the config option to be set to `false`. The block interval setting allows for a delay (in seconds) between the creation of each new empty block. It is set via the `config.toml`: - [consensus] - create_empty_blocks_interval = 5 +``` +[consensus] +create_empty_blocks_interval = 5 +``` With this setting, empty blocks will be produced every 5s if no block has been produced otherwise, regardless of the value of @@ -181,9 +282,11 @@ eventually included in a block. Since there are multiple phases to processing a transaction, we offer multiple endpoints to broadcast a transaction: - /broadcast_tx_async - /broadcast_tx_sync - /broadcast_tx_commit +``` +/broadcast_tx_async +/broadcast_tx_sync +/broadcast_tx_commit +``` These correspond to no-processing, processing through the mempool, and processing through a block, respectively. That is, `broadcast_tx_async`, @@ -208,38 +311,42 @@ When `tendermint init` is run, both a `genesis.json` and `priv_validator.json` are created in `~/.tendermint/config`. The `genesis.json` might look like: +``` +{ + "validators" : [ { - "validators" : [ - { - "pub_key" : { - "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=", - "type" : "tendermint/PubKeyEd25519" - }, - "power" : 10, - "name" : "" - } - ], - "app_hash" : "", - "chain_id" : "test-chain-rDlYSN", - "genesis_time" : "0001-01-01T00:00:00Z" - } - -And the `priv_validator.json`: - - { - "last_step" : 0, - "last_round" : "0", - "address" : "B788DEDE4F50AD8BC9462DE76741CCAFF87D51E2", "pub_key" : { "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=", "type" : "tendermint/PubKeyEd25519" }, - "last_height" : "0", - "priv_key" : { - "value" : "JPivl82x+LfVkp8i3ztoTjY6c6GJ4pBxQexErOCyhwqHeGT5ATxzpAtPJKnxNx/NyUnD8Ebv3OIYH+kgD4N88Q==", - "type" : "tendermint/PrivKeyEd25519" - } + "power" : 10, + "name" : "" } + ], + "app_hash" : "", + "chain_id" : "test-chain-rDlYSN", + "genesis_time" : "0001-01-01T00:00:00Z" +} +``` + +And the `priv_validator.json`: + +``` +{ + "last_step" : 0, + "last_round" : "0", + "address" : "B788DEDE4F50AD8BC9462DE76741CCAFF87D51E2", + "pub_key" : { + "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=", + "type" : "tendermint/PubKeyEd25519" + }, + "last_height" : "0", + "priv_key" : { + "value" : "JPivl82x+LfVkp8i3ztoTjY6c6GJ4pBxQexErOCyhwqHeGT5ATxzpAtPJKnxNx/NyUnD8Ebv3OIYH+kgD4N88Q==", + "type" : "tendermint/PrivKeyEd25519" + } +} +``` The `priv_validator.json` actually contains a private key, and should thus be kept absolutely secret; for now we work with the plain text. @@ -260,7 +367,7 @@ but must be positive, thus the range is: 0 through 9223372036854775807. Because of how the current proposer selection algorithm works, we do not recommend having voting powers greater than 10\^12 (ie. 
1 trillion) (see [Proposals section of Byzantine Consensus
-Algorithm](./specification/byzantine-consensus-algorithm.html#proposals)
+Algorithm](./specification/byzantine-consensus-algorithm.md#proposals)
for details).

If we want to add more nodes to the network, we have two choices: we can
@@ -272,6 +379,7 @@ with the consensus protocol.

### Peers

#### Seed
+
A seed node is a node that relays the addresses of other peers which it
knows of. These nodes constantly crawl the network to try to get more peers. The
addresses which the seed node relays get saved into a local address book. Once
@@ -282,6 +390,7 @@ only need them on the first start. The seed node will immediately disconnect
from you after sending you some addresses.

#### Persistent Peer
+
Persistent peers are peers you want to stay constantly connected with. If you
disconnect, you will try to connect directly back to them, as opposed to using
another address from the address book. On restarts you will always try to
@@ -302,12 +411,16 @@ persistent connections with.

For example,

- tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"
+```
+tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"
+```

Alternatively, you can use the `/dial_seeds` endpoint of the RPC to
specify seeds for a running node to connect to:

- curl 'localhost:26657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"\]'
+```
+curl 'localhost:26657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"\]'
+```

Note that, with PeX enabled, you should not need seeds after the first start.

@@ -318,8 +431,11 @@ maintain a persistent connection with each, you can use the `config.toml` or
the `/dial_peers` RPC endpoint to do it without stopping the Tendermint
Core instance.

- tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"
- curl 'localhost:26657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"\]'
+```
+tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"
+
+curl 'localhost:26657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"\]'
+```

### Adding a Non-Validator

@@ -338,51 +454,57 @@ before starting the network. For instance, we could make a new

We can generate a new `priv_validator.json` with the command:

- tendermint gen_validator
+```
+tendermint gen_validator
+```

Now we can update our genesis file.
For instance, if the new `priv_validator.json` looks like:

+```
+{
+  "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902",
+  "pub_key" : {
+    "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
+    "type" : "tendermint/PubKeyEd25519"
+  },
+  "priv_key" : {
+    "value" : "EDJY9W6zlAw+su6ITgTKg2nTZcHAH1NMTW5iwlgmNDuX1f35+OR4HMN88ZtQzsAwhETq4k3vzM3n6WTk5ii16Q==",
+    "type" : "tendermint/PrivKeyEd25519"
+  },
+  "last_step" : 0,
+  "last_round" : "0",
+  "last_height" : "0"
+}
+```
+
+then the new `genesis.json` will be:
+
+```
+{
+  "validators" : [
     {
-      "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902",
       "pub_key" : {
-        "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
+        "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
         "type" : "tendermint/PubKeyEd25519"
       },
-      "priv_key" : {
-        "value" : "EDJY9W6zlAw+su6ITgTKg2nTZcHAH1NMTW5iwlgmNDuX1f35+OR4HMN88ZtQzsAwhETq4k3vzM3n6WTk5ii16Q==",
-        "type" : "tendermint/PrivKeyEd25519"
-      },
-      "last_step" : 0,
-      "last_round" : "0",
-      "last_height" : "0"
-    }
-
-then the new `genesis.json` will be:
-
+      "power" : 10,
+      "name" : ""
+    },
     {
-      "validators" : [
-        {
-          "pub_key" : {
-            "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
-            "type" : "tendermint/PubKeyEd25519"
-          },
-          "power" : 10,
-          "name" : ""
-        },
-        {
-          "pub_key" : {
-            "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
-            "type" : "tendermint/PubKeyEd25519"
-          },
-          "power" : 10,
-          "name" : ""
-        }
-      ],
-      "app_hash" : "",
-      "chain_id" : "test-chain-rDlYSN",
-      "genesis_time" : "0001-01-01T00:00:00Z"
+      "pub_key" : {
+        "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
+        "type" : "tendermint/PubKeyEd25519"
+      },
+      "power" : 10,
+      "name" : ""
     }
+  ],
+  "app_hash" : "",
+  "chain_id" : "test-chain-rDlYSN",
+  "genesis_time" : "0001-01-01T00:00:00Z"
+}
+```

Update the `genesis.json` in `~/.tendermint/config`. Copy the genesis
file and the new `priv_validator.json` to the `~/.tendermint/config` on
@@ -398,7 +520,7 @@ failing, you need at least four validator nodes (e.g., 2/3).

Updating validators in a live network is supported but must be
explicitly programmed by the application developer. See the [application
-developers guide](./app-development.html) for more details.
+developers guide](./app-development.md) for more details.

### Local Network

diff --git a/docs/specification/validators.rst b/docs/tendermint-core/validators.md
similarity index 58%
rename from docs/specification/validators.rst
rename to docs/tendermint-core/validators.md
index 085994f3d..0c1d7d89a 100644
--- a/docs/specification/validators.rst
+++ b/docs/tendermint-core/validators.md
@@ -1,5 +1,4 @@
-Validators
-==========
+# Validators

Validators are responsible for committing new blocks in the blockchain.
These validators participate in the consensus protocol by broadcasting
@@ -19,25 +18,22 @@ to post any collateral at all.

Validators have a cryptographic key-pair and an associated amount of
"voting power". Voting power need not be the same.

-Becoming a Validator
---------------------
+## Becoming a Validator

There are two ways to become a validator.

-1. They can be pre-established in the `genesis
-   state <./genesis.html>`__
-2. The ABCI app responds to the EndBlock message with changes to the
-   existing validator set.
+1. They can be pre-established in the [genesis state](../../tendermint-core/using-tendermint.md#genesis)
+2. The ABCI app responds to the EndBlock message with changes to the
+   existing validator set.
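+
+In either case, you can inspect the current validator set through the RPC
+to confirm that a change took effect. A quick check (assuming a node with
+the RPC listening on the default port):
+
+```
+curl http://localhost:26657/validators
+```
+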
-Committing a Block
-------------------
+## Committing a Block

*+2/3 is short for "more than 2/3"*

-A block is committed when +2/3 of the validator set sign `precommit
-votes <./block-structure.html#vote>`__ for that block at the same
-``round``. The +2/3 set of precommit votes is
-called a `*commit* <./block-structure.html#commit>`__. While any
-+2/3 set of precommits for the same block at the same height&round can
-serve as validation, the canonical commit is included in the next block
-(see `LastCommit <./block-structure.html>`__).
+A block is committed when +2/3 of the validator set sign [precommit
+votes](../spec/blockchain/blockchain.md#vote) for that block at the same `round`.
+The +2/3 set of precommit votes is called a
+[*commit*](../spec/blockchain/blockchain.md#commit). While any +2/3 set of
+precommits for the same block at the same height&round can serve as
+validation, the canonical commit is included in the next block (see
+[LastCommit](../spec/blockchain/blockchain.md#last-commit)).
diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md
new file mode 100644
index 000000000..20c368e29
--- /dev/null
+++ b/docs/tools/benchmarking.md
@@ -0,0 +1,80 @@
+# tm-bench
+
+Tendermint blockchain benchmarking tool:
+
+- https://github.com/tendermint/tools/tree/master/tm-bench
+
+For example, the following:
+
+```
+tm-bench -T 10 -r 1000 localhost:26657
+```
+
+will output:
+
+```
+Stats          Avg      StdDev    Max     Total
+Txs/sec        818      532       1549    9000
+Blocks/sec     0.818    0.386     1       9
+```
+
+## Quick Start
+
+[Install Tendermint](../introduction/install).
+This tool is currently set up to work with tendermint's develop branch, so
+please ensure you are on it. (If not, update `tendermint` and `tmlibs` in
+Gopkg.toml to use the master branch.)
+
+Then run:
+
+```
+tendermint init
+tendermint node --proxy_app=kvstore
+```
+
+```
+tm-bench localhost:26657
+```
+
+with the last command run in a separate window.
+
+## Usage
+
+```
+tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints]
+
+Examples:
+        tm-bench localhost:26657
+Flags:
+  -T int
+        Exit after the specified amount of time in seconds (default 10)
+  -c int
+        Connections to keep open per endpoint (default 1)
+  -r int
+        Txs per second to send in a connection (default 1000)
+  -s int
+        Size per tx in bytes
+  -v    Verbose output
+```
+
+## How stats are collected
+
+These stats are derived by having each connection send transactions at the
+specified rate (or as close as it can get) for the specified time. After the
+specified time, it iterates over all of the blocks that were created in that
+time. The average and stddev per second are computed from that data by
+grouping it by second.
+
+To send transactions at the specified rate in each connection, we loop
+through the number of transactions. If it's too slow, the loop stops at the
+one second mark. If it's too fast, we wait until the one second mark ends.
+The transactions per second stat is computed based on what ends up in the
+blocks.
+
+Each of the connections is handled via two separate goroutines.
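+
+Because the throughput numbers come from committed blocks, you can
+sanity-check them against the chain itself. A sketch, assuming the node from
+the Quick Start and that block headers carry a `num_txs` field in this
+version:
+
+```
+curl -s 'localhost:26657/blockchain?minHeight=1&maxHeight=10' | json_pp | grep num_txs
+```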
+
+## Development
+
+```
+make get_vendor_deps
+make test
+```
diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md
new file mode 100644
index 000000000..5cc2ad3b1
--- /dev/null
+++ b/docs/tools/monitoring.md
@@ -0,0 +1,92 @@
+# tm-monitor
+
+Tendermint blockchain monitoring tool; watches over one or more nodes,
+collecting and providing various statistics to the user:
+
+- https://github.com/tendermint/tools/tree/master/tm-monitor
+
+## Quick Start
+
+### Docker
+
+Assuming your application is running in another container with the name
+`app`:
+
+```
+docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm --link=app tendermint/tendermint node --proxy_app=tcp://app:26658
+
+docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657
+```
+
+If you don't have an application yet, but still want to try the monitor
+out, use `kvstore`:
+
+```
+docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore
+```
+```
+docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657
+```
+
+### Using Binaries
+
+[Install Tendermint](https://github.com/tendermint/tendermint#install).
+
+Then run:
+
+```
+tendermint init
+tendermint node --proxy_app=kvstore
+```
+
+```
+tm-monitor localhost:26657
+```
+
+with the last command run in a separate window.
+
+## Usage
+
+```
+tm-monitor [-v] [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints]
+
+Examples:
+        # monitor single instance
+        tm-monitor localhost:26657
+
+        # monitor a few instances by providing comma-separated list of RPC endpoints
+        tm-monitor host1:26657,host2:26657
+Flags:
+  -listen-addr string
+        HTTP and Websocket server listen address (default "tcp://0.0.0.0:26670")
+  -no-ton
+        Do not show ton (table of nodes)
+  -v    verbose logging
+```
+
+### RPC UI
+
+Run `tm-monitor` and visit http://localhost:26670. You should see the
+list of the available RPC endpoints:
+
+```
+http://localhost:26670/status
+http://localhost:26670/status/network
+http://localhost:26670/monitor?endpoint=_
+http://localhost:26670/status/node?name=_
+http://localhost:26670/unmonitor?endpoint=_
+```
+
+The API is available as GET requests with URI encoded parameters, or as
+JSONRPC POST requests. The JSONRPC methods are also exposed over
+websocket.
+
+## Development
+
+```
+make get_tools
+make get_vendor_deps
+make test
+```
diff --git a/docs/yarn.lock b/docs/yarn.lock
new file mode 100644
index 000000000..5591b8fa0
--- /dev/null
+++ b/docs/yarn.lock
@@ -0,0 +1,2507 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1 + + +"@azu/format-text@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@azu/format-text/-/format-text-1.0.1.tgz#6967350a94640f6b02855169bd897ce54d6cebe2" + +"@azu/style-format@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@azu/style-format/-/style-format-1.0.0.tgz#e70187f8a862e191b1bce6c0268f13acd3a56b20" + dependencies: + "@azu/format-text" "^1.0.1" + +"@sindresorhus/is@^0.7.0": + version "0.7.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" + +"@textlint/ast-node-types@^4.0.2": + version "4.0.2" + resolved "https://registry.yarnpkg.com/@textlint/ast-node-types/-/ast-node-types-4.0.2.tgz#5386a15187798efb48eb71fa1cbf6ca2770b206a" + +"@textlint/ast-traverse@^2.0.8": + version "2.0.8" + resolved "https://registry.yarnpkg.com/@textlint/ast-traverse/-/ast-traverse-2.0.8.tgz#c180fe23dc3b8a6aa68539be70efb4ff17c38a3a" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + +"@textlint/feature-flag@^3.0.4": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@textlint/feature-flag/-/feature-flag-3.0.4.tgz#4290a4bb53da28c1f5f1d5ce0f4ae6630ab939ea" + dependencies: + map-like "^2.0.0" + +"@textlint/fixer-formatter@^3.0.7": + version "3.0.7" + resolved "https://registry.yarnpkg.com/@textlint/fixer-formatter/-/fixer-formatter-3.0.7.tgz#4ef15d5e606e2d32b89257afd382ed9dbb218846" + dependencies: + "@textlint/kernel" "^2.0.9" + chalk "^1.1.3" + debug "^2.1.0" + diff "^2.2.2" + interop-require "^1.0.0" + is-file "^1.0.0" + string-width "^1.0.1" + text-table "^0.2.0" + try-resolve "^1.0.1" + +"@textlint/kernel@^2.0.9": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@textlint/kernel/-/kernel-2.0.9.tgz#a4471b7969e192551230c35ea9fae32d80128ee0" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-traverse" "^2.0.8" + "@textlint/feature-flag" "^3.0.4" + "@types/bluebird" "^3.5.18" + bluebird "^3.5.1" + debug "^2.6.6" + deep-equal "^1.0.1" + object-assign "^4.1.1" + structured-source "^3.0.2" + +"@textlint/linter-formatter@^3.0.7": + version "3.0.7" + resolved "https://registry.yarnpkg.com/@textlint/linter-formatter/-/linter-formatter-3.0.7.tgz#66716cac94c047d94627a7e6af427a0d199eda7c" + dependencies: + "@azu/format-text" "^1.0.1" + "@azu/style-format" "^1.0.0" + "@textlint/kernel" "^2.0.9" + chalk "^1.0.0" + concat-stream "^1.5.1" + js-yaml "^3.2.4" + optionator "^0.8.1" + pluralize "^2.0.0" + string-width "^1.0.1" + string.prototype.padstart "^3.0.0" + strip-ansi "^3.0.1" + table "^3.7.8" + text-table "^0.2.0" + try-resolve "^1.0.1" + xml-escape "^1.0.0" + +"@textlint/markdown-to-ast@^6.0.8": + version "6.0.8" + resolved "https://registry.yarnpkg.com/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.8.tgz#baa509c42f842b4dba36ad91547a288c063396b8" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + debug "^2.1.3" + remark-frontmatter "^1.2.0" + remark-parse "^5.0.0" + structured-source "^3.0.2" + traverse "^0.6.6" + unified "^6.1.6" + +"@textlint/text-to-ast@^3.0.8": + version "3.0.8" + resolved "https://registry.yarnpkg.com/@textlint/text-to-ast/-/text-to-ast-3.0.8.tgz#6211977f369cec484447867f10dc155120f4c082" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + +"@textlint/textlint-plugin-markdown@^4.0.10": + version "4.0.10" + resolved "https://registry.yarnpkg.com/@textlint/textlint-plugin-markdown/-/textlint-plugin-markdown-4.0.10.tgz#a99b4a308067597e89439a9e87bc1c4a7f4d076b" + dependencies: + "@textlint/markdown-to-ast" "^6.0.8" + 
+"@textlint/textlint-plugin-text@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@textlint/textlint-plugin-text/-/textlint-plugin-text-3.0.10.tgz#619600bdc352d33a68e7a73d77d58b0c52b2a44f" + dependencies: + "@textlint/text-to-ast" "^3.0.8" + +"@types/bluebird@^3.5.18": + version "3.5.21" + resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.21.tgz#567615589cc913e84a28ecf9edb031732bdf2634" + +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + +aggregate-error@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-1.0.0.tgz#888344dad0220a72e3af50906117f48771925fac" + dependencies: + clean-stack "^1.0.0" + indent-string "^3.0.0" + +ajv-keywords@^1.0.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-1.5.1.tgz#314dd0a4b3368fad3dfcdc54ede6171b886daf3c" + +ajv@^4.7.0: + version "4.11.8" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536" + dependencies: + co "^4.6.0" + json-stable-stringify "^1.0.1" + +ajv@^5.1.0: + version "5.5.2" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" + dependencies: + co "^4.6.0" + fast-deep-equal "^1.0.0" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.3.0" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + dependencies: + color-convert "^1.9.0" + +anymatch@^1.3.0: + version "1.3.2" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.2.tgz#553dcb8f91e3c889845dfdba34c77721b90b9d7a" + dependencies: + micromatch "^2.1.5" + normalize-path "^2.0.0" + +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + +are-we-there-yet@~1.1.2: + version "1.1.5" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + dependencies: + sprintf-js "~1.0.2" + +arr-diff@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf" + dependencies: + arr-flatten "^1.0.1" + +arr-flatten@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" + +array-iterate@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/array-iterate/-/array-iterate-1.1.2.tgz#f66a57e84426f8097f4197fbb6c051b8e5cdf7d8" + +array-union@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + +array-unique@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53" + +arrify@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + +asn1@~0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + +async-each@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d" + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + +aws4@^1.6.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.7.0.tgz#d4d0e9b9dbfca77bf08eeb0a8a471550fe39e289" + +bail@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/bail/-/bail-1.0.3.tgz#63cfb9ddbac829b02a3128cd53224be78e6c21a3" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + +bcrypt-pbkdf@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + dependencies: + tweetnacl "^0.14.3" + +binary-extensions@^1.0.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.11.0.tgz#46aa1751fb6a2f93ee5e689bb1087d4b14c6c205" + +bluebird@^3.0.5, bluebird@^3.5.1: + version "3.5.1" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.1.tgz#d9551f9de98f1fcda1e683d17ee91a0602ee2eb9" + +boundary@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/boundary/-/boundary-1.0.1.tgz#4d67dc2602c0cc16dd9bce7ebf87e948290f5812" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^1.8.2: + version "1.8.5" + resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" + dependencies: + expand-range "^1.8.1" + preserve "^0.2.0" + repeat-element "^1.1.2" + +buffer-from@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + +builtin-modules@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" + +cacheable-request@^2.1.1: + version "2.1.4" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-2.1.4.tgz#0d808801b6342ad33c91df9d0b44dc09b91e5c3d" + dependencies: + clone-response 
"1.0.2" + get-stream "3.0.0" + http-cache-semantics "3.8.1" + keyv "3.0.0" + lowercase-keys "1.0.0" + normalize-url "2.0.1" + responselike "1.0.2" + +camelcase@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" + +capture-stack-trace@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/capture-stack-trace/-/capture-stack-trace-1.0.0.tgz#4a6fa07399c26bba47f0b2496b4d0fb408c5550d" + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + +ccount@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.0.3.tgz#f1cec43f332e2ea5a569fd46f9f5bde4e6102aff" + +chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^2.0.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e" + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +character-entities-html4@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/character-entities-html4/-/character-entities-html4-1.1.2.tgz#c44fdde3ce66b52e8d321d6c1bf46101f0150610" + +character-entities-legacy@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/character-entities-legacy/-/character-entities-legacy-1.1.2.tgz#7c6defb81648498222c9855309953d05f4d63a9c" + +character-entities@^1.0.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/character-entities/-/character-entities-1.2.2.tgz#58c8f371c0774ef0ba9b2aca5f00d8f100e6e363" + +character-reference-invalid@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.2.tgz#21e421ad3d84055952dab4a43a04e73cd425d3ed" + +charenc@~0.0.1: + version "0.0.2" + resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667" + +chokidar@^1.5.1: + version "1.7.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.7.0.tgz#798e689778151c8076b4b360e5edd28cda2bb468" + dependencies: + anymatch "^1.3.0" + async-each "^1.0.0" + glob-parent "^2.0.0" + inherits "^2.0.1" + is-binary-path "^1.0.0" + is-glob "^2.0.0" + path-is-absolute "^1.0.0" + readdirp "^2.0.0" + optionalDependencies: + fsevents "^1.0.0" + +chownr@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181" + +circular-json@^0.3.1: + version "0.3.3" + resolved "https://registry.yarnpkg.com/circular-json/-/circular-json-0.3.3.tgz#815c99ea84f6809529d2f45791bdf82711352d66" + +clean-stack@^1.0.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-1.3.0.tgz#9e821501ae979986c46b1d66d2d432db2fd4ae31" + +clone-response@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + dependencies: + mimic-response "^1.0.0" + +co@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/co/-/co-3.1.0.tgz#4ea54ea5a08938153185e15210c68d9092bc1b78" + +co@^4.6.0: + version "4.6.0" + resolved 
"https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + +collapse-white-space@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/collapse-white-space/-/collapse-white-space-1.0.4.tgz#ce05cf49e54c3277ae573036a26851ba430a0091" + +color-convert@^1.9.0: + version "1.9.2" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.2.tgz#49881b8fba67df12a96bdf3f56c0aab9e7913147" + dependencies: + color-name "1.1.1" + +color-name@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.1.tgz#4b1415304cf50028ea81643643bd82ea05803689" + +combined-stream@1.0.6, combined-stream@~1.0.5: + version "1.0.6" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.6.tgz#723e7df6e801ac5613113a7e445a9b69cb632818" + dependencies: + delayed-stream "~1.0.0" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + +concat-stream@^1.5.1: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + +core-util-is@1.0.2, core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + +create-error-class@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6" + dependencies: + capture-stack-trace "^1.0.0" + +crypt@~0.0.1: + version "0.0.2" + resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + dependencies: + assert-plus "^1.0.0" + +debug@^2.1.0, debug@^2.1.2, debug@^2.1.3, debug@^2.2.0, debug@^2.6.6: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + dependencies: + ms "2.0.0" + +debug@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + dependencies: + ms "2.0.0" + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + +decompress-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + dependencies: + mimic-response "^1.0.0" + +deep-equal@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5" + +deep-extend@^0.6.0: + version "0.6.0" + resolved 
"https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + +deep-is@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + +define-properties@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.2.tgz#83a73f2fea569898fb737193c8f873caf6d45c94" + dependencies: + foreach "^2.0.5" + object-keys "^1.0.8" + +del@^2.0.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/del/-/del-2.2.2.tgz#c12c981d067846c84bcaf862cff930d907ffd1a8" + dependencies: + globby "^5.0.0" + is-path-cwd "^1.0.0" + is-path-in-cwd "^1.0.0" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + rimraf "^2.2.8" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + +diff@^2.2.2: + version "2.2.3" + resolved "https://registry.yarnpkg.com/diff/-/diff-2.2.3.tgz#60eafd0d28ee906e4e8ff0a52c1229521033bf99" + +dns-packet@^1.1.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.1.tgz#12aa426981075be500b910eedcd0b47dd7deda5a" + dependencies: + ip "^1.1.0" + safe-buffer "^5.0.1" + +dns-socket@^1.6.2: + version "1.6.3" + resolved "https://registry.yarnpkg.com/dns-socket/-/dns-socket-1.6.3.tgz#5268724fad4aa46ad9c5ca4ffcd16e1de5342aab" + dependencies: + dns-packet "^1.1.0" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + +ecc-jsbn@~0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505" + dependencies: + jsbn "~0.1.0" + +error-ex@^1.2.0, error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + dependencies: + is-arrayish "^0.2.1" + +es-abstract@^1.4.3: + version "1.12.0" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.12.0.tgz#9dbbdd27c6856f0001421ca18782d786bf8a6165" + dependencies: + es-to-primitive "^1.1.1" + function-bind "^1.1.1" + has "^1.0.1" + is-callable "^1.1.3" + is-regex "^1.0.4" + +es-to-primitive@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.1.1.tgz#45355248a88979034b6792e19bb81f2b7975dd0d" + dependencies: + is-callable "^1.1.1" + is-date-object "^1.0.1" + is-symbol "^1.0.1" + +escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + +esprima@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + +expand-brackets@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b" + dependencies: + is-posix-bracket "^0.1.0" + +expand-range@^1.8.1: + version "1.8.2" + resolved 
"https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337" + dependencies: + fill-range "^2.1.0" + +extend@^3.0.0, extend@~3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444" + +extglob@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1" + dependencies: + is-extglob "^1.0.0" + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + +extsprintf@^1.2.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" + +fast-deep-equal@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz#c053477817c86b51daa853c81e059b733d023614" + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + +fast-levenshtein@~2.0.4: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + +fault@^1.0.0, fault@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/fault/-/fault-1.0.2.tgz#c3d0fec202f172a3a4d414042ad2bb5e2a3ffbaa" + dependencies: + format "^0.2.2" + +file-entry-cache@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-2.0.0.tgz#c392990c3e684783d838b8c84a45d8a048458361" + dependencies: + flat-cache "^1.2.1" + object-assign "^4.0.1" + +filename-regex@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" + +fill-range@^2.1.0: + version "2.2.4" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.4.tgz#eb1e773abb056dcd8df2bfdf6af59b8b3a936565" + dependencies: + is-number "^2.1.0" + isobject "^2.0.0" + randomatic "^3.0.0" + repeat-element "^1.1.2" + repeat-string "^1.5.2" + +find-up@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + dependencies: + locate-path "^2.0.0" + +flat-cache@^1.2.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-1.3.0.tgz#d3030b32b38154f4e3b7e9c709f490f7ef97c481" + dependencies: + circular-json "^0.3.1" + del "^2.0.2" + graceful-fs "^4.1.2" + write "^0.2.1" + +fn-name@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/fn-name/-/fn-name-2.0.1.tgz#5214d7537a4d06a4a301c0cc262feb84188002e7" + +for-in@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + +for-own@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + dependencies: + for-in "^1.0.1" + +foreach@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + +form-data@~2.3.1: + version "2.3.2" + resolved 
"https://registry.yarnpkg.com/form-data/-/form-data-2.3.2.tgz#4970498be604c20c005d4f5c23aecd21d6b49099" + dependencies: + asynckit "^0.4.0" + combined-stream "1.0.6" + mime-types "^2.1.12" + +format@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/format/-/format-0.2.2.tgz#d6170107e9efdc4ed30c9dc39016df942b5cb58b" + +from2@^2.1.1: + version "2.3.0" + resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.0" + +fs-minipass@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.5.tgz#06c277218454ec288df77ada54a03b8702aacb9d" + dependencies: + minipass "^2.2.1" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + +fsevents@^1.0.0: + version "1.2.4" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.4.tgz#f41dcb1af2582af3692da36fc55cbd8e1041c426" + dependencies: + nan "^2.9.2" + node-pre-gyp "^0.10.0" + +function-bind@^1.0.2, function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" + +get-stdin@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-5.0.1.tgz#122e161591e21ff4c52530305693f20e6393a398" + +get-stream@3.0.0, get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + dependencies: + assert-plus "^1.0.0" + +glob-base@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4" + dependencies: + glob-parent "^2.0.0" + is-glob "^2.0.0" + +glob-parent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28" + dependencies: + is-glob "^2.0.0" + +glob@^7.0.3, glob@^7.0.5, glob@^7.1.1: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globby@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-5.0.0.tgz#ebd84667ca0dbb330b99bcfc68eac2bc54370e0d" + dependencies: + array-union "^1.0.1" + arrify "^1.0.0" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +got@^6.7.1: + version "6.7.1" + resolved "https://registry.yarnpkg.com/got/-/got-6.7.1.tgz#240cd05785a9a18e561dc1b44b41c763ef1e8db0" + dependencies: + create-error-class "^3.0.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + is-redirect "^1.0.0" + is-retry-allowed "^1.0.0" + is-stream "^1.0.0" + lowercase-keys "^1.0.0" + safe-buffer "^5.0.1" + timed-out "^4.0.0" + unzip-response "^2.0.1" + 
url-parse-lax "^1.0.0" + +got@^8.0.0: + version "8.3.2" + resolved "https://registry.yarnpkg.com/got/-/got-8.3.2.tgz#1d23f64390e97f776cac52e5b936e5f514d2e937" + dependencies: + "@sindresorhus/is" "^0.7.0" + cacheable-request "^2.1.1" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + into-stream "^3.1.0" + is-retry-allowed "^1.1.0" + isurl "^1.0.0-alpha5" + lowercase-keys "^1.0.0" + mimic-response "^1.0.0" + p-cancelable "^0.4.0" + p-timeout "^2.0.1" + pify "^3.0.0" + safe-buffer "^5.1.1" + timed-out "^4.0.1" + url-parse-lax "^3.0.0" + url-to-options "^1.0.1" + +graceful-fs@^4.1.2: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + +har-validator@~5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.0.3.tgz#ba402c266194f15956ef15e0fcf242993f6a7dfd" + dependencies: + ajv "^5.1.0" + har-schema "^2.0.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + dependencies: + ansi-regex "^2.0.0" + +has-flag@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + +has-symbol-support-x@^1.4.1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" + +has-to-string-tag-x@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" + dependencies: + has-symbol-support-x "^1.4.1" + +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + +has@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + dependencies: + function-bind "^1.1.1" + +hosted-git-info@^2.1.4: + version "2.6.1" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.6.1.tgz#6e4cee78b01bb849dcf93527708c69fdbee410df" + +http-cache-semantics@3.8.1: + version "3.8.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz#39b0e16add9b605bf0a9ef3d9daaf4843b4cacd2" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +iconv-lite@^0.4.4: + version "0.4.23" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.23.tgz#297871f63be507adcfbfca715d0cd0eed84e9a63" + dependencies: + safer-buffer ">= 2.1.2 < 3" + +ignore-walk@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8" + dependencies: + minimatch "^3.0.4" + +ignore@^3.2.0: + version "3.3.10" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043" + +indent-string@^3.0.0: + 
version "3.2.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-3.2.0.tgz#4a5fd6d27cc332f37e5419a504dbb837105c9289" + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +ini@~1.3.0: + version "1.3.5" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" + +interop-require@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/interop-require/-/interop-require-1.0.0.tgz#e53103679944c88d7e6105b62a9f4475c783971e" + +into-stream@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/into-stream/-/into-stream-3.1.0.tgz#96fb0a936c12babd6ff1752a17d05616abd094c6" + dependencies: + from2 "^2.1.1" + p-is-promise "^1.1.0" + +ip-regex@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" + +ip@^1.1.0: + version "1.1.5" + resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a" + +is-absolute-url@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" + +is-alphabetical@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-alphabetical/-/is-alphabetical-1.0.2.tgz#1fa6e49213cb7885b75d15862fb3f3d96c884f41" + +is-alphanumeric@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-alphanumeric/-/is-alphanumeric-1.0.0.tgz#4a9cef71daf4c001c1d81d63d140cf53fd6889f4" + +is-alphanumerical@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-alphanumerical/-/is-alphanumerical-1.0.2.tgz#1138e9ae5040158dc6ff76b820acd6b7a181fd40" + dependencies: + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + +is-binary-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" + dependencies: + binary-extensions "^1.0.0" + +is-buffer@^1.1.4, is-buffer@^1.1.5, is-buffer@~1.1.1: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + +is-builtin-module@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe" + dependencies: + builtin-modules "^1.0.0" + +is-callable@^1.1.1, is-callable@^1.1.3: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.4.tgz#1e1adf219e1eeb684d691f9d6a05ff0d30a24d75" + +is-date-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16" + +is-decimal@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-1.0.2.tgz#894662d6a8709d307f3a276ca4339c8fa5dff0ff" + +is-dotfile@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" + 
+is-empty@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/is-empty/-/is-empty-1.2.0.tgz#de9bb5b278738a05a0b09a57e1fb4d4a341a9f6b" + +is-equal-shallow@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534" + dependencies: + is-primitive "^2.0.0" + +is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + +is-extglob@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0" + +is-file@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-file/-/is-file-1.0.0.tgz#28a44cfbd9d3db193045f22b65fce8edf9620596" + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + +is-glob@^2.0.0, is-glob@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863" + dependencies: + is-extglob "^1.0.0" + +is-hexadecimal@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.2.tgz#b6e710d7d07bb66b98cb8cece5c9b4921deeb835" + +is-hidden@^1.0.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/is-hidden/-/is-hidden-1.1.1.tgz#82ee6a93aeef3fb007ad5b9457c0584d45329f38" + +is-ip@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-2.0.0.tgz#68eea07e8a0a0a94c2d080dd674c731ab2a461ab" + dependencies: + ip-regex "^2.0.0" + +is-number@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f" + dependencies: + kind-of "^3.0.2" + +is-number@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-4.0.0.tgz#0026e37f5454d73e356dfe6564699867c6a7f0ff" + +is-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-object/-/is-object-1.0.1.tgz#8952688c5ec2ffd6b03ecc85e769e02903083470" + +is-online@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-online/-/is-online-7.0.0.tgz#7e2408c0ae1e7e37ba8d50bdb237260d32bfd96e" + dependencies: + got "^6.7.1" + p-any "^1.0.0" + p-timeout "^1.0.0" + public-ip "^2.3.0" + +is-path-cwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d" + +is-path-in-cwd@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz#5ac48b345ef675339bd6c7a48a912110b241cf52" + dependencies: + is-path-inside "^1.0.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + dependencies: + path-is-inside "^1.0.1" + +is-plain-obj@^1.0.0, is-plain-obj@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + +is-posix-bracket@^0.1.0: + version "0.1.1" + resolved 
"https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" + +is-primitive@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575" + +is-redirect@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24" + +is-regex@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.4.tgz#5517489b547091b0930e095654ced25ee97e9491" + dependencies: + has "^1.0.1" + +is-relative-url@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-relative-url/-/is-relative-url-2.0.0.tgz#72902d7fe04b3d4792e7db15f9db84b7204c9cef" + dependencies: + is-absolute-url "^2.0.0" + +is-retry-allowed@^1.0.0, is-retry-allowed@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz#11a060568b67339444033d0125a61a20d564fb34" + +is-stream@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + +is-symbol@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.1.tgz#3cc59f00025194b6ab2e38dbae6689256b660572" + +is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + +is-utf8@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72" + +is-whitespace-character@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-whitespace-character/-/is-whitespace-character-1.0.2.tgz#ede53b4c6f6fb3874533751ec9280d01928d03ed" + +is-word-character@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-word-character/-/is-word-character-1.0.2.tgz#46a5dac3f2a1840898b91e576cd40d493f3ae553" + +isarray@1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + +isemail@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.1.2.tgz#937cf919002077999a73ea8b1951d590e84e01dd" + dependencies: + punycode "2.x.x" + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + dependencies: + isarray "1.0.0" + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + +isurl@^1.0.0-alpha5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" + dependencies: + has-to-string-tag-x "^1.2.0" + is-object "^1.0.1" + +js-yaml@^3.2.4, js-yaml@^3.6.1: + version "3.12.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + +json-parse-better-errors@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + +json-schema-traverse@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" + +json-schema@0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" + +json-stable-stringify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af" + dependencies: + jsonify "~0.0.0" + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + +json5@^0.5.0, json5@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" + +jsonify@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" + +jsprim@^1.2.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.2.3" + verror "1.10.0" + +keyv@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.0.0.tgz#44923ba39e68b12a7cec7df6c3268c031f2ef373" + dependencies: + json-buffer "3.0.0" + +kind-of@^3.0.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + dependencies: + is-buffer "^1.1.5" + +kind-of@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051" + +levn@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +link-check@^4.1.0: + version "4.4.4" + resolved "https://registry.yarnpkg.com/link-check/-/link-check-4.4.4.tgz#08dbb881b70c23f1c173889c3a34d682c2e68c1a" + dependencies: + is-relative-url "^2.0.0" + isemail "^3.1.2" + ms "^2.1.1" + request "^2.87.0" + +load-json-file@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0" + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + pinkie-promise "^2.0.0" + strip-bom "^2.0.0" + +load-json-file@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b" + dependencies: + graceful-fs "^4.1.2" + parse-json "^4.0.0" + pify "^3.0.0" + strip-bom "^3.0.0" + +load-plugin@^2.0.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/load-plugin/-/load-plugin-2.2.2.tgz#ebc7599491ff33e5077719fbe051d5725a9f7a89" + dependencies: + npm-prefix "^1.2.0" + resolve-from "^4.0.0" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +lodash@^4.0.0: + version "4.17.10" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7" + 
+log-symbols@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-1.0.2.tgz#376ff7b58ea3086a0f09facc74617eca501e1a18" + dependencies: + chalk "^1.0.0" + +longest-streak@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/longest-streak/-/longest-streak-2.0.2.tgz#2421b6ba939a443bb9ffebf596585a50b4c38e2e" + +lowercase-keys@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306" + +lowercase-keys@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + +map-like@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/map-like/-/map-like-2.0.0.tgz#94496d49ad333c0dc3234b27adbbd1e8535953b4" + +markdown-escapes@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/markdown-escapes/-/markdown-escapes-1.0.2.tgz#e639cbde7b99c841c0bacc8a07982873b46d2122" + +markdown-extensions@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/markdown-extensions/-/markdown-extensions-1.1.1.tgz#fea03b539faeaee9b4ef02a3769b455b189f7fc3" + +markdown-table@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-1.1.2.tgz#c78db948fa879903a41bce522e3b96f801c63786" + +math-random@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/math-random/-/math-random-1.0.1.tgz#8b3aac588b8a66e4975e3cdea67f7bb329601fac" + +md5@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/md5/-/md5-2.2.1.tgz#53ab38d5fe3c8891ba465329ea23fac0540126f9" + dependencies: + charenc "~0.0.1" + crypt "~0.0.1" + is-buffer "~1.1.1" + +mdast-util-compact@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-compact/-/mdast-util-compact-1.0.1.tgz#cdb5f84e2b6a2d3114df33bd05d9cb32e3c4083a" + dependencies: + unist-util-modify-children "^1.0.0" + unist-util-visit "^1.1.0" + +micromatch@^2.1.5: + version "2.3.11" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565" + dependencies: + arr-diff "^2.0.0" + array-unique "^0.2.1" + braces "^1.8.2" + expand-brackets "^0.1.4" + extglob "^0.3.1" + filename-regex "^2.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.1" + kind-of "^3.0.2" + normalize-path "^2.0.1" + object.omit "^2.0.0" + parse-glob "^3.0.4" + regex-cache "^0.4.2" + +mime-db@~1.33.0: + version "1.33.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" + +mime-types@^2.1.12, mime-types@~2.1.17: + version "2.1.18" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + dependencies: + mime-db "~1.33.0" + +mimic-response@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.0.tgz#df3d3652a73fded6b9b0b24146e6fd052353458e" + +minimatch@^3.0.2, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + dependencies: + brace-expansion "^1.1.7" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + +minimist@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + +minipass@^2.2.1, 
minipass@^2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.3.3.tgz#a7dcc8b7b833f5d368759cce544dccb55f50f233" + dependencies: + safe-buffer "^5.1.2" + yallist "^3.0.0" + +minizlib@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.1.0.tgz#11e13658ce46bc3a70a267aac58359d1e0c29ceb" + dependencies: + minipass "^2.2.1" + +mkdirp@^0.5.0, mkdirp@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + dependencies: + minimist "0.0.8" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + +ms@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" + +nan@^2.9.2: + version "2.10.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.10.0.tgz#96d0cd610ebd58d4b4de9cc0c6828cda99c7548f" + +needle@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.2.1.tgz#b5e325bd3aae8c2678902fa296f729455d1d3a7d" + dependencies: + debug "^2.1.2" + iconv-lite "^0.4.4" + sax "^1.2.4" + +node-pre-gyp@^0.10.0: + version "0.10.2" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.2.tgz#e8945c20ef6795a20aac2b44f036eb13cf5146e3" + dependencies: + detect-libc "^1.0.2" + mkdirp "^0.5.1" + needle "^2.2.0" + nopt "^4.0.1" + npm-packlist "^1.1.6" + npmlog "^4.0.2" + rc "^1.2.7" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^4" + +nopt@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" + dependencies: + abbrev "1" + osenv "^0.1.4" + +normalize-package-data@^2.3.2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.4.0.tgz#12f95a307d58352075a04907b84ac8be98ac012f" + dependencies: + hosted-git-info "^2.1.4" + is-builtin-module "^1.0.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-path@^2.0.0, normalize-path@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + dependencies: + remove-trailing-separator "^1.0.1" + +normalize-url@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-2.0.1.tgz#835a9da1551fa26f70e92329069a23aa6574d7e6" + dependencies: + prepend-http "^2.0.0" + query-string "^5.0.1" + sort-keys "^2.0.0" + +npm-bundled@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.3.tgz#7e71703d973af3370a9591bafe3a63aca0be2308" + +npm-packlist@^1.1.6: + version "1.1.10" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.10.tgz#1039db9e985727e464df066f4cf0ab6ef85c398a" + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + +npm-prefix@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/npm-prefix/-/npm-prefix-1.2.0.tgz#e619455f7074ba54cc66d6d0d37dd9f1be6bcbc0" + dependencies: + rc "^1.1.0" + shellsubstitute "^1.1.0" + untildify "^2.1.0" + +npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +number-is-nan@^1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + +oauth-sign@~0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43" + +object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + +object-keys@^1.0.8, object-keys@^1.0.9: + version "1.0.12" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" + +object.omit@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" + dependencies: + for-own "^0.1.4" + is-extendable "^0.1.1" + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + dependencies: + wrappy "1" + +optionator@^0.8.0, optionator@^0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.4" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + wordwrap "~1.0.0" + +os-homedir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + +os-tmpdir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + +osenv@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-any@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-any/-/p-any-1.1.0.tgz#1d03835c7eed1e34b8e539c47b7b60d0d015d4e1" + dependencies: + p-some "^2.0.0" + +p-cancelable@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-0.4.1.tgz#35f363d67d52081c8d9585e37bcceb7e0bbcb2a0" + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + +p-is-promise@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-1.1.0.tgz#9c9456989e9f6588017b0434d56097675c3da05e" + +p-limit@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" + dependencies: + p-try "^1.0.0" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + dependencies: + p-limit "^1.1.0" + +p-some@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-some/-/p-some-2.0.1.tgz#65d87c8b154edbcf5221d167778b6d2e150f6f06" + dependencies: + aggregate-error "^1.0.0" + +p-timeout@^1.0.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-1.2.1.tgz#5eb3b353b7fce99f101a1038880bb054ebbea386" + dependencies: + p-finally "^1.0.0" + +p-timeout@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-2.0.1.tgz#d8dd1979595d2dc0139e1fe46b8b646cb3cdf038" + dependencies: + p-finally "^1.0.0" + +p-try@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" + +parse-entities@^1.0.2, parse-entities@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-1.1.2.tgz#9eaf719b29dc3bd62246b4332009072e01527777" + dependencies: + character-entities "^1.0.0" + character-entities-legacy "^1.0.0" + character-reference-invalid "^1.0.0" + is-alphanumerical "^1.0.0" + is-decimal "^1.0.0" + is-hexadecimal "^1.0.0" + +parse-glob@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" + dependencies: + glob-base "^0.3.0" + is-dotfile "^1.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.0" + +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + dependencies: + error-ex "^1.2.0" + +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + +path-exists@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" + dependencies: + pinkie-promise "^2.0.0" + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + +path-is-inside@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + +path-to-glob-pattern@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-to-glob-pattern/-/path-to-glob-pattern-1.0.2.tgz#473e6a3a292a9d13fbae3edccee72d3baba8c619" + +path-type@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441" + dependencies: + graceful-fs "^4.1.2" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +path-type@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" + dependencies: + pify "^3.0.0" + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + +pluralize@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-2.0.0.tgz#72b726aa6fac1edeee42256c7d8dc256b335677f" + +prelude-ls@~1.1.2: + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + +prepend-http@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + +prepend-http@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + +preserve@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" + +prettier@^1.13.7: + version "1.13.7" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.13.7.tgz#850f3b8af784a49a6ea2d2eaa7ed1428a34b7281" + +process-nextick-args@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" + +public-ip@^2.3.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/public-ip/-/public-ip-2.4.0.tgz#f00c028a15366d8c798e47efab6acd09a17666da" + dependencies: + dns-socket "^1.6.2" + got "^8.0.0" + is-ip "^2.0.0" + pify "^3.0.0" + +punycode@2.x.x: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + +punycode@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + +qs@~6.5.1: + version "6.5.2" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" + +query-string@^5.0.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-5.1.1.tgz#a78c012b71c17e05f2e3fa2319dd330682efb3cb" + dependencies: + decode-uri-component "^0.2.0" + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +randomatic@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.0.0.tgz#d35490030eb4f7578de292ce6dfb04a91a128923" + dependencies: + is-number "^4.0.0" + kind-of "^6.0.0" + math-random "^1.0.1" + +rc-config-loader@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/rc-config-loader/-/rc-config-loader-2.0.1.tgz#8c8452f59bdd10d448a67762dccf7c1b247db860" + dependencies: + debug "^2.2.0" + js-yaml "^3.6.1" + json5 "^0.5.0" + object-assign "^4.1.0" + object-keys "^1.0.9" + path-exists "^2.1.0" + require-from-string "^2.0.1" + +rc@^1.1.0, rc@^1.2.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +read-pkg-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-3.0.0.tgz#3ed496685dba0f8fe118d0691dc51f4a1ff96f07" + dependencies: + find-up "^2.0.0" + read-pkg "^3.0.0" + +read-pkg@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28" + dependencies: + load-json-file "^1.0.0" + normalize-package-data "^2.3.2" + path-type "^1.0.0" + +read-pkg@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" + dependencies: + load-json-file "^4.0.0" + normalize-package-data "^2.3.2" + path-type "^3.0.0" + +readable-stream@^2.0.0, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.2.2: + version "2.3.6" + resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readdirp@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78" + dependencies: + graceful-fs "^4.1.2" + minimatch "^3.0.2" + readable-stream "^2.0.2" + set-immediate-shim "^1.0.1" + +regex-cache@^0.4.2: + version "0.4.4" + resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.4.tgz#75bdc58a2a1496cec48a12835bc54c8d562336dd" + dependencies: + is-equal-shallow "^0.1.3" + +remark-cli@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-cli/-/remark-cli-5.0.0.tgz#9feefd06474f3d0ff132df21b5334c546df12ab6" + dependencies: + markdown-extensions "^1.1.0" + remark "^9.0.0" + unified-args "^5.0.0" + +remark-frontmatter@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/remark-frontmatter/-/remark-frontmatter-1.2.0.tgz#67905d178c0fe531ed12c57b98759f101fc2c1b5" + dependencies: + fault "^1.0.1" + xtend "^4.0.1" + +remark-lint-no-dead-urls@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/remark-lint-no-dead-urls/-/remark-lint-no-dead-urls-0.3.0.tgz#b640ecbb4ccaf780afe28c8d13e79f5dc6769449" + dependencies: + is-online "^7.0.0" + is-relative-url "^2.0.0" + link-check "^4.1.0" + unified-lint-rule "^1.0.1" + unist-util-visit "^1.1.3" + +remark-parse@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-5.0.0.tgz#4c077f9e499044d1d5c13f80d7a98cf7b9285d95" + dependencies: + collapse-white-space "^1.0.2" + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + is-word-character "^1.0.0" + markdown-escapes "^1.0.0" + parse-entities "^1.1.0" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + trim "0.0.1" + trim-trailing-lines "^1.0.0" + unherit "^1.0.4" + unist-util-remove-position "^1.0.0" + vfile-location "^2.0.0" + xtend "^4.0.1" + +remark-stringify@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/remark-stringify/-/remark-stringify-5.0.0.tgz#336d3a4d4a6a3390d933eeba62e8de4bd280afba" + dependencies: + ccount "^1.0.0" + is-alphanumeric "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + longest-streak "^2.0.1" + markdown-escapes "^1.0.0" + markdown-table "^1.1.0" + mdast-util-compact "^1.0.0" + parse-entities "^1.0.2" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + stringify-entities "^1.0.1" + unherit "^1.0.4" + xtend "^4.0.1" + +remark@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/remark/-/remark-9.0.0.tgz#c5cfa8ec535c73a67c4b0f12bfdbd3a67d8b2f60" + dependencies: + remark-parse "^5.0.0" + remark-stringify "^5.0.0" + unified "^6.0.0" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" + +repeat-element@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a" + +repeat-string@^1.5.0, repeat-string@^1.5.2, repeat-string@^1.5.4: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + +replace-ext@1.0.0: + version 
"1.0.0" + resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-1.0.0.tgz#de63128373fcbf7c3ccfa4de5a480c45a67958eb" + +request@^2.87.0: + version "2.87.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.87.0.tgz#32f00235cd08d482b4d0d68db93a829c0ed5756e" + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.6.0" + caseless "~0.12.0" + combined-stream "~1.0.5" + extend "~3.0.1" + forever-agent "~0.6.1" + form-data "~2.3.1" + har-validator "~5.0.3" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.17" + oauth-sign "~0.8.2" + performance-now "^2.1.0" + qs "~6.5.1" + safe-buffer "^5.1.1" + tough-cookie "~2.3.3" + tunnel-agent "^0.6.0" + uuid "^3.1.0" + +require-from-string@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + +responselike@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + dependencies: + lowercase-keys "^1.0.0" + +rimraf@^2.2.8, rimraf@^2.6.1: + version "2.6.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" + dependencies: + glob "^7.0.5" + +safe-buffer@^5.0.1, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + +sax@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + +"semver@2 || 3 || 4 || 5", semver@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" + +set-blocking@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + +set-immediate-shim@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61" + +shellsubstitute@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shellsubstitute/-/shellsubstitute-1.2.0.tgz#e4f702a50c518b0f6fe98451890d705af29b6b70" + +signal-exit@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + +slice-ansi@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-0.0.4.tgz#edbf8903f66f7ce2f8eafd6ceed65e264c831b35" + +sliced@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/sliced/-/sliced-1.0.1.tgz#0b3a662b5d04c3177b1926bea82b03f837a2ef41" + +sort-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-2.0.0.tgz#658535584861ec97d730d6cf41822e1f56684128" + dependencies: + is-plain-obj "^1.0.0" + +spdx-correct@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.0.0.tgz#05a5b4d7153a195bc92c3c425b69f3b2a9524c82" + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.1.0.tgz#2c7ae61056c714a5b9b9b2b2af7d311ef5c78fe9" + +spdx-expression-parse@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz#7a7cd28470cc6d3a1cfe6d66886f6bc430d3ac87" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + +sshpk@^1.7.0: + version "1.14.2" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.14.2.tgz#c6fc61648a3d9c4e764fd3fcdf4ea105e492ba98" + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + dashdash "^1.12.0" + getpass "^0.1.1" + safer-buffer "^2.0.2" + optionalDependencies: + bcrypt-pbkdf "^1.0.0" + ecc-jsbn "~0.1.1" + jsbn "~0.1.0" + tweetnacl "~0.14.0" + +state-toggle@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/state-toggle/-/state-toggle-1.0.1.tgz#c3cb0974f40a6a0f8e905b96789eb41afa1cde3a" + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + +string-width@^1.0.0, string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +"string-width@^1.0.2 || 2", string-width@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string.prototype.padstart@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/string.prototype.padstart/-/string.prototype.padstart-3.0.0.tgz#5bcfad39f4649bb2d031292e19bcf0b510d4b242" + dependencies: + define-properties "^1.1.2" + es-abstract "^1.4.3" + function-bind "^1.0.2" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + dependencies: + safe-buffer "~5.1.0" + +stringify-entities@^1.0.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/stringify-entities/-/stringify-entities-1.3.2.tgz#a98417e5471fd227b3e45d3db1861c11caf668f7" + dependencies: + character-entities-html4 "^1.0.0" + character-entities-legacy "^1.0.0" + is-alphanumerical "^1.0.0" + is-hexadecimal "^1.0.0" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + dependencies: + ansi-regex "^3.0.0" + +strip-bom@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e" + dependencies: + is-utf8 "^0.2.0" + +strip-bom@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + +structured-source@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/structured-source/-/structured-source-3.0.2.tgz#dd802425e0f53dc4a6e7aca3752901a1ccda7af5" + dependencies: + boundary "^1.0.1" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + +supports-color@^4.1.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-4.5.0.tgz#be7a0de484dec5c5cddf8b3d59125044912f635b" + dependencies: + has-flag "^2.0.0" + +supports-color@^5.3.0: + version "5.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54" + dependencies: + has-flag "^3.0.0" + +table@^3.7.8: + version "3.8.3" + resolved "https://registry.yarnpkg.com/table/-/table-3.8.3.tgz#2bbc542f0fda9861a755d3947fefd8b3f513855f" + dependencies: + ajv "^4.7.0" + ajv-keywords "^1.0.0" + chalk "^1.1.1" + lodash "^4.0.0" + slice-ansi "0.0.4" + string-width "^2.0.0" + +tar@^4: + version "4.4.4" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.4.tgz#ec8409fae9f665a4355cc3b4087d0820232bb8cd" + dependencies: + chownr "^1.0.1" + fs-minipass "^1.2.5" + minipass "^2.3.3" + minizlib "^1.1.0" + mkdirp "^0.5.0" + safe-buffer "^5.1.2" + yallist "^3.0.2" + +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + +textlint@^10.2.1: + version "10.2.1" + resolved "https://registry.yarnpkg.com/textlint/-/textlint-10.2.1.tgz#ee22b7967d59cef7c74a04a5f4e8883134e5c79d" + dependencies: + "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-traverse" "^2.0.8" + "@textlint/feature-flag" "^3.0.4" + "@textlint/fixer-formatter" "^3.0.7" + "@textlint/kernel" "^2.0.9" + "@textlint/linter-formatter" "^3.0.7" + "@textlint/textlint-plugin-markdown" "^4.0.10" + "@textlint/textlint-plugin-text" "^3.0.10" + "@types/bluebird" "^3.5.18" + bluebird "^3.0.5" + debug "^2.1.0" + deep-equal "^1.0.1" + file-entry-cache "^2.0.0" + get-stdin "^5.0.1" + glob "^7.1.1" + interop-require "^1.0.0" + is-file "^1.0.0" + log-symbols "^1.0.2" + map-like "^2.0.0" + md5 "^2.2.1" + mkdirp "^0.5.0" + object-assign "^4.0.1" + optionator "^0.8.0" + path-to-glob-pattern "^1.0.2" + rc-config-loader "^2.0.1" + read-pkg "^1.1.0" + read-pkg-up "^3.0.0" + structured-source "^3.0.2" + try-resolve "^1.0.1" + unique-concat "^0.2.2" + +timed-out@^4.0.0, timed-out@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" + +to-vfile@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/to-vfile/-/to-vfile-2.2.0.tgz#342d1705e6df526d569b1fc8bfa29f1f36d6c416" + dependencies: + is-buffer "^1.1.4" + vfile "^2.0.0" + x-is-function "^1.0.4" + +tough-cookie@~2.3.3: + version "2.3.4" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.4.tgz#ec60cee38ac675063ffc97a5c18970578ee83655" + 
dependencies: + punycode "^1.4.1" + +traverse@^0.6.6: + version "0.6.6" + resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.6.6.tgz#cbdf560fd7b9af632502fed40f918c157ea97137" + +trim-trailing-lines@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.1.tgz#e0ec0810fd3c3f1730516b45f49083caaf2774d9" + +trim@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" + +trough@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.2.tgz#7f1663ec55c480139e2de5e486c6aef6cc24a535" + +try-resolve@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + dependencies: + prelude-ls "~1.1.2" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + +unherit@^1.0.4: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unherit/-/unherit-1.1.1.tgz#132748da3e88eab767e08fabfbb89c5e9d28628c" + dependencies: + inherits "^2.0.1" + xtend "^4.0.1" + +unified-args@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/unified-args/-/unified-args-5.1.0.tgz#1889200e072998a662e6e84d817d6f4b5f448dd1" + dependencies: + camelcase "^4.0.0" + chalk "^2.0.0" + chokidar "^1.5.1" + json5 "^0.5.1" + minimist "^1.2.0" + text-table "^0.2.0" + unified-engine "^5.1.0" + +unified-engine@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/unified-engine/-/unified-engine-5.1.0.tgz#30db83bcc76c821f773bb5a8a491aa0e2471e3d1" + dependencies: + concat-stream "^1.5.1" + debug "^3.1.0" + fault "^1.0.0" + fn-name "^2.0.1" + glob "^7.0.3" + ignore "^3.2.0" + is-empty "^1.0.0" + is-hidden "^1.0.1" + is-object "^1.0.1" + js-yaml "^3.6.1" + load-plugin "^2.0.0" + parse-json "^4.0.0" + to-vfile "^2.0.0" + trough "^1.0.0" + unist-util-inspect "^4.1.2" + vfile-reporter "^4.0.0" + vfile-statistics "^1.1.0" + x-is-function "^1.0.4" + x-is-string "^0.1.0" + xtend "^4.0.1" + +unified-lint-rule@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/unified-lint-rule/-/unified-lint-rule-1.0.3.tgz#e302b0c4a7ac428c0980e049a500e59528001299" + dependencies: + wrapped "^1.0.1" + +unified@^6.0.0, unified@^6.1.6: + version "6.2.0" + resolved "https://registry.yarnpkg.com/unified/-/unified-6.2.0.tgz#7fbd630f719126d67d40c644b7e3f617035f6dba" + dependencies: + bail "^1.0.0" + extend "^3.0.0" + is-plain-obj "^1.1.0" + trough "^1.0.0" + vfile "^2.0.0" + x-is-string "^0.1.0" + +unique-concat@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/unique-concat/-/unique-concat-0.2.2.tgz#9210f9bdcaacc5e1e3929490d7c019df96f18712" + +unist-util-inspect@^4.1.2: + version "4.1.3" + resolved "https://registry.yarnpkg.com/unist-util-inspect/-/unist-util-inspect-4.1.3.tgz#39470e6d77485db285966df78431219aa1287822" + dependencies: + 
is-empty "^1.0.0" + +unist-util-is@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-2.1.2.tgz#1193fa8f2bfbbb82150633f3a8d2eb9a1c1d55db" + +unist-util-modify-children@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-modify-children/-/unist-util-modify-children-1.1.2.tgz#c7f1b91712554ee59c47a05b551ed3e052a4e2d1" + dependencies: + array-iterate "^1.0.0" + +unist-util-remove-position@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-1.1.2.tgz#86b5dad104d0bbfbeb1db5f5c92f3570575c12cb" + dependencies: + unist-util-visit "^1.1.0" + +unist-util-stringify-position@^1.0.0, unist-util-stringify-position@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz#3f37fcf351279dcbca7480ab5889bb8a832ee1c6" + +unist-util-visit@^1.1.0, unist-util-visit@^1.1.3: + version "1.3.1" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-1.3.1.tgz#c019ac9337a62486be58531bc27e7499ae7d55c7" + dependencies: + unist-util-is "^2.1.1" + +untildify@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/untildify/-/untildify-2.1.0.tgz#17eb2807987f76952e9c0485fc311d06a826a2e0" + dependencies: + os-homedir "^1.0.0" + +unzip-response@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-2.0.1.tgz#d2f0f737d16b0615e72a6935ed04214572d56f97" + +url-parse-lax@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73" + dependencies: + prepend-http "^1.0.1" + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + dependencies: + prepend-http "^2.0.0" + +url-to-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" + +util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + +uuid@^3.1.0: + version "3.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" + +validate-npm-package-license@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz#81643bcbef1bdfecd4623793dc4648948ba98338" + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" + +vfile-location@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-2.0.3.tgz#083ba80e50968e8d420be49dd1ea9a992131df77" + +vfile-message@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-1.0.1.tgz#51a2ccd8a6b97a7980bb34efb9ebde9632e93677" + dependencies: + unist-util-stringify-position "^1.1.1" + +vfile-reporter@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/vfile-reporter/-/vfile-reporter-4.0.0.tgz#ea6f0ae1342f4841573985e05f941736f27de9da" + dependencies: + repeat-string "^1.5.0" + string-width 
"^1.0.0" + supports-color "^4.1.0" + unist-util-stringify-position "^1.0.0" + vfile-statistics "^1.1.0" + +vfile-statistics@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/vfile-statistics/-/vfile-statistics-1.1.1.tgz#a22fd4eb844c9eaddd781ad3b3246db88375e2e3" + +vfile@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/vfile/-/vfile-2.3.0.tgz#e62d8e72b20e83c324bc6c67278ee272488bf84a" + dependencies: + is-buffer "^1.1.4" + replace-ext "1.0.0" + unist-util-stringify-position "^1.0.0" + vfile-message "^1.0.0" + +wide-align@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + dependencies: + string-width "^1.0.2 || 2" + +wordwrap@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + +wrapped@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/wrapped/-/wrapped-1.0.1.tgz#c783d9d807b273e9b01e851680a938c87c907242" + dependencies: + co "3.1.0" + sliced "^1.0.1" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + +write@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/write/-/write-0.2.1.tgz#5fc03828e264cea3fe91455476f7a3c566cb0757" + dependencies: + mkdirp "^0.5.1" + +x-is-function@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/x-is-function/-/x-is-function-1.0.4.tgz#5d294dc3d268cbdd062580e0c5df77a391d1fa1e" + +x-is-string@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/x-is-string/-/x-is-string-0.1.0.tgz#474b50865af3a49a9c4657f05acd145458f77d82" + +xml-escape@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/xml-escape/-/xml-escape-1.1.0.tgz#3904c143fa8eb3a0030ec646d2902a2f1b706c44" + +xtend@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" + +yallist@^3.0.0, yallist@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.0.2.tgz#8452b4bb7e83c7c188d8041c1a837c773d6d8bb9" diff --git a/evidence/pool.go b/evidence/pool.go index 4bad355f7..247629b6b 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -4,9 +4,9 @@ import ( "fmt" "sync" - clist "github.com/tendermint/tmlibs/clist" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + clist "github.com/tendermint/tendermint/libs/clist" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 019076234..915cba327 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -9,7 +9,7 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) var mockState = sm.State{} diff --git a/evidence/reactor.go b/evidence/reactor.go index 5159572e3..bf11ac105 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -5,10 +5,10 @@ import ( "reflect" "time" - "github.com/tendermint/go-amino" - clist "github.com/tendermint/tmlibs/clist" - "github.com/tendermint/tmlibs/log" + amino "github.com/tendermint/go-amino" + clist "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" 
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -73,7 +73,7 @@ func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received evidence to the evpool. func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := DecodeMessage(msgBytes) + msg, err := decodeMsg(msgBytes) if err != nil { evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) evR.Switch.StopPeerForError(src, err) @@ -204,11 +204,9 @@ func RegisterEvidenceMessages(cdc *amino.Codec) { "tendermint/evidence/EvidenceListMessage", nil) } -// DecodeMessage decodes a byte-array into a EvidenceMessage. -func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) { +func decodeMsg(bz []byte) (msg EvidenceMessage, err error) { if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", - len(bz), maxMsgSize) + return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) } err = cdc.UnmarshalBinaryBare(bz, &msg) return diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 2f1c34e6e..1687f25a3 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -10,8 +10,8 @@ import ( "github.com/go-kit/kit/log/term" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git a/evidence/store.go b/evidence/store.go index 6af5d75d8..20b37bdb2 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) /* diff --git a/evidence/store_test.go b/evidence/store_test.go index 3fdb3ba6e..30dc1c4d5 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) //------------------------------------------- diff --git a/evidence/wire.go b/evidence/wire.go index d4db37c54..c61b86184 100644 --- a/evidence/wire.go +++ b/evidence/wire.go @@ -2,15 +2,15 @@ package evidence import ( "github.com/tendermint/go-amino" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/crypto" ) var cdc = amino.NewCodec() func init() { RegisterEvidenceMessages(cdc) - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) types.RegisterEvidences(cdc) RegisterMockEvidences(cdc) // For testing } diff --git a/libs/.editorconfig b/libs/.editorconfig new file mode 100644 index 000000000..82f774362 --- /dev/null +++ b/libs/.editorconfig @@ -0,0 +1,19 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[Makefile] +indent_style = tab + +[*.sh] +indent_style = tab + +[*.proto] +indent_style = space +indent_size = 2 diff --git a/libs/.gitignore b/libs/.gitignore new file mode 100644 index 000000000..a2ebfde29 --- /dev/null +++ b/libs/.gitignore @@ -0,0 +1,5 @@ +*.sw[opqr] +vendor +.glide + +pubsub/query/fuzz_test/output diff 
--git a/libs/CHANGELOG.md b/libs/CHANGELOG.md
new file mode 100644
index 000000000..0f900c57f
--- /dev/null
+++ b/libs/CHANGELOG.md
@@ -0,0 +1,438 @@
+# Changelog
+
+## 0.9.0
+
+*June 24th, 2018*
+
+BREAKING:
+ - [events, pubsub] Removed - moved to github.com/tendermint/tendermint
+ - [merkle] Use 20-bytes of SHA256 instead of RIPEMD160. NOTE: this package is
+   moving to github.com/tendermint/go-crypto !
+ - [common] Remove gogoproto from KVPair types
+ - [common] Error simplification, #220
+
+FEATURES:
+
+ - [db/remotedb] New DB type using an external CLevelDB process via
+   GRPC
+ - [autofile] logjack command for piping stdin to a rotating file
+ - [bech32] New package. NOTE: should move out of here - it's just two small
+   functions
+ - [common] ColoredBytes([]byte) string for printing mixed ascii and bytes
+ - [db] DebugDB uses ColoredBytes()
+
+## 0.8.4
+
+*June 5, 2018*
+
+IMPROVEMENTS:
+
+ - [autofile] Flush on Stop; Close() method to Flush and close file
+
+## 0.8.3
+
+*May 21, 2018*
+
+FEATURES:
+
+ - [common] ASCIITrim()
+
+## 0.8.2 (April 23rd, 2018)
+
+FEATURES:
+
+ - [pubsub] TagMap, NewTagMap
+ - [merkle] SimpleProofsFromMap()
+ - [common] IsASCIIText()
+ - [common] PrefixEndBytes // e.g. increment or nil
+ - [common] BitArray.MarshalJSON/.UnmarshalJSON
+ - [common] BitArray uses 'x' not 'X' for String() and above.
+ - [db] DebugDB shows better colorized output
+
+BUG FIXES:
+
+ - [common] Fix TestParallelAbort nondeterministic failure #201/#202
+ - [db] PrefixDB Iterator/ReverseIterator fixes
+ - [db] DebugDB fixes
+
+## 0.8.1 (April 5th, 2018)
+
+FEATURES:
+
+ - [common] Error.Error() includes cause
+ - [common] IsEmpty() for 0 length
+
+## 0.8.0 (April 4th, 2018)
+
+BREAKING:
+
+ - [merkle] `PutVarint->PutUvarint` in encodeByteSlice
+ - [db] batch.WriteSync()
+ - [common] Refactored and fixed `Parallel` function
+ - [common] Refactored `Rand` functionality
+ - [common] Remove unused `Right/LeftPadString` functions
+ - [common] Remove StackError, introduce Error interface (to replace use of pkg/errors)
+
+FEATURES:
+
+ - [db] NewPrefixDB for a DB with all keys prefixed
+ - [db] NewDebugDB prints everything during operation
+ - [common] SplitAndTrim func
+ - [common] rand.Float64(), rand.Int63n(n), rand.Int31n(n) and global equivalents
+ - [common] HexBytes Format()
+
+BUG FIXES:
+
+ - [pubsub] Fix unsubscribing
+ - [cli] Return config errors
+ - [common] Fix WriteFileAtomic Windows bug
+
+## 0.7.1 (March 22, 2018)
+
+IMPROVEMENTS:
+
+ - glide -> dep
+
+BUG FIXES:
+
+ - [common] Fix panic in NewBitArray for negative bits
+ - [common] Fix and simplify WriteFileAtomic so it cleans up properly
+
+## 0.7.0 (February 20, 2018)
+
+BREAKING:
+
+ - [db] Major API upgrade. See `db/types.go`.
+ - [common] added `Quit() <-chan struct{}` to Service interface.
+   The returned channel is closed when service is stopped.
+ - [common] Remove HTTP functions
+ - [common] Heap.Push takes an `int`, new Heap.PushComparable takes the comparable.
+ - [logger] Removed. Use `log`
+ - [merkle] Major API upgrade - uses cmn.KVPairs.
+ - [cli] WriteDemoConfig -> WriteConfigValues
+ - [all] Remove go-wire dependency!
+
+FEATURES:
+
+ - [db] New FSDB that uses the filesystem directly
+ - [common] HexBytes
+ - [common] KVPair and KI64Pair (protobuf based key-value pair objects)
+
+IMPROVEMENTS:
+
+ - [clist] add WaitChan() to CList, NextWaitChan() and PrevWaitChan()
+   to CElement. These can be used instead of blocking `*Wait()` methods
+   if you need to be able to send a quit signal and not block forever
+ - [common] IsHex handles 0x-prefix
+
+BUG FIXES:
+
+ - [common] BitArray check for nil arguments
+ - [common] Fix memory leak in RepeatTimer
+
+## 0.6.0 (December 29, 2017)
+
+BREAKING:
+ - [cli] remove --root
+ - [pubsub] add String() method to Query interface
+
+IMPROVEMENTS:
+ - [common] use a thread-safe and well-seeded non-crypto rng
+
+BUG FIXES
+ - [clist] fix misuse of wait group
+ - [common] introduce Ticker interface and logicalTicker for better testing of timers
+
+## 0.5.0 (December 5, 2017)
+
+BREAKING:
+ - [common] replace Service#Start, Service#Stop first return value (bool) with an
+   error (ErrAlreadyStarted, ErrAlreadyStopped)
+ - [common] replace Service#Reset first return value (bool) with an error
+ - [process] removed
+
+FEATURES:
+ - [common] IntInSlice and StringInSlice functions
+ - [pubsub/query] introduce `Condition` struct, expose `Operator`, and add `query.Conditions()`
+
+## 0.4.1 (November 27, 2017)
+
+FEATURES:
+ - [common] `Keys()` method on `CMap`
+
+IMPROVEMENTS:
+ - [log] complex types now encoded as "%+v" by default if `String()` method is undefined (previously resulted in error)
+ - [log] logger logs its own errors
+
+BUG FIXES:
+ - [common] fixed `Kill()` to build on Windows (Windows does not have `syscall.Kill`)
+
+## 0.4.0 (October 26, 2017)
+
+BREAKING:
+ - [common] GoPath is now a function
+ - [db] `DB` and `Iterator` interfaces have new methods to better support iteration
+
+FEATURES:
+ - [autofile] `Read([]byte)` and `Write([]byte)` methods on `Group` to support binary WAL
+ - [common] `Kill()` sends SIGTERM to the current process
+
+IMPROVEMENTS:
+ - comments and linting
+
+BUG FIXES:
+ - [events] fix allocation error prefixing cache with 1000 empty events
+
+## 0.3.2 (October 2, 2017)
+
+BUG FIXES:
+
+- [autofile] fix AutoFile.Sync() to open file if it's been closed
+- [db] fix MemDb.Close() to not empty the database (i.e. it's just a noop)
+
+
+## 0.3.1 (September 22, 2017)
+
+BUG FIXES:
+
+- [common] fix WriteFileAtomic to not use /tmp, which can be on another device
+
+## 0.3.0 (September 22, 2017)
+
+BREAKING CHANGES:
+
+- [log] logger functions no longer return an error
+- [common] NewBaseService takes the new logger
+- [cli] RunCaptureWithArgs now captures stderr and stdout
+  - +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error)
+  - -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error)
+
+FEATURES:
+
+- [common] various common HTTP functionality
+- [common] Date range parsing from string (ex. "2015-12-31:2017-12-31")
+- [common] ProtocolAndAddress function
+- [pubsub] New package for publish-subscribe with more advanced filtering
+
+BUG FIXES:
+
+- [common] fix atomicity of WriteFileAtomic by calling fsync
+- [db] fix memDb iteration index out of range
+- [autofile] fix Flush by calling fsync
+
+## 0.2.2 (June 16, 2017)
+
+FEATURES:
+
+- [common] IsHex and StripHex for handling `0x` prefixed hex strings
+- [log] NewTracingLogger returns a logger that outputs error traces, ala `github.com/pkg/errors`
+
+IMPROVEMENTS:
+
+- [cli] Error handling for tests
+- [cli] Support dashes in ENV variables
+
+BUG FIXES:
+
+- [flowrate] Fix non-deterministic test failures
+
+## 0.2.1 (June 2, 2017)
+
+FEATURES:
+
+- [cli] Log level parsing moved here from tendermint repo
+
+## 0.2.0 (May 18, 2017)
+
+BREAKING CHANGES:
+
+- [common] NewBaseService takes the new logger
+
+
+FEATURES:
+
+- [cli] New library to standardize building command line tools
+- [log] New logging library
+
+BUG FIXES:
+
+- [autofile] Close file before rotating
+
+## 0.1.0 (May 1, 2017)
+
+Initial release, combines what were previously independent repos:
+
+- go-autofile
+- go-clist
+- go-common
+- go-db
+- go-events
+- go-flowrate
+- go-logger
+- go-merkle
+- go-process
diff --git a/libs/README.md b/libs/README.md
new file mode 100644
index 000000000..9ea618dbd
--- /dev/null
+++ b/libs/README.md
@@ -0,0 +1,49 @@
+# TMLIBS
+
+This repo is a home for various small packages.
+
+## autofile
+
+Autofile is file access with automatic log rotation. A group of files is maintained and rotation happens
+when the leading file gets too big. Provides a reader for reading from the file group.
+
+## cli
+
+CLI wraps the `cobra` and `viper` packages and handles some common elements of building a CLI like flags and env vars for the home directory and the logger.
+
+## clist
+
+Clist provides a linked list that is safe for concurrent access by many readers.
+
+## common
+
+Common provides a hodgepodge of useful functions.
+
+## db
+
+DB provides a database interface and a number of implementations, including ones using an in-memory map, the filesystem directory structure,
+an implementation of LevelDB in Go, and the official LevelDB in C.
+
+## events
+
+Events is a synchronous PubSub package.
+
+## flowrate
+
+Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` method.
+
+## log
+
+Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys.
+
+## merkle
+
+Merkle provides a simple static merkle tree and corresponding proofs.
+
+## process
+
+Process is a simple utility for spawning OS processes.
+
+## pubsub
+
+PubSub is an asynchronous PubSub package.
diff --git a/libs/autofile/README.md b/libs/autofile/README.md new file mode 100644 index 000000000..23799200c --- /dev/null +++ b/libs/autofile/README.md @@ -0,0 +1 @@ +# go-autofile diff --git a/libs/autofile/autofile.go b/libs/autofile/autofile.go new file mode 100644 index 000000000..313da6789 --- /dev/null +++ b/libs/autofile/autofile.go @@ -0,0 +1,142 @@ +package autofile + +import ( + "os" + "sync" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* AutoFile usage + +// Create/Append to ./autofile_test +af, err := OpenAutoFile("autofile_test") +if err != nil { + panic(err) +} + +// Stream of writes. +// During this time, the file may be moved e.g. by logRotate. +for i := 0; i < 60; i++ { + af.Write([]byte(Fmt("LOOP(%v)", i))) + time.Sleep(time.Second) +} + +// Close the AutoFile +err = af.Close() +if err != nil { + panic(err) +} +*/ + +const autoFileOpenDuration = 1000 * time.Millisecond + +// Automatically closes and re-opens file for writing. +// This is useful for using a log file with the logrotate tool. +type AutoFile struct { + ID string + Path string + ticker *time.Ticker + mtx sync.Mutex + file *os.File +} + +func OpenAutoFile(path string) (af *AutoFile, err error) { + af = &AutoFile{ + ID: cmn.RandStr(12) + ":" + path, + Path: path, + ticker: time.NewTicker(autoFileOpenDuration), + } + if err = af.openFile(); err != nil { + return + } + go af.processTicks() + sighupWatchers.addAutoFile(af) + return +} + +func (af *AutoFile) Close() error { + af.ticker.Stop() + err := af.closeFile() + sighupWatchers.removeAutoFile(af) + return err +} + +func (af *AutoFile) processTicks() { + for { + _, ok := <-af.ticker.C + if !ok { + return // Done. + } + af.closeFile() + } +} + +func (af *AutoFile) closeFile() (err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + file := af.file + if file == nil { + return nil + } + af.file = nil + return file.Close() +} + +func (af *AutoFile) Write(b []byte) (n int, err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + if af.file == nil { + if err = af.openFile(); err != nil { + return + } + } + + n, err = af.file.Write(b) + return +} + +func (af *AutoFile) Sync() error { + af.mtx.Lock() + defer af.mtx.Unlock() + + if af.file == nil { + if err := af.openFile(); err != nil { + return err + } + } + return af.file.Sync() +} + +func (af *AutoFile) openFile() error { + file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + return err + } + af.file = file + return nil +} + +func (af *AutoFile) Size() (int64, error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + if af.file == nil { + err := af.openFile() + if err != nil { + if err == os.ErrNotExist { + return 0, nil + } + return -1, err + } + } + stat, err := af.file.Stat() + if err != nil { + return -1, err + } + return stat.Size(), nil + +} diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go new file mode 100644 index 000000000..b39fb7cf3 --- /dev/null +++ b/libs/autofile/autofile_test.go @@ -0,0 +1,71 @@ +package autofile + +import ( + "os" + "sync/atomic" + "syscall" + "testing" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestSIGHUP(t *testing.T) { + + // First, create an AutoFile writing to a tempfile dir + file, name := cmn.Tempfile("sighup_test") + if err := file.Close(); err != nil { + t.Fatalf("Error creating tempfile: %v", err) + } + // Here is the actual AutoFile + af, err := OpenAutoFile(name) + if err != nil { + t.Fatalf("Error creating autofile: %v", err) + } + + // Write to 
+    _, err = af.Write([]byte("Line 1\n"))
+    if err != nil {
+        t.Fatalf("Error writing to autofile: %v", err)
+    }
+    _, err = af.Write([]byte("Line 2\n"))
+    if err != nil {
+        t.Fatalf("Error writing to autofile: %v", err)
+    }
+
+    // Move the file over
+    err = os.Rename(name, name+"_old")
+    if err != nil {
+        t.Fatalf("Error moving autofile: %v", err)
+    }
+
+    // Send SIGHUP to self.
+    oldSighupCounter := atomic.LoadInt32(&sighupCounter)
+    syscall.Kill(syscall.Getpid(), syscall.SIGHUP)
+
+    // Wait a bit... signals are not handled synchronously.
+    for atomic.LoadInt32(&sighupCounter) == oldSighupCounter {
+        time.Sleep(time.Millisecond * 10)
+    }
+
+    // Write more to the file.
+    _, err = af.Write([]byte("Line 3\n"))
+    if err != nil {
+        t.Fatalf("Error writing to autofile: %v", err)
+    }
+    _, err = af.Write([]byte("Line 4\n"))
+    if err != nil {
+        t.Fatalf("Error writing to autofile: %v", err)
+    }
+    if err := af.Close(); err != nil {
+        t.Fatalf("Error closing autofile")
+    }
+
+    // Both files should exist
+    if body := cmn.MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" {
+        t.Errorf("Unexpected body %s", body)
+    }
+    if body := cmn.MustReadFile(name); string(body) != "Line 3\nLine 4\n" {
+        t.Errorf("Unexpected body %s", body)
+    }
+}
diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go
new file mode 100644
index 000000000..17b482bed
--- /dev/null
+++ b/libs/autofile/cmd/logjack.go
@@ -0,0 +1,107 @@
+package main
+
+import (
+    "flag"
+    "fmt"
+    "io"
+    "os"
+    "strconv"
+    "strings"
+
+    auto "github.com/tendermint/tendermint/libs/autofile"
+    cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+const Version = "0.0.1"
+const readBufferSize = 1024 // 1KB at a time
+
+// Parse command-line options
+func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) {
+    var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+    var chopSizeStr, limitSizeStr string
+    flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.")
+    flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this")
+    flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). Remove old files.")
+    flagSet.BoolVar(&version, "version", false, "Version")
+    flagSet.Parse(os.Args[1:])
+    chopSize = parseBytesize(chopSizeStr)
+    limitSize = parseBytesize(limitSizeStr)
+    return
+}
+
+func main() {
+
+    // Read options
+    headPath, chopSize, limitSize, version := parseFlags()
+    if version {
+        fmt.Printf("logjack version %v\n", Version)
+        return
+    }
+
+    // Open Group
+    group, err := auto.OpenGroup(headPath)
+    if err != nil {
+        fmt.Printf("logjack couldn't create output file %v\n", headPath)
+        os.Exit(1)
+    }
+    group.SetHeadSizeLimit(chopSize)
+    group.SetTotalSizeLimit(limitSize)
+    err = group.Start()
+    if err != nil {
+        fmt.Printf("logjack couldn't start with file %v\n", headPath)
+        os.Exit(1)
+    }
+
+    go func() {
+        // Forever, read from stdin and write to AutoFile.
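+        // NOTE: os.Stdin.Read may return n > 0 together with a non-nil
+        // error, so the bytes read are written and flushed before the
+        // error is inspected; io.EOF is the normal shutdown path.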
+        buf := make([]byte, readBufferSize)
+        for {
+            n, err := os.Stdin.Read(buf)
+            group.Write(buf[:n])
+            group.Flush()
+            if err != nil {
+                group.Stop()
+                if err == io.EOF {
+                    os.Exit(0)
+                } else {
+                    fmt.Println("logjack errored")
+                    os.Exit(1)
+                }
+            }
+        }
+    }()
+
+    // Trap signal
+    cmn.TrapSignal(func() {
+        fmt.Println("logjack shutting down")
+    })
+}
+
+func parseBytesize(chopSize string) int64 {
+    // Handle suffix multiplier
+    var multiplier int64 = 1
+    if strings.HasSuffix(chopSize, "T") {
+        multiplier = 1024 * 1024 * 1024 * 1024
+        chopSize = chopSize[:len(chopSize)-1]
+    }
+    if strings.HasSuffix(chopSize, "G") {
+        multiplier = 1024 * 1024 * 1024
+        chopSize = chopSize[:len(chopSize)-1]
+    }
+    if strings.HasSuffix(chopSize, "M") {
+        multiplier = 1024 * 1024
+        chopSize = chopSize[:len(chopSize)-1]
+    }
+    if strings.HasSuffix(chopSize, "K") {
+        multiplier = 1024
+        chopSize = chopSize[:len(chopSize)-1]
+    }
+
+    // Parse the numeric part
+    chopSizeInt, err := strconv.Atoi(chopSize)
+    if err != nil {
+        panic(err)
+    }
+
+    return int64(chopSizeInt) * multiplier
+}
diff --git a/libs/autofile/group.go b/libs/autofile/group.go
new file mode 100644
index 000000000..b4368ed9e
--- /dev/null
+++ b/libs/autofile/group.go
@@ -0,0 +1,747 @@
+package autofile
+
+import (
+    "bufio"
+    "errors"
+    "fmt"
+    "io"
+    "log"
+    "os"
+    "path"
+    "path/filepath"
+    "regexp"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+
+    cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+const (
+    groupCheckDuration    = 5000 * time.Millisecond
+    defaultHeadSizeLimit  = 10 * 1024 * 1024       // 10MB
+    defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB
+    maxFilesToRemove      = 4                      // needs to be greater than 1
+)
+
+/*
+You can open a Group to keep restrictions on an AutoFile, like
+the maximum size of each chunk, and/or the total amount of bytes
+stored in the group.
+
+The first file to be written in the Group.Dir is the head file.
+
+    Dir/
+    - <HeadPath>
+
+Once the Head file reaches the size limit, it will be rotated.
+
+    Dir/
+    - <HeadPath>.000 // First rolled file
+    - <HeadPath>     // New head path, starts empty.
+                     // The implicit index is 001.
+
+As more files are written, the index numbers grow...
+
+    Dir/
+    - <HeadPath>.000 // First rolled file
+    - <HeadPath>.001 // Second rolled file
+    - ...
+    - <HeadPath>     // New head path
+
+The Group can also be used to binary-search for some line,
+assuming that marker lines are written occasionally.
+*/
+type Group struct {
+    cmn.BaseService
+
+    ID             string
+    Head           *AutoFile // The head AutoFile to write to
+    headBuf        *bufio.Writer
+    Dir            string // Directory that contains .Head
+    ticker         *time.Ticker
+    mtx            sync.Mutex
+    headSizeLimit  int64
+    totalSizeLimit int64
+    minIndex       int // Includes head
+    maxIndex       int // Includes head, where Head will move to
+
+    // TODO: When we start deleting files, we need to start tracking GroupReaders
+    // and their dependencies.
+}
+
+// OpenGroup creates a new Group with head at headPath. It returns an error if
+// it fails to open the head file.
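+//
+// A minimal usage sketch (error handling elided; the path is illustrative):
+//
+//    g, _ := OpenGroup("data/wal")
+//    g.Start()                // starts the size-limit checks (OnStart)
+//    g.WriteLine("#HEIGHT:1") // buffered; Flush persists and fsyncs
+//    g.Flush()
+//    g.Stop()
+//    g.Close()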
+func OpenGroup(headPath string) (g *Group, err error) { + dir := path.Dir(headPath) + head, err := OpenAutoFile(headPath) + if err != nil { + return nil, err + } + + g = &Group{ + ID: "group:" + head.ID, + Head: head, + headBuf: bufio.NewWriterSize(head, 4096*10), + Dir: dir, + ticker: time.NewTicker(groupCheckDuration), + headSizeLimit: defaultHeadSizeLimit, + totalSizeLimit: defaultTotalSizeLimit, + minIndex: 0, + maxIndex: 0, + } + g.BaseService = *cmn.NewBaseService(nil, "Group", g) + + gInfo := g.readGroupInfo() + g.minIndex = gInfo.MinIndex + g.maxIndex = gInfo.MaxIndex + return +} + +// OnStart implements Service by starting the goroutine that checks file and +// group limits. +func (g *Group) OnStart() error { + go g.processTicks() + return nil +} + +// OnStop implements Service by stopping the goroutine described above. +// NOTE: g.Head must be closed separately using Close. +func (g *Group) OnStop() { + g.ticker.Stop() + g.Flush() // flush any uncommitted data +} + +// Close closes the head file. The group must be stopped by this moment. +func (g *Group) Close() { + g.Flush() // flush any uncommitted data + + g.mtx.Lock() + _ = g.Head.closeFile() + g.mtx.Unlock() +} + +// SetHeadSizeLimit allows you to overwrite default head size limit - 10MB. +func (g *Group) SetHeadSizeLimit(limit int64) { + g.mtx.Lock() + g.headSizeLimit = limit + g.mtx.Unlock() +} + +// HeadSizeLimit returns the current head size limit. +func (g *Group) HeadSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headSizeLimit +} + +// SetTotalSizeLimit allows you to overwrite default total size limit of the +// group - 1GB. +func (g *Group) SetTotalSizeLimit(limit int64) { + g.mtx.Lock() + g.totalSizeLimit = limit + g.mtx.Unlock() +} + +// TotalSizeLimit returns total size limit of the group. +func (g *Group) TotalSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.totalSizeLimit +} + +// MaxIndex returns index of the last file in the group. +func (g *Group) MaxIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.maxIndex +} + +// MinIndex returns index of the first file in the group. +func (g *Group) MinIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.minIndex +} + +// Write writes the contents of p into the current head of the group. It +// returns the number of bytes written. If nn < len(p), it also returns an +// error explaining why the write is short. +// NOTE: Writes are buffered so they don't write synchronously +// TODO: Make it halt if space is unavailable +func (g *Group) Write(p []byte) (nn int, err error) { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headBuf.Write(p) +} + +// WriteLine writes line into the current head of the group. It also appends "\n". +// NOTE: Writes are buffered so they don't write synchronously +// TODO: Make it halt if space is unavailable +func (g *Group) WriteLine(line string) error { + g.mtx.Lock() + defer g.mtx.Unlock() + _, err := g.headBuf.Write([]byte(line + "\n")) + return err +} + +// Flush writes any buffered data to the underlying file and commits the +// current content of the file to stable storage. +func (g *Group) Flush() error { + g.mtx.Lock() + defer g.mtx.Unlock() + err := g.headBuf.Flush() + if err == nil { + err = g.Head.Sync() + } + return err +} + +func (g *Group) processTicks() { + for { + _, ok := <-g.ticker.C + if !ok { + return // Done. 
+ } + g.checkHeadSizeLimit() + g.checkTotalSizeLimit() + } +} + +// NOTE: for testing +func (g *Group) stopTicker() { + g.ticker.Stop() +} + +// NOTE: this function is called manually in tests. +func (g *Group) checkHeadSizeLimit() { + limit := g.HeadSizeLimit() + if limit == 0 { + return + } + size, err := g.Head.Size() + if err != nil { + panic(err) + } + if size >= limit { + g.RotateFile() + } +} + +func (g *Group) checkTotalSizeLimit() { + limit := g.TotalSizeLimit() + if limit == 0 { + return + } + + gInfo := g.readGroupInfo() + totalSize := gInfo.TotalSize + for i := 0; i < maxFilesToRemove; i++ { + index := gInfo.MinIndex + i + if totalSize < limit { + return + } + if index == gInfo.MaxIndex { + // Special degenerate case, just do nothing. + log.Println("WARNING: Group's head " + g.Head.Path + "may grow without bound") + return + } + pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex) + fileInfo, err := os.Stat(pathToRemove) + if err != nil { + log.Println("WARNING: Failed to fetch info for file @" + pathToRemove) + continue + } + err = os.Remove(pathToRemove) + if err != nil { + log.Println(err) + return + } + totalSize -= fileInfo.Size() + } +} + +// RotateFile causes group to close the current head and assign it some index. +// Note it does not create a new head. +func (g *Group) RotateFile() { + g.mtx.Lock() + defer g.mtx.Unlock() + + headPath := g.Head.Path + + if err := g.Head.closeFile(); err != nil { + panic(err) + } + + indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1) + if err := os.Rename(headPath, indexPath); err != nil { + panic(err) + } + + g.maxIndex++ +} + +// NewReader returns a new group reader. +// CONTRACT: Caller must close the returned GroupReader. +func (g *Group) NewReader(index int) (*GroupReader, error) { + r := newGroupReader(g) + err := r.SetIndex(index) + if err != nil { + return nil, err + } + return r, nil +} + +// Returns -1 if line comes after, 0 if found, 1 if line comes before. +type SearchFunc func(line string) (int, error) + +// Searches for the right file in Group, then returns a GroupReader to start +// streaming lines. +// Returns true if an exact match was found, otherwise returns the next greater +// line that starts with prefix. +// CONTRACT: Caller must close the returned GroupReader +func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { + g.mtx.Lock() + minIndex, maxIndex := g.minIndex, g.maxIndex + g.mtx.Unlock() + // Now minIndex/maxIndex may change meanwhile, + // but it shouldn't be a big deal + // (maybe we'll want to limit scanUntil though) + + for { + curIndex := (minIndex + maxIndex + 1) / 2 + + // Base case, when there's only 1 choice left. + if minIndex == maxIndex { + r, err := g.NewReader(maxIndex) + if err != nil { + return nil, false, err + } + match, err := scanUntil(r, prefix, cmp) + if err != nil { + r.Close() + return nil, false, err + } + return r, match, err + } + + // Read starting roughly at the middle file, + // until we find line that has prefix. + r, err := g.NewReader(curIndex) + if err != nil { + return nil, false, err + } + foundIndex, line, err := scanNext(r, prefix) + r.Close() + if err != nil { + return nil, false, err + } + + // Compare this line to our search query. 
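+        // cmp orients the bisection: a negative result means the target
+        // lies after this line (search later files), zero is an exact
+        // match, and a positive result means the target lies before this
+        // line (search earlier files).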
+ val, err := cmp(line) + if err != nil { + return nil, false, err + } + if val < 0 { + // Line will come later + minIndex = foundIndex + } else if val == 0 { + // Stroke of luck, found the line + r, err := g.NewReader(foundIndex) + if err != nil { + return nil, false, err + } + match, err := scanUntil(r, prefix, cmp) + if !match { + panic("Expected match to be true") + } + if err != nil { + r.Close() + return nil, false, err + } + return r, true, err + } else { + // We passed it + maxIndex = curIndex - 1 + } + } + +} + +// Scans and returns the first line that starts with 'prefix' +// Consumes line and returns it. +func scanNext(r *GroupReader, prefix string) (int, string, error) { + for { + line, err := r.ReadLine() + if err != nil { + return 0, "", err + } + if !strings.HasPrefix(line, prefix) { + continue + } + index := r.CurIndex() + return index, line, nil + } +} + +// Returns true iff an exact match was found. +// Pushes line, does not consume it. +func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { + for { + line, err := r.ReadLine() + if err != nil { + return false, err + } + if !strings.HasPrefix(line, prefix) { + continue + } + val, err := cmp(line) + if err != nil { + return false, err + } + if val < 0 { + continue + } else if val == 0 { + r.PushLine(line) + return true, nil + } else { + r.PushLine(line) + return false, nil + } + } +} + +// Searches backwards for the last line in Group with prefix. +// Scans each file forward until the end to find the last match. +func (g *Group) FindLast(prefix string) (match string, found bool, err error) { + g.mtx.Lock() + minIndex, maxIndex := g.minIndex, g.maxIndex + g.mtx.Unlock() + + r, err := g.NewReader(maxIndex) + if err != nil { + return "", false, err + } + defer r.Close() + + // Open files from the back and read +GROUP_LOOP: + for i := maxIndex; i >= minIndex; i-- { + err := r.SetIndex(i) + if err != nil { + return "", false, err + } + // Scan each line and test whether line matches + for { + line, err := r.ReadLine() + if err == io.EOF { + if found { + return match, found, nil + } + continue GROUP_LOOP + } else if err != nil { + return "", false, err + } + if strings.HasPrefix(line, prefix) { + match = line + found = true + } + if r.CurIndex() > i { + if found { + return match, found, nil + } + continue GROUP_LOOP + } + } + } + + return +} + +// GroupInfo holds information about the group. +type GroupInfo struct { + MinIndex int // index of the first file in the group, including head + MaxIndex int // index of the last file in the group, including head + TotalSize int64 // total size of the group + HeadSize int64 // size of the head +} + +// Returns info after scanning all files in g.Head's dir. +func (g *Group) ReadGroupInfo() GroupInfo { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.readGroupInfo() +} + +// Index includes the head. 
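+// It scans g.Head's directory for files whose names carry a numeric
+// ".NNN" suffix, derives MinIndex/MaxIndex from those suffixes, and
+// sums all file sizes into TotalSize.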
+// CONTRACT: caller should have called g.mtx.Lock +func (g *Group) readGroupInfo() GroupInfo { + groupDir := filepath.Dir(g.Head.Path) + headBase := filepath.Base(g.Head.Path) + var minIndex, maxIndex int = -1, -1 + var totalSize, headSize int64 = 0, 0 + + dir, err := os.Open(groupDir) + if err != nil { + panic(err) + } + defer dir.Close() + fiz, err := dir.Readdir(0) + if err != nil { + panic(err) + } + + // For each file in the directory, filter by pattern + for _, fileInfo := range fiz { + if fileInfo.Name() == headBase { + fileSize := fileInfo.Size() + totalSize += fileSize + headSize = fileSize + continue + } else if strings.HasPrefix(fileInfo.Name(), headBase) { + fileSize := fileInfo.Size() + totalSize += fileSize + indexedFilePattern := regexp.MustCompile(`^.+\.([0-9]{3,})$`) + submatch := indexedFilePattern.FindSubmatch([]byte(fileInfo.Name())) + if len(submatch) != 0 { + // Matches + fileIndex, err := strconv.Atoi(string(submatch[1])) + if err != nil { + panic(err) + } + if maxIndex < fileIndex { + maxIndex = fileIndex + } + if minIndex == -1 || fileIndex < minIndex { + minIndex = fileIndex + } + } + } + } + + // Now account for the head. + if minIndex == -1 { + // If there were no numbered files, + // then the head is index 0. + minIndex, maxIndex = 0, 0 + } else { + // Otherwise, the head file is 1 greater + maxIndex++ + } + return GroupInfo{minIndex, maxIndex, totalSize, headSize} +} + +func filePathForIndex(headPath string, index int, maxIndex int) string { + if index == maxIndex { + return headPath + } + return fmt.Sprintf("%v.%03d", headPath, index) +} + +//-------------------------------------------------------------------------------- + +// GroupReader provides an interface for reading from a Group. +type GroupReader struct { + *Group + mtx sync.Mutex + curIndex int + curFile *os.File + curReader *bufio.Reader + curLine []byte +} + +func newGroupReader(g *Group) *GroupReader { + return &GroupReader{ + Group: g, + curIndex: 0, + curFile: nil, + curReader: nil, + curLine: nil, + } +} + +// Close closes the GroupReader by closing the cursor file. +func (gr *GroupReader) Close() error { + gr.mtx.Lock() + defer gr.mtx.Unlock() + + if gr.curReader != nil { + err := gr.curFile.Close() + gr.curIndex = 0 + gr.curReader = nil + gr.curFile = nil + gr.curLine = nil + return err + } + return nil +} + +// Read implements io.Reader, reading bytes from the current Reader +// incrementing index until enough bytes are read. +func (gr *GroupReader) Read(p []byte) (n int, err error) { + lenP := len(p) + if lenP == 0 { + return 0, errors.New("given empty slice") + } + + gr.mtx.Lock() + defer gr.mtx.Unlock() + + // Open file if not open yet + if gr.curReader == nil { + if err = gr.openFile(gr.curIndex); err != nil { + return 0, err + } + } + + // Iterate over files until enough bytes are read + var nn int + for { + nn, err = gr.curReader.Read(p[n:]) + n += nn + if err == io.EOF { + if n >= lenP { + return n, nil + } + // Open the next file + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + return n, err1 + } + } else if err != nil { + return n, err + } else if nn == 0 { // empty file + return n, err + } + } +} + +// ReadLine reads a line (without delimiter). +// just return io.EOF if no new lines found. 
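+//
+// A typical consumption loop (sketch; process is a hypothetical handler):
+//
+//    for {
+//        line, err := r.ReadLine()
+//        if err == io.EOF {
+//            break
+//        } else if err != nil {
+//            return err
+//        }
+//        process(line)
+//    }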
+func (gr *GroupReader) ReadLine() (string, error) {
+    gr.mtx.Lock()
+    defer gr.mtx.Unlock()
+
+    // From PushLine
+    if gr.curLine != nil {
+        line := string(gr.curLine)
+        gr.curLine = nil
+        return line, nil
+    }
+
+    // Open file if not open yet
+    if gr.curReader == nil {
+        err := gr.openFile(gr.curIndex)
+        if err != nil {
+            return "", err
+        }
+    }
+
+    // Iterate over files until line is found
+    var linePrefix string
+    for {
+        bytesRead, err := gr.curReader.ReadBytes('\n')
+        if err == io.EOF {
+            // Open the next file
+            if err1 := gr.openFile(gr.curIndex + 1); err1 != nil {
+                return "", err1
+            }
+            if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') {
+                return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil
+            }
+            linePrefix += string(bytesRead)
+            continue
+        } else if err != nil {
+            return "", err
+        }
+        return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil
+    }
+}
+
+// IF index > gr.Group.maxIndex, returns io.EOF
+// CONTRACT: caller should hold gr.mtx
+func (gr *GroupReader) openFile(index int) error {
+
+    // Lock on Group to ensure that head doesn't move in the meanwhile.
+    gr.Group.mtx.Lock()
+    defer gr.Group.mtx.Unlock()
+
+    if index > gr.Group.maxIndex {
+        return io.EOF
+    }
+
+    curFilePath := filePathForIndex(gr.Head.Path, index, gr.Group.maxIndex)
+    curFile, err := os.Open(curFilePath)
+    if err != nil {
+        return err
+    }
+    curReader := bufio.NewReader(curFile)
+
+    // Update gr.cur*
+    if gr.curFile != nil {
+        gr.curFile.Close() // TODO return error?
+    }
+    gr.curIndex = index
+    gr.curFile = curFile
+    gr.curReader = curReader
+    gr.curLine = nil
+    return nil
+}
+
+// PushLine makes the given line the current one, so the next time somebody
+// calls ReadLine, this line will be returned.
+// panics if called twice without calling ReadLine.
+func (gr *GroupReader) PushLine(line string) {
+    gr.mtx.Lock()
+    defer gr.mtx.Unlock()
+
+    if gr.curLine == nil {
+        gr.curLine = []byte(line)
+    } else {
+        panic("PushLine failed, already have line")
+    }
+}
+
+// CurIndex returns cursor's file index.
+func (gr *GroupReader) CurIndex() int {
+    gr.mtx.Lock()
+    defer gr.mtx.Unlock()
+    return gr.curIndex
+}
+
+// SetIndex sets the cursor's file index to index by opening a file at this
+// position.
+func (gr *GroupReader) SetIndex(index int) error {
+    gr.mtx.Lock()
+    defer gr.mtx.Unlock()
+    return gr.openFile(index)
+}
+
+//--------------------------------------------------------------------------------
+
+// A simple SearchFunc that assumes that the marker is of form
+// <prefix><number>.
+// For example, if prefix is '#HEIGHT:', the markers are expected to be of the form:
+//
+// #HEIGHT:1
+// ...
+// #HEIGHT:2
+// ...
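+//
+// For example, to position a reader at the first marker at or after
+// height 42 (sketch):
+//
+//    r, found, err := g.Search("#HEIGHT:", MakeSimpleSearchFunc("#HEIGHT:", 42))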
+func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { + return func(line string) (int, error) { + if !strings.HasPrefix(line, prefix) { + return -1, errors.New(cmn.Fmt("Marker line did not have prefix: %v", prefix)) + } + i, err := strconv.Atoi(line[len(prefix):]) + if err != nil { + return -1, errors.New(cmn.Fmt("Failed to parse marker line: %v", err.Error())) + } + if target < i { + return 1, nil + } else if target == i { + return 0, nil + } else { + return -1, nil + } + } +} diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go new file mode 100644 index 000000000..72581f9e2 --- /dev/null +++ b/libs/autofile/group_test.go @@ -0,0 +1,438 @@ +package autofile + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +// NOTE: Returned group has ticker stopped +func createTestGroup(t *testing.T, headSizeLimit int64) *Group { + testID := cmn.RandStr(12) + testDir := "_test_" + testID + err := cmn.EnsureDir(testDir, 0700) + require.NoError(t, err, "Error creating dir") + headPath := testDir + "/myfile" + g, err := OpenGroup(headPath) + require.NoError(t, err, "Error opening Group") + g.SetHeadSizeLimit(headSizeLimit) + g.stopTicker() + require.NotEqual(t, nil, g, "Failed to create Group") + return g +} + +func destroyTestGroup(t *testing.T, g *Group) { + g.Close() + err := os.RemoveAll(g.Dir) + require.NoError(t, err, "Error removing test Group directory") +} + +func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { + assert.Equal(t, minIndex, gInfo.MinIndex) + assert.Equal(t, maxIndex, gInfo.MaxIndex) + assert.Equal(t, totalSize, gInfo.TotalSize) + assert.Equal(t, headSize, gInfo.HeadSize) +} + +func TestCheckHeadSizeLimit(t *testing.T) { + g := createTestGroup(t, 1000*1000) + + // At first, there are no files. + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) + + // Write 1000 bytes 999 times. + for i := 0; i < 999; i++ { + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + } + g.Flush() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + + // Even calling checkHeadSizeLimit manually won't rotate it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + + // Write 1000 more bytes. + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + g.Flush() + + // Calling checkHeadSizeLimit this time rolls it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) + + // Write 1000 more bytes. + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + g.Flush() + + // Calling checkHeadSizeLimit does nothing. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000) + + // Write 1000 bytes 999 times. + for i := 0; i < 999; i++ { + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + } + g.Flush() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) + + // Calling checkHeadSizeLimit rolls it again. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) + + // Write 1000 more bytes. 
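+    // NOTE: this write goes to g.Head directly, bypassing the group's
+    // buffered writer, so the bytes count toward Size() as soon as they
+    // are written.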
+    _, err = g.Head.Write([]byte(cmn.RandStr(999) + "\n"))
+    require.NoError(t, err, "Error appending to head")
+    g.Flush()
+    assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)
+
+    // Calling checkHeadSizeLimit does nothing.
+    g.checkHeadSizeLimit()
+    assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)
+
+    // Cleanup
+    destroyTestGroup(t, g)
+}
+
+func TestSearch(t *testing.T) {
+    g := createTestGroup(t, 10*1000)
+
+    // Create some files in the group that have several INFO lines in them.
+    // Try to put the INFO lines in various spots.
+    for i := 0; i < 100; i++ {
+        // The random junk at the end ensures that this INFO line
+        // is equally likely to show up at the end.
+        _, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123))))
+        require.NoError(t, err, "Failed to write to head")
+        g.checkHeadSizeLimit()
+        for j := 0; j < 10; j++ {
+            _, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n"))
+            require.NoError(t, err1, "Failed to write to head")
+            g.checkHeadSizeLimit()
+        }
+    }
+
+    // Create a search func that searches for line
+    makeSearchFunc := func(target int) SearchFunc {
+        return func(line string) (int, error) {
+            parts := strings.Split(line, " ")
+            if len(parts) != 3 {
+                return -1, errors.New("Line did not have 3 parts")
+            }
+            i, err := strconv.Atoi(parts[1])
+            if err != nil {
+                return -1, errors.New("Failed to parse INFO: " + err.Error())
+            }
+            if target < i {
+                return 1, nil
+            } else if target == i {
+                return 0, nil
+            } else {
+                return -1, nil
+            }
+        }
+    }
+
+    // Now search for each number
+    for i := 0; i < 100; i++ {
+        t.Log("Testing for i", i)
+        gr, match, err := g.Search("INFO", makeSearchFunc(i))
+        require.NoError(t, err, "Failed to search for line")
+        assert.True(t, match, "Expected Search to return exact match")
+        line, err := gr.ReadLine()
+        require.NoError(t, err, "Failed to read line after search")
+        if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) {
+            t.Fatal("Failed to get correct line")
+        }
+        // Make sure we can continue to read from there.
+        cur := i + 1
+        for {
+            line, err := gr.ReadLine()
+            if err == io.EOF {
+                if cur == 99+1 {
+                    // OK!
+                    break
+                } else {
+                    t.Fatal("Got EOF after the wrong INFO #")
+                }
+            } else if err != nil {
+                t.Fatal("Error reading line", err)
+            }
+            if !strings.HasPrefix(line, "INFO ") {
+                continue
+            }
+            if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) {
+                t.Fatalf("Unexpected INFO #. Expected %v got:\n%v", cur, line)
+            }
+            cur++
+        }
+        gr.Close()
+    }
+
+    // Now search for something that is too small.
+    // We should get the first available line.
+    {
+        gr, match, err := g.Search("INFO", makeSearchFunc(-999))
+        require.NoError(t, err, "Failed to search for line")
+        assert.False(t, match, "Expected Search to not return exact match")
+        line, err := gr.ReadLine()
+        require.NoError(t, err, "Failed to read line after search")
+        if !strings.HasPrefix(line, "INFO 0 ") {
+            t.Error("Failed to fetch correct line, which is the earliest INFO")
+        }
+        err = gr.Close()
+        require.NoError(t, err, "Failed to close GroupReader")
+    }
+
+    // Now search for something that is too large.
+    // We should get an EOF error.
+ { + gr, _, err := g.Search("INFO", makeSearchFunc(999)) + assert.Equal(t, io.EOF, err) + assert.Nil(t, gr) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestRotateFile(t *testing.T) { + g := createTestGroup(t, 0) + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + // Read g.Head.Path+"000" + body1, err := ioutil.ReadFile(g.Head.Path + ".000") + assert.NoError(t, err, "Failed to read first rolled file") + if string(body1) != "Line 1\nLine 2\nLine 3\n" { + t.Errorf("Got unexpected contents: [%v]", string(body1)) + } + + // Read g.Head.Path + body2, err := ioutil.ReadFile(g.Head.Path) + assert.NoError(t, err, "Failed to read first rolled file") + if string(body2) != "Line 4\nLine 5\nLine 6\n" { + t.Errorf("Got unexpected contents: [%v]", string(body2)) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast1(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("# a") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.WriteLine("# b") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast2(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("# a") + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("# b") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast3(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("# a") + g.WriteLine("Line 2") + g.WriteLine("# b") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast4(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.False(t, found) + assert.Empty(t, match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestWrite(t *testing.T) { + g := createTestGroup(t, 0) + + written := []byte("Medusa") + g.Write(written) + g.Flush() + + read := make([]byte, len(written)) + gr, err := g.NewReader(0) + require.NoError(t, err, "failed to create reader") + + _, err = gr.Read(read) + assert.NoError(t, err, "failed to read data") + assert.Equal(t, written, read) + + // Cleanup + destroyTestGroup(t, g) +} + +// test that Read reads the required amount of bytes from all the files in the +// group and returns no error if n == size of the given slice. 
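+// Read crosses file boundaries transparently: when the current indexed
+// file is exhausted, GroupReader opens index+1 and keeps filling the slice.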
+func TestGroupReaderRead(t *testing.T) {
+    g := createTestGroup(t, 0)
+
+    professor := []byte("Professor Monster")
+    g.Write(professor)
+    g.Flush()
+    g.RotateFile()
+    frankenstein := []byte("Frankenstein's Monster")
+    g.Write(frankenstein)
+    g.Flush()
+
+    totalWrittenLength := len(professor) + len(frankenstein)
+    read := make([]byte, totalWrittenLength)
+    gr, err := g.NewReader(0)
+    require.NoError(t, err, "failed to create reader")
+
+    n, err := gr.Read(read)
+    assert.NoError(t, err, "failed to read data")
+    assert.Equal(t, totalWrittenLength, n, "not enough bytes read")
+    professorPlusFrankenstein := professor
+    professorPlusFrankenstein = append(professorPlusFrankenstein, frankenstein...)
+    assert.Equal(t, professorPlusFrankenstein, read)
+
+    // Cleanup
+    destroyTestGroup(t, g)
+}
+
+// test that Read returns an error if the number of bytes read < size of
+// the given slice. Subsequent call should return 0, io.EOF.
+func TestGroupReaderRead2(t *testing.T) {
+    g := createTestGroup(t, 0)
+
+    professor := []byte("Professor Monster")
+    g.Write(professor)
+    g.Flush()
+    g.RotateFile()
+    frankenstein := []byte("Frankenstein's Monster")
+    frankensteinPart := []byte("Frankenstein")
+    g.Write(frankensteinPart) // note writing only a part
+    g.Flush()
+
+    totalLength := len(professor) + len(frankenstein)
+    read := make([]byte, totalLength)
+    gr, err := g.NewReader(0)
+    require.NoError(t, err, "failed to create reader")
+
+    // 1) n < (size of the given slice), io.EOF
+    n, err := gr.Read(read)
+    assert.Equal(t, io.EOF, err)
+    assert.Equal(t, len(professor)+len(frankensteinPart), n, "read more/fewer bytes than are in the group")
+
+    // 2) 0, io.EOF
+    n, err = gr.Read([]byte("0"))
+    assert.Equal(t, io.EOF, err)
+    assert.Equal(t, 0, n)
+
+    // Cleanup
+    destroyTestGroup(t, g)
+}
+
+func TestMinIndex(t *testing.T) {
+    g := createTestGroup(t, 0)
+
+    assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning")
+
+    // Cleanup
+    destroyTestGroup(t, g)
+}
+
+func TestMaxIndex(t *testing.T) {
+    g := createTestGroup(t, 0)
+
+    assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning")
+
+    g.WriteLine("Line 1")
+    g.Flush()
+    g.RotateFile()
+
+    assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file")
+
+    // Cleanup
+    destroyTestGroup(t, g)
+}
diff --git a/libs/autofile/sighup_watcher.go b/libs/autofile/sighup_watcher.go
new file mode 100644
index 000000000..56fbd4d86
--- /dev/null
+++ b/libs/autofile/sighup_watcher.go
@@ -0,0 +1,63 @@
+package autofile
+
+import (
+    "os"
+    "os/signal"
+    "sync"
+    "sync/atomic"
+    "syscall"
+)
+
+func init() {
+    initSighupWatcher()
+}
+
+var sighupWatchers *SighupWatcher
+var sighupCounter int32 // For testing
+
+func initSighupWatcher() {
+    sighupWatchers = newSighupWatcher()
+
+    c := make(chan os.Signal, 1)
+    signal.Notify(c, syscall.SIGHUP)
+
+    go func() {
+        for range c {
+            sighupWatchers.closeAll()
+            atomic.AddInt32(&sighupCounter, 1)
+        }
+    }()
+}
+
+// SighupWatcher watches for SIGHUP events and notifies registered AutoFiles
+type SighupWatcher struct {
+    mtx       sync.Mutex
+    autoFiles map[string]*AutoFile
+}
+
+func newSighupWatcher() *SighupWatcher {
+    return &SighupWatcher{
+        autoFiles: make(map[string]*AutoFile, 10),
+    }
+}
+
+func (w *SighupWatcher) addAutoFile(af *AutoFile) {
+    w.mtx.Lock()
+    w.autoFiles[af.ID] = af
+    w.mtx.Unlock()
+}
+
+// If AutoFile isn't registered or was already removed, does nothing.
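+// (Removal happens on AutoFile.Close; files closed by closeAll simply
+// re-open on their next Write or Sync, which is what makes SIGHUP-driven
+// log rotation work.)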
+func (w *SighupWatcher) removeAutoFile(af *AutoFile) { + w.mtx.Lock() + delete(w.autoFiles, af.ID) + w.mtx.Unlock() +} + +func (w *SighupWatcher) closeAll() { + w.mtx.Lock() + for _, af := range w.autoFiles { + af.closeFile() + } + w.mtx.Unlock() +} diff --git a/libs/bech32/bech32.go b/libs/bech32/bech32.go new file mode 100644 index 000000000..a4db86d5f --- /dev/null +++ b/libs/bech32/bech32.go @@ -0,0 +1,29 @@ +package bech32 + +import ( + "github.com/btcsuite/btcutil/bech32" + "github.com/pkg/errors" +) + +//ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32 +func ConvertAndEncode(hrp string, data []byte) (string, error) { + converted, err := bech32.ConvertBits(data, 8, 5, true) + if err != nil { + return "", errors.Wrap(err, "encoding bech32 failed") + } + return bech32.Encode(hrp, converted) + +} + +//DecodeAndConvert decodes a bech32 encoded string and converts to base64 encoded bytes +func DecodeAndConvert(bech string) (string, []byte, error) { + hrp, data, err := bech32.Decode(bech) + if err != nil { + return "", nil, errors.Wrap(err, "decoding bech32 failed") + } + converted, err := bech32.ConvertBits(data, 5, 8, false) + if err != nil { + return "", nil, errors.Wrap(err, "decoding bech32 failed") + } + return hrp, converted, nil +} diff --git a/libs/bech32/bech32_test.go b/libs/bech32/bech32_test.go new file mode 100644 index 000000000..830942061 --- /dev/null +++ b/libs/bech32/bech32_test.go @@ -0,0 +1,31 @@ +package bech32_test + +import ( + "bytes" + "crypto/sha256" + "testing" + + "github.com/tendermint/tendermint/libs/bech32" +) + +func TestEncodeAndDecode(t *testing.T) { + + sum := sha256.Sum256([]byte("hello world\n")) + + bech, err := bech32.ConvertAndEncode("shasum", sum[:]) + + if err != nil { + t.Error(err) + } + hrp, data, err := bech32.DecodeAndConvert(bech) + + if err != nil { + t.Error(err) + } + if hrp != "shasum" { + t.Error("Invalid hrp") + } + if !bytes.Equal(data, sum[:]) { + t.Error("Invalid decode") + } +} diff --git a/libs/circle.yml b/libs/circle.yml new file mode 100644 index 000000000..390ffb039 --- /dev/null +++ b/libs/circle.yml @@ -0,0 +1,21 @@ +machine: + environment: + GOPATH: "${HOME}/.go_workspace" + PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME" + PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + hosts: + localhost: 127.0.0.1 + +dependencies: + override: + - mkdir -p "$PROJECT_PARENT_PATH" + - ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH" + post: + - go version + +test: + override: + - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh + post: + - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt + - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/libs/cli/flags/log_level.go b/libs/cli/flags/log_level.go new file mode 100644 index 000000000..156106a5a --- /dev/null +++ b/libs/cli/flags/log_level.go @@ -0,0 +1,86 @@ +package flags + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/libs/log" +) + +const ( + defaultLogLevelKey = "*" +) + +// ParseLogLevel parses complex log level - comma-separated +// list of module:level pairs with an optional *:level pair (* means +// all other modules). 
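+// Valid levels are "debug", "info", "error" and "none".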
+// +// Example: +// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") +func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) { + if lvl == "" { + return nil, errors.New("Empty log level") + } + + l := lvl + + // prefix simple one word levels (e.g. "info") with "*" + if !strings.Contains(l, ":") { + l = defaultLogLevelKey + ":" + l + } + + options := make([]log.Option, 0) + + isDefaultLogLevelSet := false + var option log.Option + var err error + + list := strings.Split(l, ",") + for _, item := range list { + moduleAndLevel := strings.Split(item, ":") + + if len(moduleAndLevel) != 2 { + return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list) + } + + module := moduleAndLevel[0] + level := moduleAndLevel[1] + + if module == defaultLogLevelKey { + option, err = log.AllowLevel(level) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l)) + } + options = append(options, option) + isDefaultLogLevelSet = true + } else { + switch level { + case "debug": + option = log.AllowDebugWith("module", module) + case "info": + option = log.AllowInfoWith("module", module) + case "error": + option = log.AllowErrorWith("module", module) + case "none": + option = log.AllowNoneWith("module", module) + default: + return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list) + } + options = append(options, option) + + } + } + + // if "*" is not provided, set default global level + if !isDefaultLogLevelSet { + option, err = log.AllowLevel(defaultLogLevelValue) + if err != nil { + return nil, err + } + options = append(options, option) + } + + return log.NewFilter(logger, options...), nil +} diff --git a/libs/cli/flags/log_level_test.go b/libs/cli/flags/log_level_test.go new file mode 100644 index 000000000..1503ec281 --- /dev/null +++ b/libs/cli/flags/log_level_test.go @@ -0,0 +1,94 @@ +package flags_test + +import ( + "bytes" + "strings" + "testing" + + tmflags "github.com/tendermint/tendermint/libs/cli/flags" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + defaultLogLevelValue = "info" +) + +func TestParseLogLevel(t *testing.T) { + var buf bytes.Buffer + jsonLogger := log.NewTMJSONLogger(&buf) + + correctLogLevels := []struct { + lvl string + expectedLogLines []string + }{ + {"mempool:error", []string{ + ``, // if no default is given, assume info + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, // if no default is given, assume info + ``}}, + + {"mempool:error,*:debug", []string{ + `{"_msg":"Kingpin","level":"debug","module":"wire"}`, + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, + + {"*:debug,wire:none", []string{ + ``, + `{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, + } + + for _, c := range correctLogLevels { + logger, err := tmflags.ParseLogLevel(c.lvl, jsonLogger, defaultLogLevelValue) + if err != nil { + t.Fatal(err) + } + + buf.Reset() + + logger.With("module", "wire").Debug("Kingpin") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have 
{ + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "mempool").Info("Kitty Pryde") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "mempool").Error("Mesmero") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "state").Info("Mind") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[3] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[3], have, c.lvl) + } + + buf.Reset() + + logger.Debug("Gideon") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[4] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[4], have, c.lvl) + } + } + + incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"} + for _, lvl := range incorrectLogLevel { + if _, err := tmflags.ParseLogLevel(lvl, jsonLogger, defaultLogLevelValue); err == nil { + t.Fatalf("Expected %s to produce error", lvl) + } + } +} diff --git a/libs/cli/helper.go b/libs/cli/helper.go new file mode 100644 index 000000000..878cf26e5 --- /dev/null +++ b/libs/cli/helper.go @@ -0,0 +1,87 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// WriteConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. +func WriteConfigVals(dir string, vals map[string]string) error { + data := "" + for k, v := range vals { + data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) + } + cfile := filepath.Join(dir, "config.toml") + return ioutil.WriteFile(cfile, []byte(data), 0666) +} + +// RunWithArgs executes the given command with the specified command line args +// and environmental variables set. It returns any error returned from cmd.Execute() +func RunWithArgs(cmd Executable, args []string, env map[string]string) error { + oargs := os.Args + oenv := map[string]string{} + // defer returns the environment back to normal + defer func() { + os.Args = oargs + for k, v := range oenv { + os.Setenv(k, v) + } + }() + + // set the args and env how we want them + os.Args = args + for k, v := range env { + // backup old value if there, to restore at end + oenv[k] = os.Getenv(k) + err := os.Setenv(k, v) + if err != nil { + return err + } + } + + // and finally run the command + return cmd.Execute() +} + +// RunCaptureWithArgs executes the given command with the specified command +// line args and environmental variables set. 
+// It returns string fields representing output written to stdout and stderr;
+// any error from cmd.Execute() is also returned.
+func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) {
+    oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout
+    rOut, wOut, _ := os.Pipe()
+    rErr, wErr, _ := os.Pipe()
+    os.Stdout, os.Stderr = wOut, wErr
+    defer func() {
+        os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout
+    }()
+
+    // copy the output in a separate goroutine so printing can't block indefinitely
+    copyStd := func(reader *os.File) *(chan string) {
+        stdC := make(chan string)
+        go func() {
+            var buf bytes.Buffer
+            // io.Copy will end when we call reader.Close() below
+            io.Copy(&buf, reader)
+            stdC <- buf.String()
+        }()
+        return &stdC
+    }
+    outC := copyStd(rOut)
+    errC := copyStd(rErr)
+
+    // now run the command
+    err = RunWithArgs(cmd, args, env)
+
+    // and grab the stdout to return
+    wOut.Close()
+    wErr.Close()
+    stdout = <-*outC
+    stderr = <-*errC
+    return stdout, stderr, err
+}
diff --git a/libs/cli/setup.go b/libs/cli/setup.go
new file mode 100644
index 000000000..06cf1cd1f
--- /dev/null
+++ b/libs/cli/setup.go
@@ -0,0 +1,157 @@
+package cli
+
+import (
+    "fmt"
+    "os"
+    "path/filepath"
+    "strings"
+
+    "github.com/pkg/errors"
+    "github.com/spf13/cobra"
+    "github.com/spf13/viper"
+)
+
+const (
+    HomeFlag     = "home"
+    TraceFlag    = "trace"
+    OutputFlag   = "output"
+    EncodingFlag = "encoding"
+)
+
+// Executable is the minimal interface to *cobra.Command, so we can
+// wrap if desired before the test
+type Executable interface {
+    Execute() error
+}
+
+// PrepareBaseCmd is meant for tendermint and other servers
+func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor {
+    cobra.OnInitialize(func() { initEnv(envPrefix) })
+    cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data")
+    cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors")
+    cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE)
+    return Executor{cmd, os.Exit}
+}
+
+// PrepareMainCmd is meant for client side libs that want some more flags
+//
+// This adds --encoding (hex, btc, base64) and --output (text, json) to
+// the command. These only really make sense in interactive commands.
+func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor {
+    cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)")
+    cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)")
+    cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE)
+    return PrepareBaseCmd(cmd, envPrefix, defaultHome)
+}
+
+// initEnv configures viper to read ENV variables with the given prefix, if set.
+func initEnv(prefix string) {
+    copyEnvVars(prefix)
+
+    // env variables with TM prefix (eg.
TM_ROOT) + viper.SetEnvPrefix(prefix) + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) + viper.AutomaticEnv() +} + +// This copies all variables like TMROOT to TM_ROOT, +// so we can support both formats for the user +func copyEnvVars(prefix string) { + prefix = strings.ToUpper(prefix) + ps := prefix + "_" + for _, e := range os.Environ() { + kv := strings.SplitN(e, "=", 2) + if len(kv) == 2 { + k, v := kv[0], kv[1] + if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) { + k2 := strings.Replace(k, prefix, ps, 1) + os.Setenv(k2, v) + } + } + } +} + +// Executor wraps the cobra Command with a nicer Execute method +type Executor struct { + *cobra.Command + Exit func(int) // this is os.Exit by default, override in tests +} + +type ExitCoder interface { + ExitCode() int +} + +// execute adds all child commands to the root command sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func (e Executor) Execute() error { + e.SilenceUsage = true + e.SilenceErrors = true + err := e.Command.Execute() + if err != nil { + if viper.GetBool(TraceFlag) { + fmt.Fprintf(os.Stderr, "ERROR: %+v\n", err) + } else { + fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) + } + + // return error code 1 by default, can override it with a special error type + exitCode := 1 + if ec, ok := err.(ExitCoder); ok { + exitCode = ec.ExitCode() + } + e.Exit(exitCode) + } + return err +} + +type cobraCmdFunc func(cmd *cobra.Command, args []string) error + +// Returns a single function that calls each argument function in sequence +// RunE, PreRunE, PersistentPreRunE, etc. all have this same signature +func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { + return func(cmd *cobra.Command, args []string) error { + for _, f := range fs { + if f != nil { + if err := f(cmd, args); err != nil { + return err + } + } + } + return nil + } +} + +// Bind all flags and read the config into viper +func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { + // cmd.Flags() includes flags from this command and all persistent flags from the parent + if err := viper.BindPFlags(cmd.Flags()); err != nil { + return err + } + + homeDir := viper.GetString(HomeFlag) + viper.Set(HomeFlag, homeDir) + viper.SetConfigName("config") // name of config file (without extension) + viper.AddConfigPath(homeDir) // search root directory + viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config + + // If a config file is found, read it in. 
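+    // A missing config file is tolerated (so commands can run before one
+    // exists); any other read error aborts startup.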
+ if err := viper.ReadInConfig(); err == nil { + // stderr, so if we redirect output to json file, this doesn't appear + // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) + } else if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + // ignore not found error, return other errors + return err + } + return nil +} + +func validateOutput(cmd *cobra.Command, args []string) error { + // validate output format + output := viper.GetString(OutputFlag) + switch output { + case "text", "json": + default: + return errors.Errorf("Unsupported output format: %s", output) + } + return nil +} diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go new file mode 100644 index 000000000..04209e493 --- /dev/null +++ b/libs/cli/setup_test.go @@ -0,0 +1,237 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSetupEnv(t *testing.T) { + cases := []struct { + args []string + env map[string]string + expected string + }{ + {nil, nil, ""}, + {[]string{"--foobar", "bang!"}, nil, "bang!"}, + // make sure reset is good + {nil, nil, ""}, + // test both variants of the prefix + {nil, map[string]string{"DEMO_FOOBAR": "good"}, "good"}, + {nil, map[string]string{"DEMOFOOBAR": "silly"}, "silly"}, + // and that cli overrides env... + {[]string{"--foobar", "important"}, + map[string]string{"DEMO_FOOBAR": "ignored"}, "important"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo string + demo := &cobra.Command{ + Use: "demo", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("foobar") + return nil + }, + } + demo.Flags().String("foobar", "", "Some test value from config") + cmd := PrepareBaseCmd(demo, "DEMO", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) + } +} + +func tempDir() string { + cdir, err := ioutil.TempDir("", "test-cli") + if err != nil { + panic(err) + } + return cdir +} + +func TestSetupConfig(t *testing.T) { + // we pre-create two config files we can refer to in the rest of + // the test cases. 
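+    // (WriteConfigVals drops a config.toml under the temp dir, which
+    // bindFlagsLoadViper then discovers via --home / the env prefix.)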
+ cval1 := "fubble" + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) + require.Nil(t, err) + + cases := []struct { + args []string + env map[string]string + expected string + expectedTwo string + }{ + {nil, nil, "", ""}, + // setting on the command line + {[]string{"--boo", "haha"}, nil, "haha", ""}, + {[]string{"--two-words", "rocks"}, nil, "", "rocks"}, + {[]string{"--home", conf1}, nil, cval1, ""}, + // test both variants of the prefix + {nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, + {nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RD_HOME": conf1}, cval1, ""}, + {nil, map[string]string{"RDHOME": conf1}, cval1, ""}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo, two string + boo := &cobra.Command{ + Use: "reader", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("boo") + two = viper.GetString("two-words") + return nil + }, + } + boo.Flags().String("boo", "", "Some test value from config") + boo.Flags().String("two-words", "", "Check out env handling -") + cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) + assert.Equal(t, tc.expectedTwo, two, i) + } +} + +type DemoConfig struct { + Name string `mapstructure:"name"` + Age int `mapstructure:"age"` + Unused int `mapstructure:"unused"` +} + +func TestSetupUnmarshal(t *testing.T) { + // we pre-create two config files we can refer to in the rest of + // the test cases. 
+ cval1, cval2 := "someone", "else" + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"name": cval1}) + require.Nil(t, err) + // even with some ignored fields, should be no problem + conf2 := tempDir() + err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) + require.Nil(t, err) + + // unused is not declared on a flag and remains from base + base := DemoConfig{ + Name: "default", + Age: 42, + Unused: -7, + } + c := func(name string, age int) DemoConfig { + r := base + // anything set on the flags as a default is used over + // the default config object + r.Name = "from-flag" + if name != "" { + r.Name = name + } + if age != 0 { + r.Age = age + } + return r + } + + cases := []struct { + args []string + env map[string]string + expected DemoConfig + }{ + {nil, nil, c("", 0)}, + // setting on the command line + {[]string{"--name", "haha"}, nil, c("haha", 0)}, + {[]string{"--home", conf1}, nil, c(cval1, 0)}, + // test both variants of the prefix + {nil, map[string]string{"MR_AGE": "56"}, c("", 56)}, + {nil, map[string]string{"MR_HOME": conf1}, c(cval1, 0)}, + {[]string{"--age", "17"}, map[string]string{"MRHOME": conf2}, c(cval2, 17)}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + cfg := base + marsh := &cobra.Command{ + Use: "marsh", + RunE: func(cmd *cobra.Command, args []string) error { + return viper.Unmarshal(&cfg) + }, + } + marsh.Flags().String("name", "from-flag", "Some test value from config") + // if we want a flag to use the proper default, then copy it + // from the default config here + marsh.Flags().Int("age", base.Age, "Some test value from config") + cmd := PrepareBaseCmd(marsh, "MR", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, cfg, i) + } +} + +func TestSetupTrace(t *testing.T) { + cases := []struct { + args []string + env map[string]string + long bool + expected string + }{ + {nil, nil, false, "Trace flag = false"}, + {[]string{"--trace"}, nil, true, "Trace flag = true"}, + {[]string{"--no-such-flag"}, nil, false, "unknown flag: --no-such-flag"}, + {nil, map[string]string{"DBG_TRACE": "true"}, true, "Trace flag = true"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + trace := &cobra.Command{ + Use: "trace", + RunE: func(cmd *cobra.Command, args []string) error { + return errors.Errorf("Trace flag = %t", viper.GetBool(TraceFlag)) + }, + } + cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) + require.NotNil(t, err, i) + require.Equal(t, "", stdout, i) + require.NotEqual(t, "", stderr, i) + msg := strings.Split(stderr, "\n") + desired := fmt.Sprintf("ERROR: %s", tc.expected) + assert.Equal(t, desired, msg[0], i) + if tc.long && assert.True(t, len(msg) > 2, i) { + // the next line starts the stack trace... 
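+            // (Executor.Execute prints the error with %+v when --trace is
+            // set, which is what produces the stack trace lines.)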
+ assert.Contains(t, msg[1], "TestSetupTrace", i) + assert.Contains(t, msg[2], "setup_test.go", i) + } + } +} diff --git a/libs/clist/clist.go b/libs/clist/clist.go new file mode 100644 index 000000000..ccb1f5777 --- /dev/null +++ b/libs/clist/clist.go @@ -0,0 +1,384 @@ +package clist + +/* + +The purpose of CList is to provide a goroutine-safe linked-list. +This list can be traversed concurrently by any number of goroutines. +However, removed CElements cannot be added back. +NOTE: Not all methods of container/list are (yet) implemented. +NOTE: Removed elements need to DetachPrev or DetachNext consistently +to ensure garbage collection of removed elements. + +*/ + +import ( + "sync" +) + +/* + +CElement is an element of a linked-list +Traversal from a CElement is goroutine-safe. + +We can't avoid using WaitGroups or for-loops given the documentation +spec without re-implementing the primitives that already exist in +golang/sync. Notice that WaitGroup allows many go-routines to be +simultaneously released, which is what we want. Mutex doesn't do +this. RWMutex does this, but it's clumsy to use in the way that a +WaitGroup would be used -- and we'd end up having two RWMutex's for +prev/next each, which is doubly confusing. + +sync.Cond would be sort-of useful, but we don't need a write-lock in +the for-loop. Use sync.Cond when you need serial access to the +"condition". In our case our condition is if `next != nil || removed`, +and there's no reason to serialize that condition for goroutines +waiting on NextWait() (since it's just a read operation). + +*/ +type CElement struct { + mtx sync.RWMutex + prev *CElement + prevWg *sync.WaitGroup + prevWaitCh chan struct{} + next *CElement + nextWg *sync.WaitGroup + nextWaitCh chan struct{} + removed bool + + Value interface{} // immutable +} + +// Blocking implementation of Next(). +// May return nil iff CElement was tail and got removed. +func (e *CElement) NextWait() *CElement { + for { + e.mtx.RLock() + next := e.next + nextWg := e.nextWg + removed := e.removed + e.mtx.RUnlock() + + if next != nil || removed { + return next + } + + nextWg.Wait() + // e.next doesn't necessarily exist here. + // That's why we need to continue a for-loop. + } +} + +// Blocking implementation of Prev(). +// May return nil iff CElement was head and got removed. +func (e *CElement) PrevWait() *CElement { + for { + e.mtx.RLock() + prev := e.prev + prevWg := e.prevWg + removed := e.removed + e.mtx.RUnlock() + + if prev != nil || removed { + return prev + } + + prevWg.Wait() + } +} + +// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) PrevWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prevWaitCh +} + +// NextWaitChan can be used to wait until Next becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) NextWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.nextWaitCh +} + +// Nonblocking, may return nil if at the end. +func (e *CElement) Next() *CElement { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.next +} + +// Nonblocking, may return nil if at the end. 
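+//
+// A hedged usage sketch of the blocking accessors above (handle is a
+// placeholder for caller logic): a consumer can follow the list as new
+// elements are pushed, blocking on the tail until more arrive:
+//
+//	for e := l.FrontWait(); e != nil; e = e.NextWait() {
+//		handle(e.Value)
+//	}
+//
+// NextWait() returns nil only once e has been removed while it was the tail.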
+func (e *CElement) Prev() *CElement { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prev +} + +func (e *CElement) Removed() bool { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.removed +} + +func (e *CElement) DetachNext() { + if !e.Removed() { + panic("DetachNext() must be called after Remove(e)") + } + e.mtx.Lock() + defer e.mtx.Unlock() + + e.next = nil +} + +func (e *CElement) DetachPrev() { + if !e.Removed() { + panic("DetachPrev() must be called after Remove(e)") + } + e.mtx.Lock() + defer e.mtx.Unlock() + + e.prev = nil +} + +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on nextWg. +func (e *CElement) SetNext(newNext *CElement) { + e.mtx.Lock() + defer e.mtx.Unlock() + + oldNext := e.next + e.next = newNext + if oldNext != nil && newNext == nil { + // See https://golang.org/pkg/sync/: + // + // If a WaitGroup is reused to wait for several independent sets of + // events, new Add calls must happen after all previous Wait calls have + // returned. + e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + e.nextWaitCh = make(chan struct{}) + } + if oldNext == nil && newNext != nil { + e.nextWg.Done() + close(e.nextWaitCh) + } +} + +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on prevWg +func (e *CElement) SetPrev(newPrev *CElement) { + e.mtx.Lock() + defer e.mtx.Unlock() + + oldPrev := e.prev + e.prev = newPrev + if oldPrev != nil && newPrev == nil { + e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWaitCh = make(chan struct{}) + } + if oldPrev == nil && newPrev != nil { + e.prevWg.Done() + close(e.prevWaitCh) + } +} + +func (e *CElement) SetRemoved() { + e.mtx.Lock() + defer e.mtx.Unlock() + + e.removed = true + + // This wakes up anyone waiting in either direction. + if e.prev == nil { + e.prevWg.Done() + close(e.prevWaitCh) + } + if e.next == nil { + e.nextWg.Done() + close(e.nextWaitCh) + } +} + +//-------------------------------------------------------------------------------- + +// CList represents a linked list. +// The zero value for CList is an empty list ready to use. +// Operations are goroutine-safe. +type CList struct { + mtx sync.RWMutex + wg *sync.WaitGroup + waitCh chan struct{} + head *CElement // first element + tail *CElement // last element + len int // list length +} + +func (l *CList) Init() *CList { + l.mtx.Lock() + defer l.mtx.Unlock() + + l.wg = waitGroup1() + l.waitCh = make(chan struct{}) + l.head = nil + l.tail = nil + l.len = 0 + return l +} + +func New() *CList { return new(CList).Init() } + +func (l *CList) Len() int { + l.mtx.RLock() + defer l.mtx.RUnlock() + + return l.len +} + +func (l *CList) Front() *CElement { + l.mtx.RLock() + defer l.mtx.RUnlock() + + return l.head +} + +func (l *CList) FrontWait() *CElement { + // Loop until the head is non-nil else wait and try again + for { + l.mtx.RLock() + head := l.head + wg := l.wg + l.mtx.RUnlock() + + if head != nil { + return head + } + wg.Wait() + // NOTE: If you think l.head exists here, think harder. + } +} + +func (l *CList) Back() *CElement { + l.mtx.RLock() + defer l.mtx.RUnlock() + + return l.tail +} + +func (l *CList) BackWait() *CElement { + for { + l.mtx.RLock() + tail := l.tail + wg := l.wg + l.mtx.RUnlock() + + if tail != nil { + return tail + } + wg.Wait() + // l.tail doesn't necessarily exist here. + // That's why we need to continue a for-loop. + } +} + +// WaitChan can be used to wait until Front or Back becomes not nil. Once it +// does, channel will be closed. 
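+// A minimal hedged sketch of the intended select pattern (quit is a
+// placeholder channel owned by the caller):
+//
+//	select {
+//	case <-l.WaitChan():
+//		el := l.Front() // may still be nil if the element was removed meanwhile
+//	case <-quit:
+//		return
+//	}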
+func (l *CList) WaitChan() <-chan struct{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + return l.waitCh +} + +func (l *CList) PushBack(v interface{}) *CElement { + l.mtx.Lock() + defer l.mtx.Unlock() + + // Construct a new element + e := &CElement{ + prev: nil, + prevWg: waitGroup1(), + prevWaitCh: make(chan struct{}), + next: nil, + nextWg: waitGroup1(), + nextWaitCh: make(chan struct{}), + removed: false, + Value: v, + } + + // Release waiters on FrontWait/BackWait maybe + if l.len == 0 { + l.wg.Done() + close(l.waitCh) + } + l.len++ + + // Modify the tail + if l.tail == nil { + l.head = e + l.tail = e + } else { + e.SetPrev(l.tail) // We must init e first. + l.tail.SetNext(e) // This will make e accessible. + l.tail = e // Update the list. + } + + return e +} + +// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. +// NOTE: As per the contract of CList, removed elements cannot be added back. +func (l *CList) Remove(e *CElement) interface{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + prev := e.Prev() + next := e.Next() + + if l.head == nil || l.tail == nil { + panic("Remove(e) on empty CList") + } + if prev == nil && l.head != e { + panic("Remove(e) with false head") + } + if next == nil && l.tail != e { + panic("Remove(e) with false tail") + } + + // If we're removing the only item, make CList FrontWait/BackWait wait. + if l.len == 1 { + l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.waitCh = make(chan struct{}) + } + + // Update l.len + l.len-- + + // Connect next/prev and set head/tail + if prev == nil { + l.head = next + } else { + prev.SetNext(next) + } + if next == nil { + l.tail = prev + } else { + next.SetPrev(prev) + } + + // Set .Done() on e, otherwise waiters will wait forever. + e.SetRemoved() + + return e.Value +} + +func waitGroup1() (wg *sync.WaitGroup) { + wg = &sync.WaitGroup{} + wg.Add(1) + return +} diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go new file mode 100644 index 000000000..dbdf2f028 --- /dev/null +++ b/libs/clist/clist_test.go @@ -0,0 +1,294 @@ +package clist + +import ( + "fmt" + "runtime" + "sync/atomic" + "testing" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestSmall(t *testing.T) { + l := New() + el1 := l.PushBack(1) + el2 := l.PushBack(2) + el3 := l.PushBack(3) + if l.Len() != 3 { + t.Error("Expected len 3, got ", l.Len()) + } + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r1 := l.Remove(el1) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r2 := l.Remove(el2) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r3 := l.Remove(el3) + + if r1 != 1 { + t.Error("Expected 1, got ", r1) + } + if r2 != 2 { + t.Error("Expected 2, got ", r2) + } + if r3 != 3 { + t.Error("Expected 3, got ", r3) + } + if l.Len() != 0 { + t.Error("Expected len 0, got ", l.Len()) + } + +} + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +// nolint: megacheck +func _TestGCFifo(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := new(uint64) + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. 
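+	// (runtime.SetFinalizer only runs a finalizer after the object becomes
+	// unreachable and a GC cycle notices it, which is why the test forces
+	// runtime.GC() and sleeps below; even then the runtime gives no hard
+	// guarantee, hence the _Test prefix keeping this out of normal runs.)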
+ type value struct { + Int int + } + done := make(chan struct{}) + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + atomic.AddUint64(gcCount, 1) + }) + } + + for el := l.Front(); el != nil; { + l.Remove(el) + //oldEl := el + el = el.Next() + //oldEl.DetachPrev() + //oldEl.DetachNext() + } + + runtime.GC() + time.Sleep(time.Second * 3) + runtime.GC() + time.Sleep(time.Second * 3) + _ = done + + if *gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + *gcCount) + } +} + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +// nolint: megacheck +func _TestGCRandom(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := 0 + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. + type value struct { + Int int + } + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + gcCount++ + }) + } + + els := make([]*CElement, 0, numElements) + for el := l.Front(); el != nil; el = el.Next() { + els = append(els, el) + } + + for _, i := range cmn.RandPerm(numElements) { + el := els[i] + l.Remove(el) + _ = el.Next() + } + + runtime.GC() + time.Sleep(time.Second * 3) + + if gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + gcCount) + } +} + +func TestScanRightDeleteRandom(t *testing.T) { + + const numElements = 10000 + const numTimes = 1000 + const numScanners = 10 + + l := New() + stop := make(chan struct{}) + + els := make([]*CElement, numElements) + for i := 0; i < numElements; i++ { + el := l.PushBack(i) + els[i] = el + } + + // Launch scanner routines that will rapidly iterate over elements. + for i := 0; i < numScanners; i++ { + go func(scannerID int) { + var el *CElement + restartCounter := 0 + counter := 0 + FOR_LOOP: + for { + select { + case <-stop: + fmt.Println("stopped") + break FOR_LOOP + default: + } + if el == nil { + el = l.FrontWait() + restartCounter++ + } + el = el.Next() + counter++ + } + fmt.Printf("Scanner %v restartCounter: %v counter: %v\n", scannerID, restartCounter, counter) + }(i) + } + + // Remove an element, push back an element. + for i := 0; i < numTimes; i++ { + // Pick an element to remove + rmElIdx := cmn.RandIntn(len(els)) + rmEl := els[rmElIdx] + + // Remove it + l.Remove(rmEl) + //fmt.Print(".") + + // Insert a new element + newEl := l.PushBack(-1*i - 1) + els[rmElIdx] = newEl + + if i%100000 == 0 { + fmt.Printf("Pushed %vK elements so far...\n", i/1000) + } + + } + + // Stop scanners + close(stop) + time.Sleep(time.Second * 1) + + // And remove all the elements. 
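+	// (Note: per the CList contract, holders of long-lived references would
+	// also call DetachPrev()/DetachNext() after Remove() so removed elements
+	// can be garbage collected; the elements here are short-lived, so the
+	// loop below skips that step.)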
+ for el := l.Front(); el != nil; el = el.Next() { + l.Remove(el) + } + if l.Len() != 0 { + t.Fatal("Failed to remove all elements from CList") + } +} + +func TestWaitChan(t *testing.T) { + l := New() + ch := l.WaitChan() + + // 1) add one element to an empty list + go l.PushBack(1) + <-ch + + // 2) and remove it + el := l.Front() + v := l.Remove(el) + if v != 1 { + t.Fatal("where is 1 coming from?") + } + + // 3) test iterating forward and waiting for Next (NextWaitChan and Next) + el = l.PushBack(0) + + done := make(chan struct{}) + pushed := 0 + go func() { + for i := 1; i < 100; i++ { + l.PushBack(i) + pushed++ + time.Sleep(time.Duration(cmn.RandIntn(100)) * time.Millisecond) + } + close(done) + }() + + next := el + seen := 0 +FOR_LOOP: + for { + select { + case <-next.NextWaitChan(): + next = next.Next() + seen++ + if next == nil { + continue + } + case <-done: + break FOR_LOOP + case <-time.After(10 * time.Second): + t.Fatal("max execution time") + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } + + // 4) test iterating backwards (PrevWaitChan and Prev) + prev := next + seen = 0 +FOR_LOOP2: + for { + select { + case <-prev.PrevWaitChan(): + prev = prev.Prev() + seen++ + if prev == nil { + t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") + } + case <-time.After(5 * time.Second): + break FOR_LOOP2 + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } +} diff --git a/crypto/LICENSE b/libs/common/LICENSE similarity index 99% rename from crypto/LICENSE rename to libs/common/LICENSE index 3beb77b13..8a142a71b 100644 --- a/crypto/LICENSE +++ b/libs/common/LICENSE @@ -1,4 +1,4 @@ -Tendermint Go-Crypto +Tendermint Go-Common Copyright (C) 2015 Tendermint diff --git a/libs/common/async.go b/libs/common/async.go new file mode 100644 index 000000000..e3293ab4c --- /dev/null +++ b/libs/common/async.go @@ -0,0 +1,175 @@ +package common + +import ( + "sync/atomic" +) + +//---------------------------------------- +// Task + +// val: the value returned after task execution. +// err: the error returned during task completion. +// abort: tells Parallel to return, whether or not all tasks have completed. +type Task func(i int) (val interface{}, err error, abort bool) + +type TaskResult struct { + Value interface{} + Error error +} + +type TaskResultCh <-chan TaskResult + +type taskResultOK struct { + TaskResult + OK bool +} + +type TaskResultSet struct { + chz []TaskResultCh + results []taskResultOK +} + +func newTaskResultSet(chz []TaskResultCh) *TaskResultSet { + return &TaskResultSet{ + chz: chz, + results: make([]taskResultOK, len(chz)), + } +} + +func (trs *TaskResultSet) Channels() []TaskResultCh { + return trs.chz +} + +func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) { + if len(trs.results) <= index { + return TaskResult{}, false + } + resultOK := trs.results[index] + return resultOK.TaskResult, resultOK.OK +} + +// NOTE: Not concurrency safe. +// Writes results to trs.results without waiting for all tasks to complete. +func (trs *TaskResultSet) Reap() *TaskResultSet { + for i := 0; i < len(trs.results); i++ { + var trch = trs.chz[i] + select { + case result, ok := <-trch: + if ok { + // Write result. + trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, + } + } else { + // We already wrote it. + } + default: + // Do nothing. 
+		}
+	}
+	return trs
+}
+
+// NOTE: Not concurrency safe.
+// Like Reap() but waits until all tasks have returned or panic'd.
+func (trs *TaskResultSet) Wait() *TaskResultSet {
+	for i := 0; i < len(trs.results); i++ {
+		var trch = trs.chz[i]
+		result, ok := <-trch
+		if ok {
+			// Write result.
+			trs.results[i] = taskResultOK{
+				TaskResult: result,
+				OK:         true,
+			}
+		} else {
+			// We already wrote it.
+		}
+	}
+	return trs
+}
+
+// Returns the firstmost (by task index) non-nil value as
+// discovered by all previous Reap() calls.
+func (trs *TaskResultSet) FirstValue() interface{} {
+	for _, result := range trs.results {
+		if result.Value != nil {
+			return result.Value
+		}
+	}
+	return nil
+}
+
+// Returns the firstmost (by task index) error as
+// discovered by all previous Reap() calls.
+func (trs *TaskResultSet) FirstError() error {
+	for _, result := range trs.results {
+		if result.Error != nil {
+			return result.Error
+		}
+	}
+	return nil
+}
+
+//----------------------------------------
+// Parallel

+// Run tasks in parallel, with ability to abort early.
+// Returns ok=false iff any of the tasks returned abort=true.
+// NOTE: Do not implement quit features here.  Instead, provide convenient
+// concurrent quit-like primitives, passed implicitly via Task closures. (e.g.
+// it's not Parallel's concern how you quit/abort your tasks).
+func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) {
+	var taskResultChz = make([]TaskResultCh, len(tasks)) // To return.
+	var taskDoneCh = make(chan bool, len(tasks))         // A "wait group" channel, early abort if any true received.
+	var numPanics = new(int32)                           // Keep track of panics to set ok=false later.
+	ok = true                                            // We will set it to false iff any tasks panic'd or returned abort.
+
+	// Start all tasks in parallel in separate goroutines.
+	// When the task is complete, it will appear in the
+	// respective taskResultCh (associated by task index).
+	for i, task := range tasks {
+		var taskResultCh = make(chan TaskResult, 1) // Capacity for 1 result.
+		taskResultChz[i] = taskResultCh
+		go func(i int, task Task, taskResultCh chan TaskResult) {
+			// Recovery
+			defer func() {
+				if pnk := recover(); pnk != nil {
+					atomic.AddInt32(numPanics, 1)
+					// Send panic to taskResultCh.
+					taskResultCh <- TaskResult{nil, ErrorWrap(pnk, "Panic in task")}
+					// Closing taskResultCh lets trs.Wait() work.
+					close(taskResultCh)
+					// Signal that this task is done.
+					taskDoneCh <- false
+				}
+			}()
+			// Run the task.
+			var val, err, abort = task(i)
+			// Send val/err to taskResultCh.
+			// NOTE: Below this line, nothing must panic.
+			taskResultCh <- TaskResult{val, err}
+			// Closing taskResultCh lets trs.Wait() work.
+			close(taskResultCh)
+			// Signal that this task is done.
+			taskDoneCh <- abort
+		}(i, task, taskResultCh)
+	}
+
+	// Wait until all tasks are done, or until abort.
+	// DONE_LOOP:
+	for i := 0; i < len(tasks); i++ {
+		abort := <-taskDoneCh
+		if abort {
+			ok = false
+			break
+		}
+	}
+
+	// Ok is also false if there were any panics.
+	// We must do this check here (after DONE_LOOP).
+	ok = ok && (atomic.LoadInt32(numPanics) == 0)
+
+	return newTaskResultSet(taskResultChz).Reap(), ok
+}
diff --git a/libs/common/async_test.go b/libs/common/async_test.go
new file mode 100644
index 000000000..f565b4bd3
--- /dev/null
+++ b/libs/common/async_test.go
@@ -0,0 +1,156 @@
+package common
+
+import (
+	"errors"
+	"fmt"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParallel(t *testing.T) {
+
+	// Create tasks.
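+	// (Each Task receives its own index i as an argument, so no loop-variable
+	// capture is needed; results are later read back positionally through
+	// trs.LatestResult(i).)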
+ var counter = new(int32) + var tasks = make([]Task, 100*1000) + for i := 0; i < len(tasks); i++ { + tasks[i] = func(i int) (res interface{}, err error, abort bool) { + atomic.AddInt32(counter, 1) + return -1 * i, nil, false + } + } + + // Run in parallel. + var trs, ok = Parallel(tasks...) + assert.True(t, ok) + + // Verify. + assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") + var failedTasks int + for i := 0; i < len(tasks); i++ { + taskResult, ok := trs.LatestResult(i) + if !ok { + assert.Fail(t, "Task #%v did not complete.", i) + failedTasks++ + } else if taskResult.Error != nil { + assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) + failedTasks++ + } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { + assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) + failedTasks++ + } else { + // Good! + } + } + assert.Equal(t, failedTasks, 0, "No task should have failed") + assert.Nil(t, trs.FirstError(), "There should be no errors") + assert.Equal(t, 0, trs.FirstValue(), "First value should be 0") +} + +func TestParallelAbort(t *testing.T) { + + var flow1 = make(chan struct{}, 1) + var flow2 = make(chan struct{}, 1) + var flow3 = make(chan struct{}, 1) // Cap must be > 0 to prevent blocking. + var flow4 = make(chan struct{}, 1) + + // Create tasks. + var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 0) + flow1 <- struct{}{} + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 1) + flow2 <- <-flow1 + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 2) + flow3 <- <-flow2 + return 2, nil, true + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 3) + <-flow4 + return 3, nil, false + }, + } + + // Run in parallel. + var taskResultSet, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we aborted task #2.") + + // Verify task #3. + // Initially taskResultSet.chz[3] sends nothing since flow4 didn't send. + waitTimeout(t, taskResultSet.chz[3], "Task #3") + + // Now let the last task (#3) complete after abort. + flow4 <- <-flow3 + + // Wait until all tasks have returned or panic'd. + taskResultSet.Wait() + + // Verify task #0, #1, #2. + checkResult(t, taskResultSet, 0, 0, nil, nil) + checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) + checkResult(t, taskResultSet, 2, 2, nil, nil) + checkResult(t, taskResultSet, 3, 3, nil, nil) +} + +func TestParallelRecover(t *testing.T) { + + // Create tasks. + var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + panic(2) + }, + } + + // Run in parallel. + var taskResultSet, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we panic'd in task #2.") + + // Verify task #0, #1, #2. 
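+	// (Task #2's panic was recovered by Parallel and converted into an Error
+	// whose Data() is the panic value 2; checkResult's pnk argument asserts
+	// exactly that below.)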
+	checkResult(t, taskResultSet, 0, 0, nil, nil)
+	checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil)
+	checkResult(t, taskResultSet, 2, nil, nil, 2)
+}
+
+// Wait for result
+func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val interface{}, err error, pnk interface{}) {
+	taskResult, ok := taskResultSet.LatestResult(index)
+	taskName := fmt.Sprintf("Task #%v", index)
+	assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName)
+	assert.Equal(t, val, taskResult.Value, taskName)
+	if err != nil {
+		assert.Equal(t, err, taskResult.Error, taskName)
+	} else if pnk != nil {
+		assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName)
+	} else {
+		assert.Nil(t, taskResult.Error, taskName)
+	}
+}
+
+// Wait for timeout (no result)
+func waitTimeout(t *testing.T, taskResultCh TaskResultCh, taskName string) {
+	select {
+	case _, ok := <-taskResultCh:
+		if !ok {
+			assert.Fail(t, "TaskResultCh unexpectedly closed (%v)", taskName)
+		} else {
+			assert.Fail(t, "TaskResultCh unexpectedly returned for %v", taskName)
+		}
+	case <-time.After(1 * time.Second): // TODO use deterministic time?
+		// Good!
+	}
+}
diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go
new file mode 100644
index 000000000..0290921a6
--- /dev/null
+++ b/libs/common/bit_array.go
@@ -0,0 +1,378 @@
+package common
+
+import (
+	"encoding/binary"
+	"fmt"
+	"regexp"
+	"strings"
+	"sync"
+)
+
+type BitArray struct {
+	mtx   sync.Mutex
+	Bits  int      `json:"bits"`  // NOTE: persisted via reflect, must be exported
+	Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported
+}
+
+// There is no BitArray whose Size is 0. Use nil instead.
+func NewBitArray(bits int) *BitArray {
+	if bits <= 0 {
+		return nil
+	}
+	return &BitArray{
+		Bits:  bits,
+		Elems: make([]uint64, (bits+63)/64),
+	}
+}
+
+func (bA *BitArray) Size() int {
+	if bA == nil {
+		return 0
+	}
+	return bA.Bits
+}
+
+// NOTE: behavior is undefined if i >= bA.Bits
+func (bA *BitArray) GetIndex(i int) bool {
+	if bA == nil {
+		return false
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.getIndex(i)
+}
+
+func (bA *BitArray) getIndex(i int) bool {
+	if i >= bA.Bits {
+		return false
+	}
+	return bA.Elems[i/64]&(uint64(1)<<uint(i%64)) > 0
+}
+
+// NOTE: behavior is undefined if i >= bA.Bits
+func (bA *BitArray) SetIndex(i int, v bool) bool {
+	if bA == nil {
+		return false
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.setIndex(i, v)
+}
+
+func (bA *BitArray) setIndex(i int, v bool) bool {
+	if i >= bA.Bits {
+		return false
+	}
+	if v {
+		bA.Elems[i/64] |= (uint64(1) << uint(i%64))
+	} else {
+		bA.Elems[i/64] &= ^(uint64(1) << uint(i%64))
+	}
+	return true
+}
+
+func (bA *BitArray) Copy() *BitArray {
+	if bA == nil {
+		return nil
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.copy()
+}
+
+func (bA *BitArray) copy() *BitArray {
+	c := make([]uint64, len(bA.Elems))
+	copy(c, bA.Elems)
+	return &BitArray{
+		Bits:  bA.Bits,
+		Elems: c,
+	}
+}
+
+func (bA *BitArray) copyBits(bits int) *BitArray {
+	c := make([]uint64, (bits+63)/64)
+	copy(c, bA.Elems)
+	return &BitArray{
+		Bits:  bits,
+		Elems: c,
+	}
+}
+
+// Returns a BitArray of larger bits size.
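+// A worked example in the MarshalJSON notation, where 'x' is a set bit and
+// '_' a cleared one:
+//
+//	"x_x" | "xx___"  =>  "xxx__"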
+func (bA *BitArray) Or(o *BitArray) *BitArray {
+	if bA == nil && o == nil {
+		return nil
+	}
+	if bA == nil && o != nil {
+		return o.Copy()
+	}
+	if o == nil {
+		return bA.Copy()
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	c := bA.copyBits(MaxInt(bA.Bits, o.Bits))
+	for i := 0; i < len(c.Elems); i++ {
+		c.Elems[i] |= o.Elems[i]
+	}
+	return c
+}
+
+// Returns a BitArray of smaller bit size.
+func (bA *BitArray) And(o *BitArray) *BitArray {
+	if bA == nil || o == nil {
+		return nil
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.and(o)
+}
+
+func (bA *BitArray) and(o *BitArray) *BitArray {
+	c := bA.copyBits(MinInt(bA.Bits, o.Bits))
+	for i := 0; i < len(c.Elems); i++ {
+		c.Elems[i] &= o.Elems[i]
+	}
+	return c
+}
+
+func (bA *BitArray) Not() *BitArray {
+	if bA == nil {
+		return nil // Degenerate
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	c := bA.copy()
+	for i := 0; i < len(c.Elems); i++ {
+		c.Elems[i] = ^c.Elems[i]
+	}
+	return c
+}
+
+func (bA *BitArray) Sub(o *BitArray) *BitArray {
+	if bA == nil || o == nil {
+		// TODO: Decide if we should do 1's complement here?
+		return nil
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	if bA.Bits > o.Bits {
+		c := bA.copy()
+		for i := 0; i < len(o.Elems)-1; i++ {
+			// Clear the bits of c that are set in o.
+			c.Elems[i] &= ^o.Elems[i]
+		}
+		i := len(o.Elems) - 1
+		if i >= 0 {
+			for idx := i * 64; idx < o.Bits; idx++ {
+				// NOTE: each individual GetIndex() call to o is safe.
+				c.setIndex(idx, c.getIndex(idx) && !o.GetIndex(idx))
+			}
+		}
+		return c
+	}
+	return bA.and(o.Not()) // Note degenerate case where o == nil
+}
+
+func (bA *BitArray) IsEmpty() bool {
+	if bA == nil {
+		return true // should this be opposite?
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	for _, e := range bA.Elems {
+		if e > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (bA *BitArray) IsFull() bool {
+	if bA == nil {
+		return true
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	// Check all elements except the last
+	for _, elem := range bA.Elems[:len(bA.Elems)-1] {
+		if (^elem) != 0 {
+			return false
+		}
+	}
+
+	// Check that the last element has (lastElemBits) 1's
+	lastElemBits := (bA.Bits+63)%64 + 1
+	lastElem := bA.Elems[len(bA.Elems)-1]
+	return (lastElem+1)&((uint64(1)<<uint(lastElemBits))-1) == 0
+}
+
+func (bA *BitArray) PickRandom() (int, bool) {
+	if bA == nil {
+		return 0, false
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	length := len(bA.Elems)
+	if length == 0 {
+		return 0, false
+	}
+	randElemStart := RandIntn(length)
+	for i := 0; i < length; i++ {
+		elemIdx := ((i + randElemStart) % length)
+		if elemIdx < length-1 {
+			if bA.Elems[elemIdx] > 0 {
+				randBitStart := RandIntn(64)
+				for j := 0; j < 64; j++ {
+					bitIdx := ((j + randBitStart) % 64)
+					if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 {
+						return 64*elemIdx + bitIdx, true
+					}
+				}
+				PanicSanity("should not happen")
+			}
+		} else {
+			// Special case for last elem, to ignore straggler bits
+			elemBits := bA.Bits % 64
+			if elemBits == 0 {
+				elemBits = 64
+			}
+			randBitStart := RandIntn(elemBits)
+			for j := 0; j < elemBits; j++ {
+				bitIdx := ((j + randBitStart) % elemBits)
+				if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 {
+					return 64*elemIdx + bitIdx, true
+				}
+			}
+		}
+	}
+	return 0, false
+}
+
+// String returns a string representation of BitArray: BA{<bit-string>},
+// where <bit-string> is a sequence of 'x' (1) and '_' (0).
+// The <bit-string> includes spaces and newlines to help people.
+// For a simple sequence of 'x' and '_' characters with no spaces or newlines,
+// see the MarshalJSON() method.
+// Example: "BA{_x_}" or "nil-BitArray" for nil.
+func (bA *BitArray) String() string {
+	return bA.StringIndented("")
+}
+
+func (bA *BitArray) StringIndented(indent string) string {
+	if bA == nil {
+		return "nil-BitArray"
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+	return bA.stringIndented(indent)
+}
+
+func (bA *BitArray) stringIndented(indent string) string {
+	lines := []string{}
+	bits := ""
+	for i := 0; i < bA.Bits; i++ {
+		if bA.getIndex(i) {
+			bits += "x"
+		} else {
+			bits += "_"
+		}
+		if i%100 == 99 {
+			lines = append(lines, bits)
+			bits = ""
+		}
+		if i%10 == 9 {
+			bits += indent
+		}
+		if i%50 == 49 {
+			bits += indent
+		}
+	}
+	if len(bits) > 0 {
+		lines = append(lines, bits)
+	}
+	return fmt.Sprintf("BA{%v:%v}", bA.Bits, strings.Join(lines, indent))
+}
+
+func (bA *BitArray) Bytes() []byte {
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	numBytes := (bA.Bits + 7) / 8
+	bytes := make([]byte, numBytes)
+	for i := 0; i < len(bA.Elems); i++ {
+		elemBytes := [8]byte{}
+		binary.LittleEndian.PutUint64(elemBytes[:], bA.Elems[i])
+		copy(bytes[i*8:], elemBytes[:])
+	}
+	return bytes
+}
+
+// NOTE: other bitarray o is not locked when reading,
+// so if necessary, caller must copy or lock o prior to calling Update.
+// If bA is nil, does nothing.
+func (bA *BitArray) Update(o *BitArray) {
+	if bA == nil || o == nil {
+		return
+	}
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	copy(bA.Elems, o.Elems)
+}
+
+// MarshalJSON implements json.Marshaler interface by marshaling bit array
+// using a custom format: a string of '_' or 'x' where 'x' denotes the 1 bit.
+func (bA *BitArray) MarshalJSON() ([]byte, error) {
+	if bA == nil {
+		return []byte("null"), nil
+	}
+
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	bits := `"`
+	for i := 0; i < bA.Bits; i++ {
+		if bA.getIndex(i) {
+			bits += `x`
+		} else {
+			bits += `_`
+		}
+	}
+	bits += `"`
+	return []byte(bits), nil
+}
+
+var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`)
+
+// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom
+// JSON description.
+func (bA *BitArray) UnmarshalJSON(bz []byte) error {
+	b := string(bz)
+	if b == "null" {
+		// This is required e.g. for encoding/json when decoding
+		// into a pointer with pre-allocated BitArray.
+		bA.Bits = 0
+		bA.Elems = nil
+		return nil
+	}
+
+	// Validate 'b'.
+	match := bitArrayJSONRegexp.FindStringSubmatch(b)
+	if match == nil {
+		return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b)
+	}
+	bits := match[1]
+
+	// Construct new BitArray and copy over.
+	numBits := len(bits)
+	bA2 := NewBitArray(numBits)
+	for i := 0; i < numBits; i++ {
+		if bits[i] == 'x' {
+			bA2.SetIndex(i, true)
+		}
+	}
+	*bA = *bA2
+	return nil
+}
diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go
new file mode 100644
index 000000000..c697ba5de
--- /dev/null
+++ b/libs/common/bit_array_test.go
@@ -0,0 +1,267 @@
+package common
+
+import (
+	"bytes"
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func randBitArray(bits int) (*BitArray, []byte) {
+	src := RandBytes((bits + 7) / 8)
+	bA := NewBitArray(bits)
+	for i := 0; i < len(src); i++ {
+		for j := 0; j < 8; j++ {
+			if i*8+j >= bits {
+				return bA, src
+			}
+			setBit := src[i]&(1<<uint(j)) > 0
+			bA.SetIndex(i*8+j, setBit)
+		}
+	}
+	return bA, src
+}
+
+func TestAnd(t *testing.T) {
+
+	bA1, _ := randBitArray(51)
+	bA2, _ := randBitArray(31)
+	bA3 := bA1.And(bA2)
+
+	var bNil *BitArray
+	require.Equal(t, bNil.And(bA1), (*BitArray)(nil))
+	require.Equal(t, bA1.And(nil), (*BitArray)(nil))
+	require.Equal(t, bNil.And(nil), (*BitArray)(nil))
+
+	if bA3.Bits != 31 {
+		t.Error("Expected min bits", bA3.Bits)
+	}
+	if len(bA3.Elems) != len(bA2.Elems) {
+		t.Error("Expected min elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i) && bA2.GetIndex(i)
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
+		}
+	}
+}
+
+func TestOr(t *testing.T) {
+
+	bA1, _ := randBitArray(51)
+	bA2, _ := randBitArray(31)
+	bA3 := bA1.Or(bA2)
+
+	bNil := (*BitArray)(nil)
+	require.Equal(t, bNil.Or(bA1), bA1)
+	require.Equal(t, bA1.Or(nil), bA1)
+	require.Equal(t, bNil.Or(nil), (*BitArray)(nil))
+
+	if bA3.Bits != 51 {
+		t.Error("Expected max bits")
+	}
+	if len(bA3.Elems) != len(bA1.Elems) {
+		t.Error("Expected max elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i) || bA2.GetIndex(i)
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
+		}
+	}
+}
+
+func TestSub1(t *testing.T) {
+
+	bA1, _ := randBitArray(31)
+	bA2, _ := randBitArray(51)
+	bA3 := bA1.Sub(bA2)
+
+	bNil := (*BitArray)(nil)
+	require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil))
+	require.Equal(t, bA1.Sub(nil), (*BitArray)(nil))
+	require.Equal(t, bNil.Sub(nil), (*BitArray)(nil))
+
+	if bA3.Bits != bA1.Bits {
+		t.Error("Expected bA1 bits")
+	}
+	if len(bA3.Elems) != len(bA1.Elems) {
+		t.Error("Expected bA1 elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i)
+		if bA2.GetIndex(i) {
+			expected = false
+		}
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
+		}
+	}
+}
+
+func TestSub2(t *testing.T) {
+
+	bA1, _ := randBitArray(51)
+	bA2, _ := randBitArray(31)
+	bA3 := bA1.Sub(bA2)
+
+	bNil := (*BitArray)(nil)
+	require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil))
+	require.Equal(t, bA1.Sub(nil), (*BitArray)(nil))
+	require.Equal(t, bNil.Sub(nil), (*BitArray)(nil))
+
+	if bA3.Bits != bA1.Bits {
+		t.Error("Expected bA1 bits")
+	}
+	if len(bA3.Elems) != len(bA1.Elems) {
+		t.Error("Expected bA1 elems length")
+	}
+	for i := 0; i < bA3.Bits; i++ {
+		expected := bA1.GetIndex(i)
+		if i < bA2.Bits && bA2.GetIndex(i) {
+			expected = false
+		}
+		if bA3.GetIndex(i) != expected {
+			t.Error("Wrong bit from bA3")
+		}
+	}
+}
+
+func TestPickRandom(t *testing.T) {
+	for idx := 0; idx < 123; idx++ {
+		bA1 := NewBitArray(123)
+		bA1.SetIndex(idx, 
true) + index, ok := bA1.PickRandom() + if !ok { + t.Fatal("Expected to pick element but got none") + } + if index != idx { + t.Fatalf("Expected to pick element at %v but got wrong index", idx) + } + } +} + +func TestBytes(t *testing.T) { + bA := NewBitArray(4) + bA.SetIndex(0, true) + check := func(bA *BitArray, bz []byte) { + if !bytes.Equal(bA.Bytes(), bz) { + panic(Fmt("Expected %X but got %X", bz, bA.Bytes())) + } + } + check(bA, []byte{0x01}) + bA.SetIndex(3, true) + check(bA, []byte{0x09}) + + bA = NewBitArray(9) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + + bA = NewBitArray(16) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + bA.SetIndex(9, true) + check(bA, []byte{0x80, 0x03}) +} + +func TestEmptyFull(t *testing.T) { + ns := []int{47, 123} + for _, n := range ns { + bA := NewBitArray(n) + if !bA.IsEmpty() { + t.Fatal("Expected bit array to be empty") + } + for i := 0; i < n; i++ { + bA.SetIndex(i, true) + } + if !bA.IsFull() { + t.Fatal("Expected bit array to be full") + } + } +} + +func TestUpdateNeverPanics(t *testing.T) { + newRandBitArray := func(n int) *BitArray { + ba, _ := randBitArray(n) + return ba + } + pairs := []struct { + a, b *BitArray + }{ + {nil, nil}, + {newRandBitArray(10), newRandBitArray(12)}, + {newRandBitArray(23), newRandBitArray(23)}, + {newRandBitArray(37), nil}, + {nil, NewBitArray(10)}, + } + + for _, pair := range pairs { + a, b := pair.a, pair.b + a.Update(b) + b.Update(a) + } +} + +func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { + bitList := []int{-127, -128, -1 << 31} + for _, bits := range bitList { + _ = NewBitArray(bits) + } +} + +func TestJSONMarshalUnmarshal(t *testing.T) { + + bA1 := NewBitArray(0) + + bA2 := NewBitArray(1) + + bA3 := NewBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + testCases := []struct { + bA *BitArray + marshalledBA string + }{ + {nil, `null`}, + {bA1, `null`}, + {bA2, `"_"`}, + {bA3, `"x"`}, + {bA4, `"xx___"`}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz, err := json.Marshal(tc.bA) + require.NoError(t, err) + + assert.Equal(t, tc.marshalledBA, string(bz)) + + var unmarshalledBA *BitArray + err = json.Unmarshal(bz, &unmarshalledBA) + require.NoError(t, err) + + if tc.bA == nil { + require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Bits, unmarshalledBA.Bits) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} diff --git a/libs/common/bytes.go b/libs/common/bytes.go new file mode 100644 index 000000000..711720aa7 --- /dev/null +++ b/libs/common/bytes.go @@ -0,0 +1,62 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of HexBytes is to enable HEX-encoding for json/encoding. +type HexBytes []byte + +// Marshal needed for protobuf compatibility +func (bz HexBytes) Marshal() ([]byte, error) { + return bz, nil +} + +// Unmarshal needed for protobuf compatibility +func (bz *HexBytes) Unmarshal(data []byte) error { + *bz = data + return nil +} + +// This is the point of Bytes. 
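+// For example, HexBytes{0xDE, 0xAD} marshals to the JSON string "DEAD",
+// where a plain []byte would marshal to base64 ("3q0=").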
+func (bz HexBytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(bz)) + jbz := make([]byte, len(s)+2) + jbz[0] = '"' + copy(jbz[1:], []byte(s)) + jbz[len(jbz)-1] = '"' + return jbz, nil +} + +// This is the point of Bytes. +func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("Invalid hex string: %s", data) + } + bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) + if err != nil { + return err + } + *bz = bz2 + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (bz HexBytes) Bytes() []byte { + return bz +} + +func (bz HexBytes) String() string { + return strings.ToUpper(hex.EncodeToString(bz)) +} + +func (bz HexBytes) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + s.Write([]byte(fmt.Sprintf("%p", bz))) + default: + s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) + } +} diff --git a/libs/common/bytes_test.go b/libs/common/bytes_test.go new file mode 100644 index 000000000..9e11988f2 --- /dev/null +++ b/libs/common/bytes_test.go @@ -0,0 +1,65 @@ +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + bz := []byte("hello world") + dataB := HexBytes(bz) + bz2, err := dataB.Marshal() + assert.Nil(t, err) + assert.Equal(t, bz, bz2) + + var dataB2 HexBytes + err = (&dataB2).Unmarshal(bz) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) +} + +// Test that the hex encoding works. +func TestJSONMarshal(t *testing.T) { + + type TestStruct struct { + B1 []byte + B2 HexBytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, + {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. + ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, HexBytes(tc.input)) + }) + } +} diff --git a/libs/common/byteslice.go b/libs/common/byteslice.go new file mode 100644 index 000000000..57b3a8a2b --- /dev/null +++ b/libs/common/byteslice.go @@ -0,0 +1,73 @@ +package common + +import ( + "bytes" +) + +// Fingerprint returns the first 6 bytes of a byte slice. +// If the slice is less than 6 bytes, the fingerprint +// contains trailing zeroes. 
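+// For example, Fingerprint([]byte{0xDE, 0xAD}) returns
+// []byte{0xDE, 0xAD, 0x00, 0x00, 0x00, 0x00}.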
+func Fingerprint(slice []byte) []byte { + fingerprint := make([]byte, 6) + copy(fingerprint, slice) + return fingerprint +} + +func IsZeros(slice []byte) bool { + for _, byt := range slice { + if byt != byte(0) { + return false + } + } + return true +} + +func RightPadBytes(slice []byte, l int) []byte { + if l < len(slice) { + return slice + } + padded := make([]byte, l) + copy(padded[0:len(slice)], slice) + return padded +} + +func LeftPadBytes(slice []byte, l int) []byte { + if l < len(slice) { + return slice + } + padded := make([]byte, l) + copy(padded[l-len(slice):], slice) + return padded +} + +func TrimmedString(b []byte) string { + trimSet := string([]byte{0}) + return string(bytes.TrimLeft(b, trimSet)) + +} + +// PrefixEndBytes returns the end byteslice for a noninclusive range +// that would include all byte slices for which the input is the prefix +func PrefixEndBytes(prefix []byte) []byte { + if prefix == nil { + return nil + } + + end := make([]byte, len(prefix)) + copy(end, prefix) + finished := false + + for !finished { + if end[len(end)-1] != byte(255) { + end[len(end)-1]++ + finished = true + } else { + end = end[:len(end)-1] + if len(end) == 0 { + end = nil + finished = true + } + } + } + return end +} diff --git a/libs/common/byteslice_test.go b/libs/common/byteslice_test.go new file mode 100644 index 000000000..98085d125 --- /dev/null +++ b/libs/common/byteslice_test.go @@ -0,0 +1,28 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPrefixEndBytes(t *testing.T) { + assert := assert.New(t) + + var testCases = []struct { + prefix []byte + expected []byte + }{ + {[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}}, + {[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}}, + {[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}}, + {[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}}, + {[]byte{byte(255), byte(255), byte(255)}, nil}, + {nil, nil}, + } + + for _, test := range testCases { + end := PrefixEndBytes(test.prefix) + assert.Equal(test.expected, end) + } +} diff --git a/libs/common/cmap.go b/libs/common/cmap.go new file mode 100644 index 000000000..c65c27d4c --- /dev/null +++ b/libs/common/cmap.go @@ -0,0 +1,73 @@ +package common + +import "sync" + +// CMap is a goroutine-safe map +type CMap struct { + m map[string]interface{} + l sync.Mutex +} + +func NewCMap() *CMap { + return &CMap{ + m: make(map[string]interface{}), + } +} + +func (cm *CMap) Set(key string, value interface{}) { + cm.l.Lock() + defer cm.l.Unlock() + cm.m[key] = value +} + +func (cm *CMap) Get(key string) interface{} { + cm.l.Lock() + defer cm.l.Unlock() + return cm.m[key] +} + +func (cm *CMap) Has(key string) bool { + cm.l.Lock() + defer cm.l.Unlock() + _, ok := cm.m[key] + return ok +} + +func (cm *CMap) Delete(key string) { + cm.l.Lock() + defer cm.l.Unlock() + delete(cm.m, key) +} + +func (cm *CMap) Size() int { + cm.l.Lock() + defer cm.l.Unlock() + return len(cm.m) +} + +func (cm *CMap) Clear() { + cm.l.Lock() + defer cm.l.Unlock() + cm.m = make(map[string]interface{}) +} + +func (cm *CMap) Keys() []string { + cm.l.Lock() + defer cm.l.Unlock() + + keys := []string{} + for k := range cm.m { + keys = append(keys, k) + } + return keys +} + +func (cm *CMap) Values() []interface{} { + cm.l.Lock() + defer cm.l.Unlock() + items := []interface{}{} + for _, v := range cm.m { + items = append(items, v) + } + return items +} diff --git 
a/libs/common/cmap_test.go b/libs/common/cmap_test.go new file mode 100644 index 000000000..c665a7f3e --- /dev/null +++ b/libs/common/cmap_test.go @@ -0,0 +1,53 @@ +package common + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIterateKeysWithValues(t *testing.T) { + cmap := NewCMap() + + for i := 1; i <= 10; i++ { + cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) + } + + // Testing size + assert.Equal(t, 10, cmap.Size()) + assert.Equal(t, 10, len(cmap.Keys())) + assert.Equal(t, 10, len(cmap.Values())) + + // Iterating Keys, checking for matching Value + for _, key := range cmap.Keys() { + val := strings.Replace(key, "key", "value", -1) + assert.Equal(t, val, cmap.Get(key)) + } + + // Test if all keys are within []Keys() + keys := cmap.Keys() + for i := 1; i <= 10; i++ { + assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") + } + + // Delete 1 Key + cmap.Delete("key1") + + assert.NotEqual(t, len(keys), len(cmap.Keys()), "[]keys and []Keys() should not be equal, they are copies, one item was removed") +} + +func TestContains(t *testing.T) { + cmap := NewCMap() + + cmap.Set("key1", "value1") + + // Test for known values + assert.True(t, cmap.Has("key1")) + assert.Equal(t, "value1", cmap.Get("key1")) + + // Test for unknown values + assert.False(t, cmap.Has("key2")) + assert.Nil(t, cmap.Get("key2")) +} diff --git a/libs/common/colors.go b/libs/common/colors.go new file mode 100644 index 000000000..049ce7a50 --- /dev/null +++ b/libs/common/colors.go @@ -0,0 +1,95 @@ +package common + +import ( + "fmt" + "strings" +) + +const ( + ANSIReset = "\x1b[0m" + ANSIBright = "\x1b[1m" + ANSIDim = "\x1b[2m" + ANSIUnderscore = "\x1b[4m" + ANSIBlink = "\x1b[5m" + ANSIReverse = "\x1b[7m" + ANSIHidden = "\x1b[8m" + + ANSIFgBlack = "\x1b[30m" + ANSIFgRed = "\x1b[31m" + ANSIFgGreen = "\x1b[32m" + ANSIFgYellow = "\x1b[33m" + ANSIFgBlue = "\x1b[34m" + ANSIFgMagenta = "\x1b[35m" + ANSIFgCyan = "\x1b[36m" + ANSIFgWhite = "\x1b[37m" + + ANSIBgBlack = "\x1b[40m" + ANSIBgRed = "\x1b[41m" + ANSIBgGreen = "\x1b[42m" + ANSIBgYellow = "\x1b[43m" + ANSIBgBlue = "\x1b[44m" + ANSIBgMagenta = "\x1b[45m" + ANSIBgCyan = "\x1b[46m" + ANSIBgWhite = "\x1b[47m" +) + +// color the string s with color 'color' +// unless s is already colored +func treat(s string, color string) string { + if len(s) > 2 && s[:2] == "\x1b[" { + return s + } + return color + s + ANSIReset +} + +func treatAll(color string, args ...interface{}) string { + var parts []string + for _, arg := range args { + parts = append(parts, treat(fmt.Sprintf("%v", arg), color)) + } + return strings.Join(parts, "") +} + +func Black(args ...interface{}) string { + return treatAll(ANSIFgBlack, args...) +} + +func Red(args ...interface{}) string { + return treatAll(ANSIFgRed, args...) +} + +func Green(args ...interface{}) string { + return treatAll(ANSIFgGreen, args...) +} + +func Yellow(args ...interface{}) string { + return treatAll(ANSIFgYellow, args...) +} + +func Blue(args ...interface{}) string { + return treatAll(ANSIFgBlue, args...) +} + +func Magenta(args ...interface{}) string { + return treatAll(ANSIFgMagenta, args...) +} + +func Cyan(args ...interface{}) string { + return treatAll(ANSIFgCyan, args...) +} + +func White(args ...interface{}) string { + return treatAll(ANSIFgWhite, args...) 
+} + +func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string { + s := "" + for _, b := range data { + if 0x21 <= b && b < 0x7F { + s += textColor(string(b)) + } else { + s += bytesColor(Fmt("%02X", b)) + } + } + return s +} diff --git a/libs/common/date.go b/libs/common/date.go new file mode 100644 index 000000000..e017a4b41 --- /dev/null +++ b/libs/common/date.go @@ -0,0 +1,43 @@ +package common + +import ( + "strings" + "time" + + "github.com/pkg/errors" +) + +// TimeLayout helps to parse a date string of the format YYYY-MM-DD +// Intended to be used with the following function: +// time.Parse(TimeLayout, date) +var TimeLayout = "2006-01-02" //this represents YYYY-MM-DD + +// ParseDateRange parses a date range string of the format start:end +// where the start and end date are of the format YYYY-MM-DD. +// The parsed dates are time.Time and will return the zero time for +// unbounded dates, ex: +// unbounded start: :2000-12-31 +// unbounded end: 2000-12-31: +func ParseDateRange(dateRange string) (startDate, endDate time.Time, err error) { + dates := strings.Split(dateRange, ":") + if len(dates) != 2 { + err = errors.New("bad date range, must be in format date:date") + return + } + parseDate := func(date string) (out time.Time, err error) { + if len(date) == 0 { + return + } + out, err = time.Parse(TimeLayout, date) + return + } + startDate, err = parseDate(dates[0]) + if err != nil { + return + } + endDate, err = parseDate(dates[1]) + if err != nil { + return + } + return +} diff --git a/libs/common/date_test.go b/libs/common/date_test.go new file mode 100644 index 000000000..2c0632477 --- /dev/null +++ b/libs/common/date_test.go @@ -0,0 +1,46 @@ +package common + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, time.UTC) + date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC) + zero time.Time +) + +func TestParseDateRange(t *testing.T) { + assert := assert.New(t) + + var testDates = []struct { + dateStr string + start time.Time + end time.Time + errNil bool + }{ + {"2015-12-31:2016-12-31", date, date2, true}, + {"2015-12-31:", date, zero, true}, + {":2016-12-31", zero, date2, true}, + {"2016-12-31", zero, zero, false}, + {"2016-31-12:", zero, zero, false}, + {":2016-31-12", zero, zero, false}, + } + + for _, test := range testDates { + start, end, err := ParseDateRange(test.dateStr) + if test.errNil { + assert.Nil(err) + testPtr := func(want, have time.Time) { + assert.True(have.Equal(want)) + } + testPtr(test.start, start) + testPtr(test.end, end) + } else { + assert.NotNil(err) + } + } +} diff --git a/libs/common/errors.go b/libs/common/errors.go new file mode 100644 index 000000000..5c31b8968 --- /dev/null +++ b/libs/common/errors.go @@ -0,0 +1,246 @@ +package common + +import ( + "fmt" + "runtime" +) + +//---------------------------------------- +// Convenience method. + +func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { + if causeCmnError, ok := cause.(*cmnError); ok { + msg := Fmt(format, args...) + return causeCmnError.Stacktrace().Trace(1, msg) + } else if cause == nil { + return newCmnError(FmtError{format, args}).Stacktrace() + } else { + // NOTE: causeCmnError is a typed nil here. + msg := Fmt(format, args...) 
+		return newCmnError(cause).Stacktrace().Trace(1, msg)
+	}
+}
+
+//----------------------------------------
+// Error & cmnError
+
+/*
+
+Usage with arbitrary error data:
+
+```go
+	// Error construction
+	type MyError struct{}
+	var err1 Error = NewErrorWithData(MyError{})
+	...
+	// Wrapping
+	var err2 Error = ErrorWrap(err1, "another message")
+	if err1 != err2 {
+		panic("should be the same")
+	}
+	...
+	// Error handling
+	switch err2.Data().(type) {
+	case MyError: ...
+	default: ...
+	}
+```
+*/
+type Error interface {
+	Error() string
+	Stacktrace() Error
+	Trace(offset int, format string, args ...interface{}) Error
+	Data() interface{}
+}
+
+// New Error with formatted message.
+// The Error's Data will be a FmtError type.
+func NewError(format string, args ...interface{}) Error {
+	err := FmtError{format, args}
+	return newCmnError(err)
+}
+
+// New Error with specified data.
+func NewErrorWithData(data interface{}) Error {
+	return newCmnError(data)
+}
+
+type cmnError struct {
+	data       interface{}    // associated data
+	msgtraces  []msgtraceItem // all messages traced
+	stacktrace []uintptr      // first stack trace
+}
+
+var _ Error = &cmnError{}
+
+// NOTE: do not expose.
+func newCmnError(data interface{}) *cmnError {
+	return &cmnError{
+		data:       data,
+		msgtraces:  nil,
+		stacktrace: nil,
+	}
+}
+
+// Implements error.
+func (err *cmnError) Error() string {
+	return fmt.Sprintf("%v", err)
+}
+
+// Captures a stacktrace if one was not already captured.
+func (err *cmnError) Stacktrace() Error {
+	if err.stacktrace == nil {
+		var offset = 3
+		var depth = 32
+		err.stacktrace = captureStacktrace(offset, depth)
+	}
+	return err
+}
+
+// Add tracing information with msg.
+// Set offset=0 unless wrapped with some function, then offset > 0.
+func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error {
+	msg := Fmt(format, args...)
+	return err.doTrace(msg, offset)
+}
+
+// Return the "data" of this error.
+// Data could be used for error handling/switching,
+// or for holding general error/debug information.
+func (err *cmnError) Data() interface{} {
+	return err.data
+}
+
+func (err *cmnError) doTrace(msg string, n int) Error {
+	pc, _, _, _ := runtime.Caller(n + 2) // +1 for doTrace(). +1 for the caller.
+	// Include file & line number & msg.
+	// Do not include the whole stack trace.
+	err.msgtraces = append(err.msgtraces, msgtraceItem{
+		pc:  pc,
+		msg: msg,
+	})
+	return err
+}
+
+func (err *cmnError) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'p':
+		s.Write([]byte(fmt.Sprintf("%p", &err)))
+	default:
+		if s.Flag('#') {
+			s.Write([]byte("--= Error =--\n"))
+			// Write data.
+			s.Write([]byte(fmt.Sprintf("Data: %#v\n", err.data)))
+			// Write msg trace items.
+			s.Write([]byte(fmt.Sprintf("Msg Traces:\n")))
+			for i, msgtrace := range err.msgtraces {
+				s.Write([]byte(fmt.Sprintf(" %4d %s\n", i, msgtrace.String())))
+			}
+			// Write stack trace.
+			if err.stacktrace != nil {
+				s.Write([]byte(fmt.Sprintf("Stack Trace:\n")))
+				for i, pc := range err.stacktrace {
+					fnc := runtime.FuncForPC(pc)
+					file, line := fnc.FileLine(pc)
+					s.Write([]byte(fmt.Sprintf(" %4d %s:%d\n", i, file, line)))
+				}
+			}
+			s.Write([]byte("--= /Error =--\n"))
+		} else {
+			// Write msg.
+			s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc?
+ } + } +} + +//---------------------------------------- +// stacktrace & msgtraceItem + +func captureStacktrace(offset int, depth int) []uintptr { + var pcs = make([]uintptr, depth) + n := runtime.Callers(offset, pcs) + return pcs[0:n] +} + +type msgtraceItem struct { + pc uintptr + msg string +} + +func (mti msgtraceItem) String() string { + fnc := runtime.FuncForPC(mti.pc) + file, line := fnc.FileLine(mti.pc) + return fmt.Sprintf("%s:%d - %s", + file, line, + mti.msg, + ) +} + +//---------------------------------------- +// fmt error + +/* + +FmtError is the data type for NewError() (e.g. NewError().Data().(FmtError)) +Theoretically it could be used to switch on the format string. + +```go + // Error construction + var err1 error = NewError("invalid username %v", "BOB") + var err2 error = NewError("another kind of error") + ... + // Error handling + switch err1.Data().(cmn.FmtError).Format() { + case "invalid username %v": ... + case "another kind of error": ... + default: ... + } +``` +*/ +type FmtError struct { + format string + args []interface{} +} + +func (fe FmtError) Error() string { + return fmt.Sprintf(fe.format, fe.args...) +} + +func (fe FmtError) String() string { + return fmt.Sprintf("FmtError{format:%v,args:%v}", + fe.format, fe.args) +} + +func (fe FmtError) Format() string { + return fe.format +} + +//---------------------------------------- +// Panic wrappers +// XXX DEPRECATED + +// A panic resulting from a sanity check means there is a programmer error +// and some guarantee is not satisfied. +// XXX DEPRECATED +func PanicSanity(v interface{}) { + panic(Fmt("Panicked on a Sanity Check: %v", v)) +} + +// A panic here means something has gone horribly wrong, in the form of data corruption or +// failure of the operating system. In a correct/healthy system, these should never fire. +// If they do, it's indicative of a much more serious problem. +// XXX DEPRECATED +func PanicCrisis(v interface{}) { + panic(Fmt("Panicked on a Crisis: %v", v)) +} + +// Indicates a failure of consensus. Someone was malicious or something has +// gone horribly wrong. 
These should really boot us into an "emergency-recover" mode +// XXX DEPRECATED +func PanicConsensus(v interface{}) { + panic(Fmt("Panicked on a Consensus Failure: %v", v)) +} + +// For those times when we're not sure if we should panic +// XXX DEPRECATED +func PanicQ(v interface{}) { + panic(Fmt("Panicked questionably: %v", v)) +} diff --git a/libs/common/errors_test.go b/libs/common/errors_test.go new file mode 100644 index 000000000..52c78a765 --- /dev/null +++ b/libs/common/errors_test.go @@ -0,0 +1,101 @@ +package common + +import ( + fmt "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorPanic(t *testing.T) { + type pnk struct { + msg string + } + + capturePanic := func() (err Error) { + defer func() { + if r := recover(); r != nil { + err = ErrorWrap(r, "This is the message in ErrorWrap(r, message).") + } + }() + panic(pnk{"something"}) + } + + var err = capturePanic() + + assert.Equal(t, pnk{"something"}, err.Data()) + assert.Equal(t, "Error{{something}}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).") + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapSomething(t *testing.T) { + + var err = ErrorWrap("something", "formatter%v%v", 0, 1) + + assert.Equal(t, "something", err.Data()) + assert.Equal(t, "Error{something}", fmt.Sprintf("%v", err)) + assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapNothing(t *testing.T) { + + var err = ErrorWrap(nil, "formatter%v%v", 0, 1) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorNewError(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace") +} + +func TestErrorNewErrorWithStacktrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1).Stacktrace() + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorNewErrorWithTrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + err.Trace(0, "trace %v", 1) + err.Trace(0, "trace %v", 2) + err.Trace(0, "trace %v", 3) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + dump := fmt.Sprintf("%#v", err) + assert.NotContains(t, dump, "Stack Trace") + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 1`, dump) + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 2`, dump) + assert.Regexp(t, 
`common/errors_test\.go:[0-9]+ - trace 3`, dump) +} + +func TestErrorWrapError(t *testing.T) { + var err1 error = NewError("my message") + var err2 error = ErrorWrap(err1, "another message") + assert.Equal(t, err1, err2) +} diff --git a/libs/common/heap.go b/libs/common/heap.go new file mode 100644 index 000000000..b3bcb9db8 --- /dev/null +++ b/libs/common/heap.go @@ -0,0 +1,125 @@ +package common + +import ( + "bytes" + "container/heap" +) + +/* + Example usage: + + ``` + h := NewHeap() + + h.Push("msg1", 1) + h.Push("msg3", 3) + h.Push("msg2", 2) + + fmt.Println(h.Pop()) // msg1 + fmt.Println(h.Pop()) // msg2 + fmt.Println(h.Pop()) // msg3 + ``` +*/ +type Heap struct { + pq priorityQueue +} + +func NewHeap() *Heap { + return &Heap{pq: make([]*pqItem, 0)} +} + +func (h *Heap) Len() int64 { + return int64(len(h.pq)) +} + +func (h *Heap) Push(value interface{}, priority int) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) +} + +func (h *Heap) PushBytes(value interface{}, priority []byte) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)}) +} + +func (h *Heap) PushComparable(value interface{}, priority Comparable) { + heap.Push(&h.pq, &pqItem{value: value, priority: priority}) +} + +func (h *Heap) Peek() interface{} { + if len(h.pq) == 0 { + return nil + } + return h.pq[0].value +} + +func (h *Heap) Update(value interface{}, priority Comparable) { + h.pq.Update(h.pq[0], value, priority) +} + +func (h *Heap) Pop() interface{} { + item := heap.Pop(&h.pq).(*pqItem) + return item.value +} + +//----------------------------------------------------------------------------- +// From: http://golang.org/pkg/container/heap/#example__priorityQueue + +type pqItem struct { + value interface{} + priority Comparable + index int +} + +type priorityQueue []*pqItem + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + return pq[i].priority.Less(pq[j].priority) +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *priorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*pqItem) + item.index = n + *pq = append(*pq, item) +} + +func (pq *priorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) { + item.value = value + item.priority = priority + heap.Fix(pq, item.index) +} + +//-------------------------------------------------------------------------------- +// Comparable + +type Comparable interface { + Less(o interface{}) bool +} + +type cmpInt int + +func (i cmpInt) Less(o interface{}) bool { + return int(i) < int(o.(cmpInt)) +} + +type cmpBytes []byte + +func (bz cmpBytes) Less(o interface{}) bool { + return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0 +} diff --git a/libs/common/int.go b/libs/common/int.go new file mode 100644 index 000000000..a8a5f1e00 --- /dev/null +++ b/libs/common/int.go @@ -0,0 +1,65 @@ +package common + +import ( + "encoding/binary" + "sort" +) + +// Sort for []uint64 + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Uint64Slice) Sort() { sort.Sort(p) } + +func SearchUint64s(a []uint64, x uint64) int { + return sort.Search(len(a), func(i int) bool { 
return a[i] >= x }) +} + +func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } + +//-------------------------------------------------------------------------------- + +func PutUint64LE(dest []byte, i uint64) { + binary.LittleEndian.PutUint64(dest, i) +} + +func GetUint64LE(src []byte) uint64 { + return binary.LittleEndian.Uint64(src) +} + +func PutUint64BE(dest []byte, i uint64) { + binary.BigEndian.PutUint64(dest, i) +} + +func GetUint64BE(src []byte) uint64 { + return binary.BigEndian.Uint64(src) +} + +func PutInt64LE(dest []byte, i int64) { + binary.LittleEndian.PutUint64(dest, uint64(i)) +} + +func GetInt64LE(src []byte) int64 { + return int64(binary.LittleEndian.Uint64(src)) +} + +func PutInt64BE(dest []byte, i int64) { + binary.BigEndian.PutUint64(dest, uint64(i)) +} + +func GetInt64BE(src []byte) int64 { + return int64(binary.BigEndian.Uint64(src)) +} + +// IntInSlice returns true if a is found in the list. +func IntInSlice(a int, list []int) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/libs/common/int_test.go b/libs/common/int_test.go new file mode 100644 index 000000000..1ecc7844c --- /dev/null +++ b/libs/common/int_test.go @@ -0,0 +1,14 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIntInSlice(t *testing.T) { + assert.True(t, IntInSlice(1, []int{1, 2, 3})) + assert.False(t, IntInSlice(4, []int{1, 2, 3})) + assert.True(t, IntInSlice(0, []int{0})) + assert.False(t, IntInSlice(0, []int{})) +} diff --git a/libs/common/io.go b/libs/common/io.go new file mode 100644 index 000000000..fa0443e09 --- /dev/null +++ b/libs/common/io.go @@ -0,0 +1,74 @@ +package common + +import ( + "bytes" + "errors" + "io" +) + +type PrefixedReader struct { + Prefix []byte + reader io.Reader +} + +func NewPrefixedReader(prefix []byte, reader io.Reader) *PrefixedReader { + return &PrefixedReader{prefix, reader} +} + +func (pr *PrefixedReader) Read(p []byte) (n int, err error) { + if len(pr.Prefix) > 0 { + read := copy(p, pr.Prefix) + pr.Prefix = pr.Prefix[read:] + return read, nil + } + return pr.reader.Read(p) +} + +// NOTE: Not goroutine safe +type BufferCloser struct { + bytes.Buffer + Closed bool +} + +func NewBufferCloser(buf []byte) *BufferCloser { + return &BufferCloser{ + *bytes.NewBuffer(buf), + false, + } +} + +func (bc *BufferCloser) Close() error { + if bc.Closed { + return errors.New("BufferCloser already closed") + } + bc.Closed = true + return nil +} + +func (bc *BufferCloser) Write(p []byte) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.Write(p) +} + +func (bc *BufferCloser) WriteByte(c byte) error { + if bc.Closed { + return errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteByte(c) +} + +func (bc *BufferCloser) WriteRune(r rune) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteRune(r) +} + +func (bc *BufferCloser) WriteString(s string) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteString(s) +} diff --git a/libs/common/kvpair.go b/libs/common/kvpair.go new file mode 100644 index 000000000..54c3a58c0 --- /dev/null +++ b/libs/common/kvpair.go @@ -0,0 +1,67 @@ +package common + +import ( + "bytes" + "sort" +) + +//---------------------------------------- +// KVPair + +/* +Defined in types.proto + +type KVPair 
struct { + Key []byte + Value []byte +} +*/ + +type KVPairs []KVPair + +// Sorting +func (kvs KVPairs) Len() int { return len(kvs) } +func (kvs KVPairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0 + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KVPairs) Sort() { sort.Sort(kvs) } + +//---------------------------------------- +// KI64Pair + +/* +Defined in types.proto +type KI64Pair struct { + Key []byte + Value int64 +} +*/ + +type KI64Pairs []KI64Pair + +// Sorting +func (kvs KI64Pairs) Len() int { return len(kvs) } +func (kvs KI64Pairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return kvs[i].Value < kvs[j].Value + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KI64Pairs) Sort() { sort.Sort(kvs) } diff --git a/libs/common/math.go b/libs/common/math.go new file mode 100644 index 000000000..b037d1a71 --- /dev/null +++ b/libs/common/math.go @@ -0,0 +1,157 @@ +package common + +func MaxInt8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +func MaxUint8(a, b uint8) uint8 { + if a > b { + return a + } + return b +} + +func MaxInt16(a, b int16) int16 { + if a > b { + return a + } + return b +} + +func MaxUint16(a, b uint16) uint16 { + if a > b { + return a + } + return b +} + +func MaxInt32(a, b int32) int32 { + if a > b { + return a + } + return b +} + +func MaxUint32(a, b uint32) uint32 { + if a > b { + return a + } + return b +} + +func MaxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func MaxUint64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +func MaxUint(a, b uint) uint { + if a > b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func MinInt8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func MinUint8(a, b uint8) uint8 { + if a < b { + return a + } + return b +} + +func MinInt16(a, b int16) int16 { + if a < b { + return a + } + return b +} + +func MinUint16(a, b uint16) uint16 { + if a < b { + return a + } + return b +} + +func MinInt32(a, b int32) int32 { + if a < b { + return a + } + return b +} + +func MinUint32(a, b uint32) uint32 { + if a < b { + return a + } + return b +} + +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func MinUint64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +func MinUint(a, b uint) uint { + if a < b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func ExpUint64(a, b uint64) uint64 { + accum := uint64(1) + for b > 0 { + if b&1 == 1 { + accum *= a + } + a *= a + b >>= 1 + } + return accum +} diff --git a/libs/common/net.go b/libs/common/net.go new file mode 100644 index 000000000..bdbe38f79 --- /dev/null +++ b/libs/common/net.go @@ -0,0 +1,26 @@ +package common + +import ( + "net" + "strings" +) + +// Connect dials the given address and returns a net.Conn. The protoAddr argument should be prefixed with the protocol, +// eg. 
"tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" +func Connect(protoAddr string) (net.Conn, error) { + proto, address := ProtocolAndAddress(protoAddr) + conn, err := net.Dial(proto, address) + return conn, err +} + +// ProtocolAndAddress splits an address into the protocol and address components. +// For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". +// If the address has no protocol prefix, the default is "tcp". +func ProtocolAndAddress(listenAddr string) (string, string) { + protocol, address := "tcp", listenAddr + parts := strings.SplitN(address, "://", 2) + if len(parts) == 2 { + protocol, address = parts[0], parts[1] + } + return protocol, address +} diff --git a/libs/common/net_test.go b/libs/common/net_test.go new file mode 100644 index 000000000..38d2ae82d --- /dev/null +++ b/libs/common/net_test.go @@ -0,0 +1,38 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestProtocolAndAddress(t *testing.T) { + + cases := []struct { + fullAddr string + proto string + addr string + }{ + { + "tcp://mydomain:80", + "tcp", + "mydomain:80", + }, + { + "mydomain:80", + "tcp", + "mydomain:80", + }, + { + "unix://mydomain:80", + "unix", + "mydomain:80", + }, + } + + for _, c := range cases { + proto, addr := ProtocolAndAddress(c.fullAddr) + assert.Equal(t, proto, c.proto) + assert.Equal(t, addr, c.addr) + } +} diff --git a/libs/common/nil.go b/libs/common/nil.go new file mode 100644 index 000000000..31f75f008 --- /dev/null +++ b/libs/common/nil.go @@ -0,0 +1,29 @@ +package common + +import "reflect" + +// Go lacks a simple and safe way to see if something is a typed nil. +// See: +// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 +// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion +// - https://github.com/golang/go/issues/21538 +func IsTypedNil(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +// Returns true if it has zero length. +func IsEmpty(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return rv.Len() == 0 + default: + return false + } +} diff --git a/libs/common/os.go b/libs/common/os.go new file mode 100644 index 000000000..00f4da57b --- /dev/null +++ b/libs/common/os.go @@ -0,0 +1,195 @@ +package common + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "syscall" +) + +var gopath string + +// GoPath returns GOPATH env variable value. If it is not set, this function +// will try to call `go env GOPATH` subcommand. +func GoPath() string { + if gopath != "" { + return gopath + } + + path := os.Getenv("GOPATH") + if len(path) == 0 { + goCmd := exec.Command("go", "env", "GOPATH") + out, err := goCmd.Output() + if err != nil { + panic(fmt.Sprintf("failed to determine gopath: %v", err)) + } + path = string(out) + } + gopath = path + return path +} + +// TrapSignal catches the SIGTERM and executes cb function. After that it exits +// with code 1. +func TrapSignal(cb func()) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + for sig := range c { + fmt.Printf("captured %v, exiting...\n", sig) + if cb != nil { + cb() + } + os.Exit(1) + } + }() + select {} +} + +// Kill the running process by sending itself SIGTERM. 
+func Kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + return p.Signal(syscall.SIGTERM) +} + +func Exit(s string) { + fmt.Printf(s + "\n") + os.Exit(1) +} + +func EnsureDir(dir string, mode os.FileMode) error { + if _, err := os.Stat(dir); os.IsNotExist(err) { + err := os.MkdirAll(dir, mode) + if err != nil { + return fmt.Errorf("Could not create directory %v. %v", dir, err) + } + } + return nil +} + +func IsDirEmpty(name string) (bool, error) { + f, err := os.Open(name) + if err != nil { + if os.IsNotExist(err) { + return true, err + } + // Otherwise perhaps a permission + // error or some other error. + return false, err + } + defer f.Close() + + _, err = f.Readdirnames(1) // Or f.Readdir(1) + if err == io.EOF { + return true, nil + } + return false, err // Either not empty or error, suits both cases +} + +func FileExists(filePath string) bool { + _, err := os.Stat(filePath) + return !os.IsNotExist(err) +} + +func ReadFile(filePath string) ([]byte, error) { + return ioutil.ReadFile(filePath) +} + +func MustReadFile(filePath string) []byte { + fileBytes, err := ioutil.ReadFile(filePath) + if err != nil { + Exit(Fmt("MustReadFile failed: %v", err)) + return nil + } + return fileBytes +} + +func WriteFile(filePath string, contents []byte, mode os.FileMode) error { + return ioutil.WriteFile(filePath, contents, mode) +} + +func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { + err := WriteFile(filePath, contents, mode) + if err != nil { + Exit(Fmt("MustWriteFile failed: %v", err)) + } +} + +// WriteFileAtomic creates a temporary file with data and the perm given and +// swaps it atomically with filename if successful. +func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { + var ( + dir = filepath.Dir(filename) + tempFile = filepath.Join(dir, "write-file-atomic-"+RandStr(32)) + // Override in case it does exist, create in case it doesn't and force kernel + // flush, which still leaves the potential of lingering disk cache. + flag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC + ) + + f, err := os.OpenFile(tempFile, flag, perm) + if err != nil { + return err + } + // Clean up in any case. Defer stacking order is last-in-first-out. + defer os.Remove(f.Name()) + defer f.Close() + + if n, err := f.Write(data); err != nil { + return err + } else if n < len(data) { + return io.ErrShortWrite + } + // Close the file before renaming it, otherwise it will cause "The process + // cannot access the file because it is being used by another process." on windows. 
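+	// (Keeping the temp file in the target's directory also keeps the final
+	// os.Rename on a single filesystem, which is what makes the swap atomic
+	// on POSIX systems.)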
+ f.Close() + + return os.Rename(f.Name(), filename) +} + +//-------------------------------------------------------------------------------- + +func Tempfile(prefix string) (*os.File, string) { + file, err := ioutil.TempFile("", prefix) + if err != nil { + PanicCrisis(err) + } + return file, file.Name() +} + +func Tempdir(prefix string) (*os.File, string) { + tempDir := os.TempDir() + "/" + prefix + RandStr(12) + err := EnsureDir(tempDir, 0700) + if err != nil { + panic(Fmt("Error creating temp dir: %v", err)) + } + dir, err := os.Open(tempDir) + if err != nil { + panic(Fmt("Error opening temp dir: %v", err)) + } + return dir, tempDir +} + +//-------------------------------------------------------------------------------- + +func Prompt(prompt string, defaultValue string) (string, error) { + fmt.Print(prompt) + reader := bufio.NewReader(os.Stdin) + line, err := reader.ReadString('\n') + if err != nil { + return defaultValue, err + } + line = strings.TrimSpace(line) + if line == "" { + return defaultValue, nil + } + return line, nil +} diff --git a/libs/common/os_test.go b/libs/common/os_test.go new file mode 100644 index 000000000..3edd6496e --- /dev/null +++ b/libs/common/os_test.go @@ -0,0 +1,88 @@ +package common + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestWriteFileAtomic(t *testing.T) { + var ( + data = []byte(RandStr(RandIntn(2048))) + old = RandBytes(RandIntn(2048)) + perm os.FileMode = 0600 + ) + + f, err := ioutil.TempFile("/tmp", "write-atomic-test-") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + + if err = ioutil.WriteFile(f.Name(), old, 0664); err != nil { + t.Fatal(err) + } + + if err = WriteFileAtomic(f.Name(), data, perm); err != nil { + t.Fatal(err) + } + + rData, err := ioutil.ReadFile(f.Name()) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(data, rData) { + t.Fatalf("data mismatch: %v != %v", data, rData) + } + + stat, err := os.Stat(f.Name()) + if err != nil { + t.Fatal(err) + } + + if have, want := stat.Mode().Perm(), perm; have != want { + t.Errorf("have %v, want %v", have, want) + } +} + +func TestGoPath(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + err := os.Setenv("GOPATH", "~/testgopath") + if err != nil { + t.Fatal(err) + } + path = GoPath() + if path != "~/testgopath" { + t.Fatalf("should get GOPATH env var value, got %v", path) + } + os.Unsetenv("GOPATH") + + path = GoPath() + if path != "~/testgopath" { + t.Fatalf("subsequent calls should return the same value, got %v", path) + } +} + +func TestGoPathWithoutEnvVar(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + os.Unsetenv("GOPATH") + // reset cache + gopath = "" + + path = GoPath() + if path == "" || path == "~/testgopath" { + t.Fatalf("should get nonempty result of calling go env GOPATH, got %v", path) + } +} diff --git a/libs/common/random.go b/libs/common/random.go new file mode 100644 index 000000000..51bfd15c4 --- /dev/null +++ b/libs/common/random.go @@ -0,0 +1,295 @@ +package common + +import ( + crand "crypto/rand" + mrand "math/rand" + "sync" + "time" +) + +const ( + strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters +) + +// Rand is a prng, that is seeded with OS randomness. +// The OS randomness is obtained from crypto/rand, however none of the provided +// methods are suitable for cryptographic usage. 
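+// (For keys or anything else security-sensitive, use crypto/rand directly.)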
+// They all utilize math/rand's prng internally. +// +// All of the methods here are suitable for concurrent use. +// This is achieved by using a mutex lock on all of the provided methods. +type Rand struct { + sync.Mutex + rand *mrand.Rand +} + +var grand *Rand + +func init() { + grand = NewRand() + grand.init() +} + +func NewRand() *Rand { + rand := &Rand{} + rand.init() + return rand +} + +func (r *Rand) init() { + bz := cRandBytes(8) + var seed uint64 + for i := 0; i < 8; i++ { + seed |= uint64(bz[i]) + seed <<= 8 + } + r.reset(int64(seed)) +} + +func (r *Rand) reset(seed int64) { + r.rand = mrand.New(mrand.NewSource(seed)) +} + +//---------------------------------------- +// Global functions + +func Seed(seed int64) { + grand.Seed(seed) +} + +func RandStr(length int) string { + return grand.Str(length) +} + +func RandUint16() uint16 { + return grand.Uint16() +} + +func RandUint32() uint32 { + return grand.Uint32() +} + +func RandUint64() uint64 { + return grand.Uint64() +} + +func RandUint() uint { + return grand.Uint() +} + +func RandInt16() int16 { + return grand.Int16() +} + +func RandInt32() int32 { + return grand.Int32() +} + +func RandInt64() int64 { + return grand.Int64() +} + +func RandInt() int { + return grand.Int() +} + +func RandInt31() int32 { + return grand.Int31() +} + +func RandInt31n(n int32) int32 { + return grand.Int31n(n) +} + +func RandInt63() int64 { + return grand.Int63() +} + +func RandInt63n(n int64) int64 { + return grand.Int63n(n) +} + +func RandFloat32() float32 { + return grand.Float32() +} + +func RandFloat64() float64 { + return grand.Float64() +} + +func RandTime() time.Time { + return grand.Time() +} + +func RandBytes(n int) []byte { + return grand.Bytes(n) +} + +func RandIntn(n int) int { + return grand.Intn(n) +} + +func RandPerm(n int) []int { + return grand.Perm(n) +} + +//---------------------------------------- +// Rand methods + +func (r *Rand) Seed(seed int64) { + r.Lock() + r.reset(seed) + r.Unlock() +} + +// Str constructs a random alphanumeric string of given length. 
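+// For example (hedged sketch):
+//
+//	r := NewRand()
+//	s := r.Str(12) // 12 characters drawn from [0-9A-Za-z]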
+func (r *Rand) Str(length int) string { + chars := []byte{} +MAIN_LOOP: + for { + val := r.Int63() + for i := 0; i < 10; i++ { + v := int(val & 0x3f) // rightmost 6 bits + if v >= 62 { // only 62 characters in strChars + val >>= 6 + continue + } else { + chars = append(chars, strChars[v]) + if len(chars) == length { + break MAIN_LOOP + } + val >>= 6 + } + } + } + + return string(chars) +} + +func (r *Rand) Uint16() uint16 { + return uint16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Uint32() uint32 { + r.Lock() + u32 := r.rand.Uint32() + r.Unlock() + return u32 +} + +func (r *Rand) Uint64() uint64 { + return uint64(r.Uint32())<<32 + uint64(r.Uint32()) +} + +func (r *Rand) Uint() uint { + r.Lock() + i := r.rand.Int() + r.Unlock() + return uint(i) +} + +func (r *Rand) Int16() int16 { + return int16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Int32() int32 { + return int32(r.Uint32()) +} + +func (r *Rand) Int64() int64 { + return int64(r.Uint64()) +} + +func (r *Rand) Int() int { + r.Lock() + i := r.rand.Int() + r.Unlock() + return i +} + +func (r *Rand) Int31() int32 { + r.Lock() + i31 := r.rand.Int31() + r.Unlock() + return i31 +} + +func (r *Rand) Int31n(n int32) int32 { + r.Lock() + i31n := r.rand.Int31n(n) + r.Unlock() + return i31n +} + +func (r *Rand) Int63() int64 { + r.Lock() + i63 := r.rand.Int63() + r.Unlock() + return i63 +} + +func (r *Rand) Int63n(n int64) int64 { + r.Lock() + i63n := r.rand.Int63n(n) + r.Unlock() + return i63n +} + +func (r *Rand) Float32() float32 { + r.Lock() + f32 := r.rand.Float32() + r.Unlock() + return f32 +} + +func (r *Rand) Float64() float64 { + r.Lock() + f64 := r.rand.Float64() + r.Unlock() + return f64 +} + +func (r *Rand) Time() time.Time { + return time.Unix(int64(r.Uint64()), 0) +} + +// Bytes returns n random bytes generated from the internal +// prng. +func (r *Rand) Bytes(n int) []byte { + // cRandBytes isn't guaranteed to be fast so instead + // use random bytes generated from the internal PRNG + bs := make([]byte, n) + for i := 0; i < len(bs); i++ { + bs[i] = byte(r.Int() & 0xFF) + } + return bs +} + +// Intn returns, as an int, a uniform pseudo-random number in the range [0, n). +// It panics if n <= 0. +func (r *Rand) Intn(n int) int { + r.Lock() + i := r.rand.Intn(n) + r.Unlock() + return i +} + +// Perm returns a pseudo-random permutation of n integers in [0, n). +func (r *Rand) Perm(n int) []int { + r.Lock() + perm := r.rand.Perm(n) + r.Unlock() + return perm +} + +// NOTE: This relies on the os's random number generator. +// For real security, we should salt that with some seed. +// See github.com/tendermint/go-crypto for a more secure reader. +func cRandBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + PanicCrisis(err) + } + return b +} diff --git a/libs/common/random_test.go b/libs/common/random_test.go new file mode 100644 index 000000000..c59a577b8 --- /dev/null +++ b/libs/common/random_test.go @@ -0,0 +1,118 @@ +package common + +import ( + "bytes" + "encoding/json" + "fmt" + mrand "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestRandStr(t *testing.T) { + l := 243 + s := RandStr(l) + assert.Equal(t, l, len(s)) +} + +func TestRandBytes(t *testing.T) { + l := 243 + b := RandBytes(l) + assert.Equal(t, l, len(b)) +} + +func TestRandIntn(t *testing.T) { + n := 243 + for i := 0; i < 100; i++ { + x := RandIntn(n) + assert.True(t, x < n) + } +} + +// Test to make sure that we never call math.rand(). 
+// We do this by ensuring that outputs are deterministic. +func TestDeterminism(t *testing.T) { + var firstOutput string + + // Set math/rand's seed for the sake of debugging this test. + // (It isn't strictly necessary). + mrand.Seed(1) + + for i := 0; i < 100; i++ { + output := testThemAll() + if i == 0 { + firstOutput = output + } else { + if firstOutput != output { + t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", + i, firstOutput, output) + } + } + } +} + +func testThemAll() string { + + // Such determinism. + grand.reset(1) + + // Use it. + out := new(bytes.Buffer) + perm := RandPerm(10) + blob, _ := json.Marshal(perm) + fmt.Fprintf(out, "perm: %s\n", blob) + fmt.Fprintf(out, "randInt: %d\n", RandInt()) + fmt.Fprintf(out, "randUint: %d\n", RandUint()) + fmt.Fprintf(out, "randIntn: %d\n", RandIntn(97)) + fmt.Fprintf(out, "randInt31: %d\n", RandInt31()) + fmt.Fprintf(out, "randInt32: %d\n", RandInt32()) + fmt.Fprintf(out, "randInt63: %d\n", RandInt63()) + fmt.Fprintf(out, "randInt64: %d\n", RandInt64()) + fmt.Fprintf(out, "randUint32: %d\n", RandUint32()) + fmt.Fprintf(out, "randUint64: %d\n", RandUint64()) + return out.String() +} + +func TestRngConcurrencySafety(t *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + _ = RandUint64() + <-time.After(time.Millisecond * time.Duration(RandIntn(100))) + _ = RandPerm(3) + }() + } + wg.Wait() +} + +func BenchmarkRandBytes10B(b *testing.B) { + benchmarkRandBytes(b, 10) +} +func BenchmarkRandBytes100B(b *testing.B) { + benchmarkRandBytes(b, 100) +} +func BenchmarkRandBytes1KiB(b *testing.B) { + benchmarkRandBytes(b, 1024) +} +func BenchmarkRandBytes10KiB(b *testing.B) { + benchmarkRandBytes(b, 10*1024) +} +func BenchmarkRandBytes100KiB(b *testing.B) { + benchmarkRandBytes(b, 100*1024) +} +func BenchmarkRandBytes1MiB(b *testing.B) { + benchmarkRandBytes(b, 1024*1024) +} + +func benchmarkRandBytes(b *testing.B, n int) { + for i := 0; i < b.N; i++ { + _ = RandBytes(n) + } + b.ReportAllocs() +} diff --git a/libs/common/repeat_timer.go b/libs/common/repeat_timer.go new file mode 100644 index 000000000..5d049738d --- /dev/null +++ b/libs/common/repeat_timer.go @@ -0,0 +1,232 @@ +package common + +import ( + "sync" + "time" +) + +// Used by RepeatTimer the first time, +// and every time it's Reset() after Stop(). +type TickerMaker func(dur time.Duration) Ticker + +// Ticker is a basic ticker interface. +type Ticker interface { + + // Never changes, never closes. + Chan() <-chan time.Time + + // Stopping a stopped Ticker will panic. + Stop() +} + +//---------------------------------------- +// defaultTicker + +var _ Ticker = (*defaultTicker)(nil) + +type defaultTicker time.Ticker + +func defaultTickerMaker(dur time.Duration) Ticker { + ticker := time.NewTicker(dur) + return (*defaultTicker)(ticker) +} + +// Implements Ticker +func (t *defaultTicker) Chan() <-chan time.Time { + return t.C +} + +// Implements Ticker +func (t *defaultTicker) Stop() { + ((*time.Ticker)(t)).Stop() +} + +//---------------------------------------- +// LogicalTickerMaker + +// Construct a TickerMaker that always uses `source`. +// It's useful for simulating a deterministic clock. 
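+// For example, a test can feed times by hand and observe exactly one fire
+// per elapsed interval (hedged sketch):
+//
+//	src := make(chan time.Time, 2)
+//	rt := NewRepeatTimerWithTickerMaker("demo", time.Second, NewLogicalTickerMaker(src))
+//	src <- time.Time{}                  // first receive initializes the ticker's last-seen time
+//	src <- time.Time{}.Add(time.Second) // >= interval elapsed: fires
+//	fmt.Println(<-rt.Chan())            // 0001-01-01 00:00:01 +0000 UTC
+//	rt.Stop()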
+func NewLogicalTickerMaker(source chan time.Time) TickerMaker {
+	return func(dur time.Duration) Ticker {
+		return newLogicalTicker(source, dur)
+	}
+}
+
+type logicalTicker struct {
+	source <-chan time.Time
+	ch     chan time.Time
+	quit   chan struct{}
+}
+
+func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker {
+	lt := &logicalTicker{
+		source: source,
+		ch:     make(chan time.Time),
+		quit:   make(chan struct{}),
+	}
+	go lt.fireRoutine(interval)
+	return lt
+}
+
+// We need a goroutine to read times from t.source
+// and fire on t.Chan() when `interval` has passed.
+func (t *logicalTicker) fireRoutine(interval time.Duration) {
+	source := t.source
+
+	// Init `lasttime`
+	lasttime := time.Time{}
+	select {
+	case lasttime = <-source:
+	case <-t.quit:
+		return
+	}
+	// Init `lasttime` end
+
+	for {
+		select {
+		case newtime := <-source:
+			elapsed := newtime.Sub(lasttime)
+			if interval <= elapsed {
+				// Block for determinism until the ticker is stopped.
+				select {
+				case t.ch <- newtime:
+				case <-t.quit:
+					return
+				}
+				// Reset timeleft.
+				// Don't try to "catch up" by sending more.
+				// "Ticker adjusts the intervals or drops ticks to make up for
+				// slow receivers" - https://golang.org/pkg/time/#Ticker
+				lasttime = newtime
+			}
+		case <-t.quit:
+			return // done
+		}
+	}
+}
+
+// Implements Ticker
+func (t *logicalTicker) Chan() <-chan time.Time {
+	return t.ch // immutable
+}
+
+// Implements Ticker
+func (t *logicalTicker) Stop() {
+	close(t.quit) // it *should* panic when stopped twice.
+}
+
+//---------------------------------------------------------------------
+
+/*
+	RepeatTimer repeatedly sends the current time on `.Chan()` after each `dur`
+	period. (It's good for keeping connections alive.)
+	A RepeatTimer must be stopped, or it will keep a goroutine alive.
+*/
+type RepeatTimer struct {
+	name string
+	ch   chan time.Time
+	tm   TickerMaker
+
+	mtx    sync.Mutex
+	dur    time.Duration
+	ticker Ticker
+	quit   chan struct{}
+}
+
+// NewRepeatTimer returns a RepeatTimer with a defaultTicker.
+func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer {
+	return NewRepeatTimerWithTickerMaker(name, dur, defaultTickerMaker)
+}
+
+// NewRepeatTimerWithTickerMaker returns a RepeatTimer with the given ticker
+// maker.
+func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMaker) *RepeatTimer {
+	var t = &RepeatTimer{
+		name:   name,
+		ch:     make(chan time.Time),
+		tm:     tm,
+		dur:    dur,
+		ticker: nil,
+		quit:   nil,
+	}
+	t.reset()
+	return t
+}
+
+// receive ticks on ch, send out on t.ch
+func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) {
+	for {
+		select {
+		case tick := <-ch:
+			select {
+			case t.ch <- tick:
+			case <-quit:
+				return
+			}
+		case <-quit: // NOTE: `t.quit` races.
+			return
+		}
+	}
+}
+
+func (t *RepeatTimer) Chan() <-chan time.Time {
+	return t.ch
+}
+
+func (t *RepeatTimer) Stop() {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+
+	t.stop()
+}
+
+// Reset starts the timer over, waiting the full duration again before firing.
+func (t *RepeatTimer) Reset() {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+
+	t.reset()
+}
+
+//----------------------------------------
+// Misc.
+
+// CONTRACT: (non-constructor) caller should hold t.mtx.
+func (t *RepeatTimer) reset() {
+	if t.ticker != nil {
+		t.stop()
+	}
+	t.ticker = t.tm(t.dur)
+	t.quit = make(chan struct{})
+	go t.fireRoutine(t.ticker.Chan(), t.quit)
+}
+
+// CONTRACT: caller should hold t.mtx.
+func (t *RepeatTimer) stop() { + if t.ticker == nil { + /* + Similar to the case of closing channels twice: + https://groups.google.com/forum/#!topic/golang-nuts/rhxMiNmRAPk + Stopping a RepeatTimer twice implies that you do + not know whether you are done or not. + If you're calling stop on a stopped RepeatTimer, + you probably have race conditions. + */ + panic("Tried to stop a stopped RepeatTimer") + } + t.ticker.Stop() + t.ticker = nil + /* + From https://golang.org/pkg/time/#Ticker: + "Stop the ticker to release associated resources" + "After Stop, no more ticks will be sent" + So we shouldn't have to do the below. + + select { + case <-t.ch: + // read off channel if there's anything there + default: + } + */ + close(t.quit) +} diff --git a/libs/common/repeat_timer_test.go b/libs/common/repeat_timer_test.go new file mode 100644 index 000000000..f2a7b16c3 --- /dev/null +++ b/libs/common/repeat_timer_test.go @@ -0,0 +1,136 @@ +package common + +import ( + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" +) + +func TestDefaultTicker(t *testing.T) { + ticker := defaultTickerMaker(time.Millisecond * 10) + <-ticker.Chan() + ticker.Stop() +} + +func TestRepeatTimer(t *testing.T) { + + ch := make(chan time.Time, 100) + mtx := new(sync.Mutex) + + // tick() fires from start to end + // (exclusive) in milliseconds with incr. + // It locks on mtx, so subsequent calls + // run in series. + tick := func(startMs, endMs, incrMs time.Duration) { + mtx.Lock() + go func() { + for tMs := startMs; tMs < endMs; tMs += incrMs { + lt := time.Time{} + lt = lt.Add(tMs * time.Millisecond) + ch <- lt + } + mtx.Unlock() + }() + } + + // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". + tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { + + // Check against timesMs. + for _, timeMs := range timesMs { + tyme := <-rt.Chan() + sinceMs := tyme.Sub(time.Time{}) / time.Millisecond + assert.Equal(t, timeMs, int64(sinceMs)) + } + + // TODO detect number of running + // goroutines to ensure that + // no other times will fire. + // See https://github.com/tendermint/tendermint/libs/issues/120. + time.Sleep(time.Millisecond * 100) + done := true + select { + case <-rt.Chan(): + done = false + default: + } + assert.True(t, done) + } + + tm := NewLogicalTickerMaker(ch) + rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) + + /* NOTE: Useful for debugging deadlocks... + go func() { + time.Sleep(time.Second * 3) + trace := make([]byte, 102400) + count := runtime.Stack(trace, true) + fmt.Printf("Stack of %d bytes: %s\n", count, trace) + }() + */ + + tick(0, 1000, 10) + tock(t, rt, []int64{}) + tick(1000, 2000, 10) + tock(t, rt, []int64{1000}) + tick(2005, 5000, 10) + tock(t, rt, []int64{2005, 3005, 4005}) + tick(5001, 5999, 1) + // Read 5005 instead of 5001 because + // it's 1 second greater than 4005. + tock(t, rt, []int64{5005}) + tick(6000, 7005, 1) + tock(t, rt, []int64{6005}) + tick(7033, 8032, 1) + tock(t, rt, []int64{7033}) + + // After a reset, nothing happens + // until two ticks are received. + rt.Reset() + tock(t, rt, []int64{}) + tick(8040, 8041, 1) + tock(t, rt, []int64{}) + tick(9555, 9556, 1) + tock(t, rt, []int64{9555}) + + // After a stop, nothing more is sent. + rt.Stop() + tock(t, rt, []int64{}) + + // Another stop panics. 
+ assert.Panics(t, func() { rt.Stop() }) +} + +func TestRepeatTimerReset(t *testing.T) { + // check that we are not leaking any go-routines + defer leaktest.Check(t)() + + timer := NewRepeatTimer("test", 20*time.Millisecond) + defer timer.Stop() + + // test we don't receive tick before duration ms. + select { + case <-timer.Chan(): + t.Fatal("did not expect to receive tick") + default: + } + + timer.Reset() + + // test we receive tick after Reset is called + select { + case <-timer.Chan(): + // all good + case <-time.After(40 * time.Millisecond): + t.Fatal("expected to receive tick after reset") + } + + // just random calls + for i := 0; i < 100; i++ { + time.Sleep(time.Duration(RandIntn(40)) * time.Millisecond) + timer.Reset() + } +} diff --git a/libs/common/service.go b/libs/common/service.go new file mode 100644 index 000000000..b6f166e77 --- /dev/null +++ b/libs/common/service.go @@ -0,0 +1,205 @@ +package common + +import ( + "errors" + "fmt" + "sync/atomic" + + "github.com/tendermint/tendermint/libs/log" +) + +var ( + ErrAlreadyStarted = errors.New("already started") + ErrAlreadyStopped = errors.New("already stopped") +) + +// Service defines a service that can be started, stopped, and reset. +type Service interface { + // Start the service. + // If it's already started or stopped, will return an error. + // If OnStart() returns an error, it's returned by Start() + Start() error + OnStart() error + + // Stop the service. + // If it's already stopped, will return an error. + // OnStop must never error. + Stop() error + OnStop() + + // Reset the service. + // Panics by default - must be overwritten to enable reset. + Reset() error + OnReset() error + + // Return true if the service is running + IsRunning() bool + + // Quit returns a channel, which is closed once service is stopped. + Quit() <-chan struct{} + + // String representation of the service + String() string + + // SetLogger sets a logger. + SetLogger(log.Logger) +} + +/* +Classical-inheritance-style service declarations. Services can be started, then +stopped, then optionally restarted. + +Users can override the OnStart/OnStop methods. In the absence of errors, these +methods are guaranteed to be called at most once. If OnStart returns an error, +service won't be marked as started, so the user can call Start again. + +A call to Reset will panic, unless OnReset is overwritten, allowing +OnStart/OnStop to be called again. + +The caller must ensure that Start and Stop are not called concurrently. + +It is ok to call Stop without calling Start first. + +Typical usage: + + type FooService struct { + BaseService + // private fields + } + + func NewFooService() *FooService { + fs := &FooService{ + // init + } + fs.BaseService = *NewBaseService(log, "FooService", fs) + return fs + } + + func (fs *FooService) OnStart() error { + fs.BaseService.OnStart() // Always call the overridden method. + // initialize private fields + // start subroutines, etc. + } + + func (fs *FooService) OnStop() error { + fs.BaseService.OnStop() // Always call the overridden method. + // close/destroy private fields + // stop subroutines, etc. + } +*/ +type BaseService struct { + Logger log.Logger + name string + started uint32 // atomic + stopped uint32 // atomic + quit chan struct{} + + // The "subclass" of BaseService + impl Service +} + +// NewBaseService creates a new BaseService. 
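+// A nil logger is replaced with a no-op logger, so this is safe (hedged
+// sketch, impl being your Service implementation):
+//
+//	bs := NewBaseService(nil, "Demo", impl)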
+func NewBaseService(logger log.Logger, name string, impl Service) *BaseService {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	return &BaseService{
+		Logger: logger,
+		name:   name,
+		quit:   make(chan struct{}),
+		impl:   impl,
+	}
+}
+
+// SetLogger implements Service by setting a logger.
+func (bs *BaseService) SetLogger(l log.Logger) {
+	bs.Logger = l
+}
+
+// Start implements Service by calling OnStart (if defined). An error will be
+// returned if the service is already running or stopped. To restart a stopped
+// service, call Reset first.
+func (bs *BaseService) Start() error {
+	if atomic.CompareAndSwapUint32(&bs.started, 0, 1) {
+		if atomic.LoadUint32(&bs.stopped) == 1 {
+			bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl)
+			return ErrAlreadyStopped
+		}
+		bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl)
+		err := bs.impl.OnStart()
+		if err != nil {
+			// revert flag
+			atomic.StoreUint32(&bs.started, 0)
+			return err
+		}
+		return nil
+	}
+	bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl)
+	return ErrAlreadyStarted
+}
+
+// OnStart implements Service by doing nothing.
+// NOTE: Do not put anything in here,
+// that way users don't need to call BaseService.OnStart()
+func (bs *BaseService) OnStart() error { return nil }
+
+// Stop implements Service by calling OnStop (if defined) and closing quit
+// channel. An error will be returned if the service is already stopped.
+func (bs *BaseService) Stop() error {
+	if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) {
+		bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl)
+		bs.impl.OnStop()
+		close(bs.quit)
+		return nil
+	}
+	bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl)
+	return ErrAlreadyStopped
+}
+
+// OnStop implements Service by doing nothing.
+// NOTE: Do not put anything in here,
+// that way users don't need to call BaseService.OnStop()
+func (bs *BaseService) OnStop() {}
+
+// Reset implements Service by calling OnReset callback (if defined). An error
+// will be returned if the service is running.
+func (bs *BaseService) Reset() error {
+	if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) {
+		bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl)
+		return fmt.Errorf("can't reset running %s", bs.name)
+	}
+
+	// whether or not we've started, we can reset
+	atomic.CompareAndSwapUint32(&bs.started, 1, 0)
+
+	bs.quit = make(chan struct{})
+	return bs.impl.OnReset()
+}
+
+// OnReset implements Service by panicking.
+func (bs *BaseService) OnReset() error {
+	PanicSanity("The service cannot be reset")
+	return nil
+}
+
+// IsRunning implements Service by returning true or false depending on the
+// service's state.
+func (bs *BaseService) IsRunning() bool {
+	return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0
+}
+
+// Wait blocks until the service is stopped.
+func (bs *BaseService) Wait() {
+	<-bs.quit
+}
+
+// String implements Service by returning a string representation of the service.
+func (bs *BaseService) String() string {
+	return bs.name
+}
+
+// Quit implements Service by returning a quit channel.
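+// Quit pairs naturally with Wait; e.g. (hedged sketch, svc being any service
+// built on BaseService):
+//
+//	go func() { <-svc.Quit(); fmt.Println("observed shutdown") }()
+//	_ = svc.Stop() // closes the quit channel
+//	svc.Wait()     // returns once the channel is closed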
+func (bs *BaseService) Quit() <-chan struct{} { + return bs.quit +} diff --git a/libs/common/service_test.go b/libs/common/service_test.go new file mode 100644 index 000000000..ef360a648 --- /dev/null +++ b/libs/common/service_test.go @@ -0,0 +1,54 @@ +package common + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type testService struct { + BaseService +} + +func (testService) OnReset() error { + return nil +} + +func TestBaseServiceWait(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.Start() + + waitFinished := make(chan struct{}) + go func() { + ts.Wait() + waitFinished <- struct{}{} + }() + + go ts.Stop() + + select { + case <-waitFinished: + // all good + case <-time.After(100 * time.Millisecond): + t.Fatal("expected Wait() to finish within 100 ms.") + } +} + +func TestBaseServiceReset(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.Start() + + err := ts.Reset() + require.Error(t, err, "expected cant reset service error") + + ts.Stop() + + err = ts.Reset() + require.NoError(t, err) + + err = ts.Start() + require.NoError(t, err) +} diff --git a/libs/common/string.go b/libs/common/string.go new file mode 100644 index 000000000..fac1be6c9 --- /dev/null +++ b/libs/common/string.go @@ -0,0 +1,89 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// Like fmt.Sprintf, but skips formatting if args are empty. +var Fmt = func(format string, a ...interface{}) string { + if len(a) == 0 { + return format + } + return fmt.Sprintf(format, a...) +} + +// IsHex returns true for non-empty hex-string prefixed with "0x" +func IsHex(s string) bool { + if len(s) > 2 && strings.EqualFold(s[:2], "0x") { + _, err := hex.DecodeString(s[2:]) + return err == nil + } + return false +} + +// StripHex returns hex string without leading "0x" +func StripHex(s string) string { + if IsHex(s) { + return s[2:] + } + return s +} + +// StringInSlice returns true if a is found the list. +func StringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +// SplitAndTrim slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. +func SplitAndTrim(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + for i := 0; i < len(spl); i++ { + spl[i] = strings.Trim(spl[i], cutset) + } + return spl +} + +// Returns true if s is a non-empty printable non-tab ascii character. +func IsASCIIText(s string) bool { + if len(s) == 0 { + return false + } + for _, b := range []byte(s) { + if 32 <= b && b <= 126 { + // good + } else { + return false + } + } + return true +} + +// NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics. 
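+// For example:
+//
+//	ASCIITrim(" a ")  // "a"
+//	ASCIITrim("a b")  // "ab" (all spaces are dropped, not just leading/trailing)
+//	ASCIITrim("\xC2") // panics: not printable ASCII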
+func ASCIITrim(s string) string {
+	r := make([]byte, 0, len(s))
+	for _, b := range []byte(s) {
+		if b == 32 {
+			continue // skip space
+		} else if 32 < b && b <= 126 {
+			r = append(r, b)
+		} else {
+			panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b))
+		}
+	}
+	return string(r)
+}
diff --git a/libs/common/string_test.go b/libs/common/string_test.go
new file mode 100644
index 000000000..5d1b68feb
--- /dev/null
+++ b/libs/common/string_test.go
@@ -0,0 +1,74 @@
+package common
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestStringInSlice(t *testing.T) {
+	assert.True(t, StringInSlice("a", []string{"a", "b", "c"}))
+	assert.False(t, StringInSlice("d", []string{"a", "b", "c"}))
+	assert.True(t, StringInSlice("", []string{""}))
+	assert.False(t, StringInSlice("", []string{}))
+}
+
+func TestIsHex(t *testing.T) {
+	notHex := []string{
+		"", " ", "a", "x", "0", "0x", "0X", "0x ", "0X ", "0X a",
+		"0xf ", "0x f", "0xp", "0x-",
+		"0xf", "0XBED", "0xF", "0xbed", // Odd lengths
+	}
+	for _, v := range notHex {
+		assert.False(t, IsHex(v), "%q is not hex", v)
+	}
+	hex := []string{
+		"0x00", "0x0a", "0x0F", "0xFFFFFF", "0Xdeadbeef", "0x0BED",
+		"0X12", "0X0A",
+	}
+	for _, v := range hex {
+		assert.True(t, IsHex(v), "%q is hex", v)
+	}
+}
+
+func TestSplitAndTrim(t *testing.T) {
+	testCases := []struct {
+		s        string
+		sep      string
+		cutset   string
+		expected []string
+	}{
+		{"a,b,c", ",", " ", []string{"a", "b", "c"}},
+		{" a , b , c ", ",", " ", []string{"a", "b", "c"}},
+		{" a, b, c ", ",", " ", []string{"a", "b", "c"}},
+		{" , ", ",", " ", []string{"", ""}},
+		{" ", ",", " ", []string{""}},
+	}
+
+	for _, tc := range testCases {
+		assert.Equal(t, tc.expected, SplitAndTrim(tc.s, tc.sep, tc.cutset), "%s", tc.s)
+	}
+}
+
+func TestIsASCIIText(t *testing.T) {
+	notASCIIText := []string{
+		"", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t",
+	}
+	for _, v := range notASCIIText {
+		assert.False(t, IsASCIIText(v), "%q is not ascii-text", v)
+	}
+	asciiText := []string{
+		" ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123",
+	}
+	for _, v := range asciiText {
+		assert.True(t, IsASCIIText(v), "%q is ascii-text", v)
+	}
+}
+
+func TestASCIITrim(t *testing.T) {
+	assert.Equal(t, ASCIITrim(" "), "")
+	assert.Equal(t, ASCIITrim(" a"), "a")
+	assert.Equal(t, ASCIITrim("a "), "a")
+	assert.Equal(t, ASCIITrim(" a "), "a")
+	assert.Panics(t, func() { ASCIITrim("\xC2\xA2") })
+}
diff --git a/libs/common/throttle_timer.go b/libs/common/throttle_timer.go
new file mode 100644
index 000000000..38ef4e9a3
--- /dev/null
+++ b/libs/common/throttle_timer.go
@@ -0,0 +1,75 @@
+package common
+
+import (
+	"sync"
+	"time"
+)
+
+/*
+ThrottleTimer fires an event at most "dur" after each .Set() call.
+If a short burst of .Set() calls happens, ThrottleTimer fires once.
+If a long continuous burst of .Set() calls happens, ThrottleTimer fires
+at most once every "dur".
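+
+A short burst therefore costs a single event. Unset() cancels a pending
+fire, and Stop() must be called to release the underlying timer.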
+*/ +type ThrottleTimer struct { + Name string + Ch chan struct{} + quit chan struct{} + dur time.Duration + + mtx sync.Mutex + timer *time.Timer + isSet bool +} + +func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { + var ch = make(chan struct{}) + var quit = make(chan struct{}) + var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} + t.mtx.Lock() + t.timer = time.AfterFunc(dur, t.fireRoutine) + t.mtx.Unlock() + t.timer.Stop() + return t +} + +func (t *ThrottleTimer) fireRoutine() { + t.mtx.Lock() + defer t.mtx.Unlock() + select { + case t.Ch <- struct{}{}: + t.isSet = false + case <-t.quit: + // do nothing + default: + t.timer.Reset(t.dur) + } +} + +func (t *ThrottleTimer) Set() { + t.mtx.Lock() + defer t.mtx.Unlock() + if !t.isSet { + t.isSet = true + t.timer.Reset(t.dur) + } +} + +func (t *ThrottleTimer) Unset() { + t.mtx.Lock() + defer t.mtx.Unlock() + t.isSet = false + t.timer.Stop() +} + +// For ease of .Stop()'ing services before .Start()'ing them, +// we ignore .Stop()'s on nil ThrottleTimers +func (t *ThrottleTimer) Stop() bool { + if t == nil { + return false + } + close(t.quit) + t.mtx.Lock() + defer t.mtx.Unlock() + return t.timer.Stop() +} diff --git a/libs/common/throttle_timer_test.go b/libs/common/throttle_timer_test.go new file mode 100644 index 000000000..00f5abdec --- /dev/null +++ b/libs/common/throttle_timer_test.go @@ -0,0 +1,78 @@ +package common + +import ( + "sync" + "testing" + "time" + + // make govet noshadow happy... + asrt "github.com/stretchr/testify/assert" +) + +type thCounter struct { + input chan struct{} + mtx sync.Mutex + count int +} + +func (c *thCounter) Increment() { + c.mtx.Lock() + c.count++ + c.mtx.Unlock() +} + +func (c *thCounter) Count() int { + c.mtx.Lock() + val := c.count + c.mtx.Unlock() + return val +} + +// Read should run in a go-routine and +// updates count by one every time a packet comes in +func (c *thCounter) Read() { + for range c.input { + c.Increment() + } +} + +func TestThrottle(test *testing.T) { + assert := asrt.New(test) + + ms := 50 + delay := time.Duration(ms) * time.Millisecond + longwait := time.Duration(2) * delay + t := NewThrottleTimer("foo", delay) + + // start at 0 + c := &thCounter{input: t.Ch} + assert.Equal(0, c.Count()) + go c.Read() + + // waiting does nothing + time.Sleep(longwait) + assert.Equal(0, c.Count()) + + // send one event adds one + t.Set() + time.Sleep(longwait) + assert.Equal(1, c.Count()) + + // send a burst adds one + for i := 0; i < 5; i++ { + t.Set() + } + time.Sleep(longwait) + assert.Equal(2, c.Count()) + + // send 12, over 2 delay sections, adds 3 + short := time.Duration(ms/5) * time.Millisecond + for i := 0; i < 13; i++ { + t.Set() + time.Sleep(short) + } + time.Sleep(longwait) + assert.Equal(5, c.Count()) + + close(t.Ch) +} diff --git a/libs/common/types.pb.go b/libs/common/types.pb.go new file mode 100644 index 000000000..6442daeb4 --- /dev/null +++ b/libs/common/types.pb.go @@ -0,0 +1,695 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: libs/common/types.proto + +/* + Package common is a generated protocol buffer package. 
+ + It is generated from these files: + libs/common/types.proto + + It has these top-level messages: + KVPair + KI64Pair +*/ +//nolint +package common + +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import bytes "bytes" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = golang_proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Define these here for compatibility but use tmlibs/common.KVPair. +type KVPair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +func (m *KVPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KVPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. +type KI64Pair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KI64Pair) Reset() { *m = KI64Pair{} } +func (m *KI64Pair) String() string { return proto.CompactTextString(m) } +func (*KI64Pair) ProtoMessage() {} +func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +func (m *KI64Pair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KI64Pair) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterType((*KVPair)(nil), "common.KVPair") + golang_proto.RegisterType((*KVPair)(nil), "common.KVPair") + proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") + golang_proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") +} +func (this *KVPair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*KVPair) + if !ok { + that2, ok := that.(KVPair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Key, that1.Key) { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *KI64Pair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*KI64Pair) + if !ok { + that2, ok := that.(KI64Pair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Key, that1.Key) { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (m *KVPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { 
+ return nil, err + } + return dAtA[:n], nil +} + +func (m *KVPair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *KI64Pair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KI64Pair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.Value != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedKVPair(r randyTypes, easy bool) *KVPair { + this := &KVPair{} + v1 := r.Intn(100) + this.Key = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Key[i] = byte(r.Intn(256)) + } + v2 := r.Intn(100) + this.Value = make([]byte, v2) + for i := 0; i < v2; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedKI64Pair(r randyTypes, easy bool) *KI64Pair { + this := &KI64Pair{} + v3 := r.Intn(100) + this.Key = make([]byte, v3) + for i := 0; i < v3; i++ { + this.Key[i] = byte(r.Intn(256)) + } + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyTypes interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneTypes(r randyTypes) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringTypes(r randyTypes) string { + v4 := r.Intn(100) + tmps := make([]rune, v4) + for i := 0; i < v4; i++ { + tmps[i] = randUTF8RuneTypes(r) + } + return string(tmps) +} +func randUnrecognizedTypes(r randyTypes, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldTypes(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + v5 := r.Int63() + if r.Intn(2) == 0 { + v5 *= -1 + } + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v5)) + case 1: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), 
byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *KVPair) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *KI64Pair) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovTypes(uint64(m.Value)) + } + return n +} + +func sovTypes(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KVPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KVPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KVPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KI64Pair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KI64Pair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KI64Pair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("libs/common/types.proto", fileDescriptorTypes) } +func init() { golang_proto.RegisterFile("libs/common/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 174 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0xc9, 0x4c, 0x2a, + 0xd6, 0x4f, 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, + 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, + 0x30, 0x07, 0xcc, 0x82, 0x68, 0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, + 0x12, 0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, + 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, + 0x17, 0x87, 0xb7, 0xa7, 0x99, 0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x9c, 0x64, 0x7e, 0x3c, 0x94, + 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0x71, 0xc7, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc0, 0x63, 0x39, 0xc6, 0x24, 0x36, 0xb0, 0x53, 0x8c, + 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x39, 0xe1, 0xef, 0xdc, 0x00, 0x00, 0x00, +} diff --git a/libs/common/types.proto b/libs/common/types.proto new file mode 100644 index 000000000..518e7ca09 --- /dev/null +++ b/libs/common/types.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package common; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.goproto_registration) = true; +// Generate tests +option (gogoproto.populate_all) = true; +option (gogoproto.equal_all) = true; +option (gogoproto.testgen_all) = true; + +//---------------------------------------- +// Abstract types + +// Define these here for compatibility but use tmlibs/common.KVPair. +message KVPair { + bytes key = 1; + bytes value = 2; +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. +message KI64Pair { + bytes key = 1; + int64 value = 2; +} diff --git a/libs/common/typespb_test.go b/libs/common/typespb_test.go new file mode 100644 index 000000000..583c90502 --- /dev/null +++ b/libs/common/typespb_test.go @@ -0,0 +1,280 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: libs/common/types.proto + +/* +Package common is a generated protocol buffer package. 
+ +It is generated from these files: + libs/common/types.proto + +It has these top-level messages: + KVPair + KI64Pair +*/ +package common + +import testing "testing" +import rand "math/rand" +import time "time" +import proto "github.com/gogo/protobuf/proto" +import jsonpb "github.com/gogo/protobuf/jsonpb" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = golang_proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func TestKVPairProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KVPair{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestKVPairMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KVPair{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKI64PairProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KI64Pair{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestKI64PairMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KI64Pair{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto 
%#v", seed, msg, p) + } +} + +func TestKVPairJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KVPair{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestKI64PairJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KI64Pair{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestKVPairProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &KVPair{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKVPairProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + dAtA := proto.CompactTextString(p) + msg := &KVPair{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKI64PairProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &KI64Pair{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKI64PairProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, true) + dAtA := proto.CompactTextString(p) + msg := &KI64Pair{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKVPairSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + size2 := proto.Size(p) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestKI64PairSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, true) + size2 := proto.Size(p) + dAtA, err := proto.Marshal(p) + if err != 
nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/libs/common/word.go b/libs/common/word.go new file mode 100644 index 000000000..a5b841f55 --- /dev/null +++ b/libs/common/word.go @@ -0,0 +1,90 @@ +package common + +import ( + "bytes" + "sort" +) + +var ( + Zero256 = Word256{0} + One256 = Word256{1} +) + +type Word256 [32]byte + +func (w Word256) String() string { return string(w[:]) } +func (w Word256) TrimmedString() string { return TrimmedString(w.Bytes()) } +func (w Word256) Copy() Word256 { return w } +func (w Word256) Bytes() []byte { return w[:] } // copied. +func (w Word256) Prefix(n int) []byte { return w[:n] } +func (w Word256) Postfix(n int) []byte { return w[32-n:] } +func (w Word256) IsZero() bool { + accum := byte(0) + for _, byt := range w { + accum |= byt + } + return accum == 0 +} +func (w Word256) Compare(other Word256) int { + return bytes.Compare(w[:], other[:]) +} + +func Uint64ToWord256(i uint64) Word256 { + buf := [8]byte{} + PutUint64BE(buf[:], i) + return LeftPadWord256(buf[:]) +} + +func Int64ToWord256(i int64) Word256 { + buf := [8]byte{} + PutInt64BE(buf[:], i) + return LeftPadWord256(buf[:]) +} + +func RightPadWord256(bz []byte) (word Word256) { + copy(word[:], bz) + return +} + +func LeftPadWord256(bz []byte) (word Word256) { + copy(word[32-len(bz):], bz) + return +} + +func Uint64FromWord256(word Word256) uint64 { + buf := word.Postfix(8) + return GetUint64BE(buf) +} + +func Int64FromWord256(word Word256) int64 { + buf := word.Postfix(8) + return GetInt64BE(buf) +} + +//------------------------------------- + +type Tuple256 struct { + First Word256 + Second Word256 +} + +func (tuple Tuple256) Compare(other Tuple256) int { + firstCompare := tuple.First.Compare(other.First) + if firstCompare == 0 { + return tuple.Second.Compare(other.Second) + } + return firstCompare +} + +func Tuple256Split(t Tuple256) (Word256, Word256) { + return t.First, t.Second +} + +type Tuple256Slice []Tuple256 + +func (p Tuple256Slice) Len() int { return len(p) } +func (p Tuple256Slice) Less(i, j int) bool { + return p[i].Compare(p[j]) < 0 +} +func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Tuple256Slice) Sort() { sort.Sort(p) } diff --git a/libs/db/LICENSE.md b/libs/db/LICENSE.md new file mode 100644 index 000000000..ab8da59d8 --- /dev/null +++ b/libs/db/LICENSE.md @@ -0,0 +1,3 @@ +Tendermint Go-DB Copyright (C) 2015 All in Bits, Inc + +Released under the Apache2.0 license diff --git a/libs/db/README.md b/libs/db/README.md new file mode 100644 index 000000000..ca5ab33f9 --- /dev/null +++ b/libs/db/README.md @@ -0,0 +1 @@ +TODO: syndtr/goleveldb should be replaced with actual LevelDB instance diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go new file mode 100644 index 000000000..493ed83f9 --- /dev/null +++ b/libs/db/backend_test.go @@ -0,0 +1,215 @@ +package db + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func cleanupDBDir(dir, name string) { 
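+	// Each backend keeps its data under "<dir>/<name>.db"; remove the
+	// whole tree so repeated test runs start from a clean slate.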
+ os.RemoveAll(filepath.Join(dir, name) + ".db") +} + +func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { + // Default + dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) + defer dir.Close() + db := NewDB("testdb", backend, dirname) + + // A nonexistent key should return nil, even if the key is empty + require.Nil(t, db.Get([]byte(""))) + + // A nonexistent key should return nil, even if the key is nil + require.Nil(t, db.Get(nil)) + + // A nonexistent key should return nil. + key := []byte("abc") + require.Nil(t, db.Get(key)) + + // Set empty value. + db.Set(key, []byte("")) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) + + // Set nil value. + db.Set(key, nil) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) + + // Delete. + db.Delete(key) + require.Nil(t, db.Get(key)) +} + +func TestBackendsGetSetDelete(t *testing.T) { + for dbType := range backends { + testBackendGetSetDelete(t, dbType) + } +} + +func withDB(t *testing.T, creator dbCreator, fn func(DB)) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db, err := creator(name, "") + defer cleanupDBDir("", name) + assert.Nil(t, err) + fn(db) + db.Close() +} + +func TestBackendsNilKeys(t *testing.T) { + + // Test all backends. + for dbType, creator := range backends { + withDB(t, creator, func(db DB) { + t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) { + + // Nil keys are treated as the empty key for most operations. + expect := func(key, value []byte) { + if len(key) == 0 { // nil or empty + assert.Equal(t, db.Get(nil), db.Get([]byte(""))) + assert.Equal(t, db.Has(nil), db.Has([]byte(""))) + } + assert.Equal(t, db.Get(key), value) + assert.Equal(t, db.Has(key), value != nil) + } + + // Not set + expect(nil, nil) + + // Set nil value + db.Set(nil, nil) + expect(nil, []byte("")) + + // Set empty value + db.Set(nil, []byte("")) + expect(nil, []byte("")) + + // Set nil, Delete nil + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set nil, Delete empty + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.Delete([]byte("")) + expect(nil, nil) + + // Set empty, Delete nil + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set empty, Delete empty + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete([]byte("")) + expect(nil, nil) + + // SetSync nil, DeleteSync nil + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync nil, DeleteSync empty + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + + // SetSync empty, DeleteSync nil + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync empty, DeleteSync empty + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + }) + }) + } +} + +func TestGoLevelDBBackend(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, GoLevelDBBackend, "") + defer cleanupDBDir("", name) + + _, ok := db.(*GoLevelDB) + assert.True(t, ok) +} + +func TestDBIterator(t *testing.T) { + for dbType := range backends { + t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { + testDBIterator(t, dbType) + }) + } +} + +func testDBIterator(t *testing.T, backend DBBackendType) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, 
backend, "")
+	defer cleanupDBDir("", name)
+
+	for i := 0; i < 10; i++ {
+		if i != 6 { // but skip 6.
+			db.Set(int642Bytes(int64(i)), nil)
+		}
+	}
+
+	verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator")
+	verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator")
+
+	verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0")
+	verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64(nil), "reverse iterator to 10")
+
+	verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0")
+	verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(9), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(8), nil), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8")
+
+	verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6")
+	verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7")
+	verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8")
+	verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7")
+	verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8")
+	verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8")
+
+	verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(4)), []int64{5}, "reverse iterator from 5 to 4")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(4)), []int64{5}, "reverse iterator from 6 to 4")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(4)), []int64{7, 5}, "reverse iterator from 7 to 4")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(5)), []int64(nil), "reverse iterator from 6 to 5")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(5)), []int64{7}, "reverse iterator from 7 to 5")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(6)), []int64{7}, "reverse iterator from 7 to 6")
+
+	verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(9), int642Bytes(8)), []int64{9}, "reverse iterator from 9 to 8")
+
+	verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4")
+	verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64{4, 3}, "reverse iterator from 4 to 2")
+	verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64(nil), "reverse iterator from 2 to 4")
+
+}
+
+func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) {
+	var list []int64
+	for itr.Valid() {
+		list = append(list, bytes2Int64(itr.Key()))
+		itr.Next()
+	}
+	assert.Equal(t, expected, list, msg)
+}
diff --git a/libs/db/c_level_db.go b/libs/db/c_level_db.go
new file mode 100644
index 000000000..307461261
--- 
/dev/null +++ b/libs/db/c_level_db.go @@ -0,0 +1,312 @@ +// +build gcc + +package db + +import ( + "bytes" + "fmt" + "path/filepath" + + "github.com/jmhodges/levigo" +) + +func init() { + dbCreator := func(name string, dir string) (DB, error) { + return NewCLevelDB(name, dir) + } + registerDBCreator(LevelDBBackend, dbCreator, true) + registerDBCreator(CLevelDBBackend, dbCreator, false) +} + +var _ DB = (*CLevelDB)(nil) + +type CLevelDB struct { + db *levigo.DB + ro *levigo.ReadOptions + wo *levigo.WriteOptions + woSync *levigo.WriteOptions +} + +func NewCLevelDB(name string, dir string) (*CLevelDB, error) { + dbPath := filepath.Join(dir, name+".db") + + opts := levigo.NewOptions() + opts.SetCache(levigo.NewLRUCache(1 << 30)) + opts.SetCreateIfMissing(true) + db, err := levigo.Open(dbPath, opts) + if err != nil { + return nil, err + } + ro := levigo.NewReadOptions() + wo := levigo.NewWriteOptions() + woSync := levigo.NewWriteOptions() + woSync.SetSync(true) + database := &CLevelDB{ + db: db, + ro: ro, + wo: wo, + woSync: woSync, + } + return database, nil +} + +// Implements DB. +func (db *CLevelDB) Get(key []byte) []byte { + key = nonNilBytes(key) + res, err := db.db.Get(db.ro, key) + if err != nil { + panic(err) + } + return res +} + +// Implements DB. +func (db *CLevelDB) Has(key []byte) bool { + return db.Get(key) != nil +} + +// Implements DB. +func (db *CLevelDB) Set(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(db.wo, key, value) + if err != nil { + panic(err) + } +} + +// Implements DB. +func (db *CLevelDB) SetSync(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(db.woSync, key, value) + if err != nil { + panic(err) + } +} + +// Implements DB. +func (db *CLevelDB) Delete(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(db.wo, key) + if err != nil { + panic(err) + } +} + +// Implements DB. +func (db *CLevelDB) DeleteSync(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(db.woSync, key) + if err != nil { + panic(err) + } +} + +func (db *CLevelDB) DB() *levigo.DB { + return db.db +} + +// Implements DB. +func (db *CLevelDB) Close() { + db.db.Close() + db.ro.Close() + db.wo.Close() + db.woSync.Close() +} + +// Implements DB. +func (db *CLevelDB) Print() { + itr := db.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} + +// Implements DB. +func (db *CLevelDB) Stats() map[string]string { + // TODO: Find the available properties for the C LevelDB implementation + keys := []string{} + + stats := make(map[string]string) + for _, key := range keys { + str := db.db.PropertyValue(key) + stats[key] = str + } + return stats +} + +//---------------------------------------- +// Batch + +// Implements DB. +func (db *CLevelDB) NewBatch() Batch { + batch := levigo.NewWriteBatch() + return &cLevelDBBatch{db, batch} +} + +type cLevelDBBatch struct { + db *CLevelDB + batch *levigo.WriteBatch +} + +// Implements Batch. +func (mBatch *cLevelDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +// Implements Batch. +func (mBatch *cLevelDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +// Implements Batch. +func (mBatch *cLevelDBBatch) Write() { + err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) + if err != nil { + panic(err) + } +} + +// Implements Batch. 
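+// WriteSync applies the batch with the synchronous write options, so it
+// does not return until LevelDB has flushed the batch to disk. A usage
+// sketch (illustrative only, not part of this diff's tests):
+//
+//	batch := db.NewBatch()
+//	batch.Set([]byte("k1"), []byte("v1"))
+//	batch.Delete([]byte("k2"))
+//	batch.WriteSync() // atomic, and durable once this returns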
+func (mBatch *cLevelDBBatch) WriteSync() { + err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch) + if err != nil { + panic(err) + } +} + +//---------------------------------------- +// Iterator +// NOTE This is almost identical to db/go_level_db.Iterator +// Before creating a third version, refactor. + +func (db *CLevelDB) Iterator(start, end []byte) Iterator { + itr := db.db.NewIterator(db.ro) + return newCLevelDBIterator(itr, start, end, false) +} + +func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { + itr := db.db.NewIterator(db.ro) + return newCLevelDBIterator(itr, start, end, true) +} + +var _ Iterator = (*cLevelDBIterator)(nil) + +type cLevelDBIterator struct { + source *levigo.Iterator + start, end []byte + isReverse bool + isInvalid bool +} + +func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { + if isReverse { + if start == nil { + source.SeekToLast() + } else { + source.Seek(start) + if source.Valid() { + soakey := source.Key() // start or after key + if bytes.Compare(start, soakey) < 0 { + source.Prev() + } + } else { + source.SeekToLast() + } + } + } else { + if start == nil { + source.SeekToFirst() + } else { + source.Seek(start) + } + } + return &cLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, + } +} + +func (itr cLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +func (itr cLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { + return false + } + + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false + } + + // If key is end or past it, invalid. + var end = itr.end + var key = itr.source.Key() + if itr.isReverse { + if end != nil && bytes.Compare(key, end) <= 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } + } + + // It's valid. 
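+	// NOTE: The domain is half-open: [start, end) going forward and
+	// (end, start] in reverse, which is why the two comparisons above
+	// differ by direction.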
+ return true +} + +func (itr cLevelDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Key() +} + +func (itr cLevelDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Value() +} + +func (itr cLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } +} + +func (itr cLevelDBIterator) Close() { + itr.source.Close() +} + +func (itr cLevelDBIterator) assertNoError() { + if err := itr.source.GetError(); err != nil { + panic(err) + } +} + +func (itr cLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("cLevelDBIterator is invalid") + } +} diff --git a/libs/db/c_level_db_test.go b/libs/db/c_level_db_test.go new file mode 100644 index 000000000..2d30500dd --- /dev/null +++ b/libs/db/c_level_db_test.go @@ -0,0 +1,96 @@ +// +build gcc + +package db + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func BenchmarkRandomReadsWrites2(b *testing.B) { + b.StopTimer() + + numItems := int64(1000000) + internal := map[int64]int64{} + for i := 0; i < int(numItems); i++ { + internal[int64(i)] = int64(0) + } + db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + if err != nil { + b.Fatal(err.Error()) + return + } + + fmt.Println("ok, starting") + b.StartTimer() + + for i := 0; i < b.N; i++ { + // Write something + { + idx := (int64(cmn.RandInt()) % numItems) + internal[idx] += 1 + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := int642Bytes(int64(val)) + //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) + db.Set( + idxBytes, + valBytes, + ) + } + // Read something + { + idx := (int64(cmn.RandInt()) % numItems) + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := db.Get(idxBytes) + //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) + if val == 0 { + if !bytes.Equal(valBytes, nil) { + b.Errorf("Expected %v for %v, got %X", + nil, idx, valBytes) + break + } + } else { + if len(valBytes) != 8 { + b.Errorf("Expected length 8 for %v, got %X", + idx, valBytes) + break + } + valGot := bytes2Int64(valBytes) + if val != valGot { + b.Errorf("Expected %v for %v, got %v", + val, idx, valGot) + break + } + } + } + } + + db.Close() +} + +/* +func int642Bytes(i int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(i)) + return buf +} + +func bytes2Int64(buf []byte) int64 { + return int64(binary.BigEndian.Uint64(buf)) +} +*/ + +func TestCLevelDBBackend(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackend, "") + defer cleanupDBDir("", name) + + _, ok := db.(*CLevelDB) + assert.True(t, ok) +} diff --git a/libs/db/common_test.go b/libs/db/common_test.go new file mode 100644 index 000000000..027b8ee53 --- /dev/null +++ b/libs/db/common_test.go @@ -0,0 +1,191 @@ +package db + +import ( + "fmt" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" +) + +//---------------------------------------- +// Helper functions. 
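+//
+// These assertion helpers are shared by the backend and iterator tests in
+// this package. A minimal sketch of how they compose over a DB holding a
+// single key (mirroring TestDBIteratorSingleKey in db_test.go):
+//
+//	itr := db.Iterator(nil, nil)
+//	checkValid(t, itr, true)  // positioned on the only key
+//	checkNext(t, itr, false)  // stepping past the end invalidates it
+//	checkInvalid(t, itr)      // and it must stay invalid from then on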
+
+func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) {
+	valueGot := db.Get(key)
+	assert.Equal(t, valueWanted, valueGot)
+}
+
+func checkValid(t *testing.T, itr Iterator, expected bool) {
+	valid := itr.Valid()
+	require.Equal(t, expected, valid)
+}
+
+func checkNext(t *testing.T, itr Iterator, expected bool) {
+	itr.Next()
+	valid := itr.Valid()
+	require.Equal(t, expected, valid)
+}
+
+func checkNextPanics(t *testing.T, itr Iterator) {
+	assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't")
+}
+
+func checkDomain(t *testing.T, itr Iterator, start, end []byte) {
+	ds, de := itr.Domain()
+	assert.Equal(t, start, ds, "checkDomain domain start incorrect")
+	assert.Equal(t, end, de, "checkDomain domain end incorrect")
+}
+
+func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) {
+	k, v := itr.Key(), itr.Value()
+	assert.Exactly(t, key, k)
+	assert.Exactly(t, value, v)
+}
+
+func checkInvalid(t *testing.T, itr Iterator) {
+	checkValid(t, itr, false)
+	checkKeyPanics(t, itr)
+	checkValuePanics(t, itr)
+	checkNextPanics(t, itr)
+}
+
+func checkKeyPanics(t *testing.T, itr Iterator) {
+	assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
+}
+
+func checkValuePanics(t *testing.T, itr Iterator) {
+	assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't")
+}
+
+func newTempDB(t *testing.T, backend DBBackendType) (db DB) {
+	dir, dirname := cmn.Tempdir("db_common_test")
+	db = NewDB("testdb", backend, dirname)
+	dir.Close()
+	return db
+}
+
+//----------------------------------------
+// mockDB
+
+// NOTE: not actually goroutine safe.
+// If you want something goroutine safe, maybe you just want a MemDB.
+type mockDB struct {
+	mtx   sync.Mutex
+	calls map[string]int
+}
+
+func newMockDB() *mockDB {
+	return &mockDB{
+		calls: make(map[string]int),
+	}
+}
+
+func (mdb *mockDB) Mutex() *sync.Mutex {
+	return &(mdb.mtx)
+}
+
+func (mdb *mockDB) Get([]byte) []byte {
+	mdb.calls["Get"]++
+	return nil
+}
+
+func (mdb *mockDB) Has([]byte) bool {
+	mdb.calls["Has"]++
+	return false
+}
+
+func (mdb *mockDB) Set([]byte, []byte) {
+	mdb.calls["Set"]++
+}
+
+func (mdb *mockDB) SetSync([]byte, []byte) {
+	mdb.calls["SetSync"]++
+}
+
+func (mdb *mockDB) SetNoLock([]byte, []byte) {
+	mdb.calls["SetNoLock"]++
+}
+
+func (mdb *mockDB) SetNoLockSync([]byte, []byte) {
+	mdb.calls["SetNoLockSync"]++
+}
+
+func (mdb *mockDB) Delete([]byte) {
+	mdb.calls["Delete"]++
+}
+
+func (mdb *mockDB) DeleteSync([]byte) {
+	mdb.calls["DeleteSync"]++
+}
+
+func (mdb *mockDB) DeleteNoLock([]byte) {
+	mdb.calls["DeleteNoLock"]++
+}
+
+func (mdb *mockDB) DeleteNoLockSync([]byte) {
+	mdb.calls["DeleteNoLockSync"]++
+}
+
+func (mdb *mockDB) Iterator(start, end []byte) Iterator {
+	mdb.calls["Iterator"]++
+	return &mockIterator{}
+}
+
+func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator {
+	mdb.calls["ReverseIterator"]++
+	return &mockIterator{}
+}
+
+func (mdb *mockDB) Close() {
+	mdb.calls["Close"]++
+}
+
+func (mdb *mockDB) NewBatch() Batch {
+	mdb.calls["NewBatch"]++
+	return &memBatch{db: mdb}
+}
+
+func (mdb *mockDB) Print() {
+	mdb.calls["Print"]++
+	fmt.Printf("mockDB{%v}", mdb.Stats())
+}
+
+func (mdb *mockDB) Stats() map[string]string {
+	mdb.calls["Stats"]++
+
+	res := make(map[string]string)
+	for key, count := range mdb.calls {
+		res[key] = fmt.Sprintf("%d", count)
+	}
+	return res
+}
+
+//----------------------------------------
+// mockIterator
+
+type mockIterator struct{}
+
+func 
(mockIterator) Domain() (start []byte, end []byte) { + return nil, nil +} + +func (mockIterator) Valid() bool { + return false +} + +func (mockIterator) Next() { +} + +func (mockIterator) Key() []byte { + return nil +} + +func (mockIterator) Value() []byte { + return nil +} + +func (mockIterator) Close() { +} diff --git a/libs/db/db.go b/libs/db/db.go new file mode 100644 index 000000000..869937660 --- /dev/null +++ b/libs/db/db.go @@ -0,0 +1,36 @@ +package db + +import "fmt" + +//---------------------------------------- +// Main entry + +type DBBackendType string + +const ( + LevelDBBackend DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend DBBackendType = "cleveldb" + GoLevelDBBackend DBBackendType = "goleveldb" + MemDBBackend DBBackendType = "memdb" + FSDBBackend DBBackendType = "fsdb" // using the filesystem naively +) + +type dbCreator func(name string, dir string) (DB, error) + +var backends = map[DBBackendType]dbCreator{} + +func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) { + _, ok := backends[backend] + if !force && ok { + return + } + backends[backend] = creator +} + +func NewDB(name string, backend DBBackendType, dir string) DB { + db, err := backends[backend](name, dir) + if err != nil { + panic(fmt.Sprintf("Error initializing DB: %v", err)) + } + return db +} diff --git a/libs/db/db_test.go b/libs/db/db_test.go new file mode 100644 index 000000000..a56901016 --- /dev/null +++ b/libs/db/db_test.go @@ -0,0 +1,194 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDBIteratorSingleKey(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator(nil, nil) + + checkValid(t, itr, true) + checkNext(t, itr, false) + checkValid(t, itr, false) + checkNextPanics(t, itr) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorTwoKeys(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + db.SetSync(bz("2"), bz("value_1")) + + { // Fail by calling Next too much + itr := db.Iterator(nil, nil) + checkValid(t, itr, true) + + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkNext(t, itr, false) + checkValid(t, itr, false) + + checkNextPanics(t, itr) + + // Once invalid... 
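+				// The invalid state is terminal: Key, Value, and Next
+				// must all panic from here on, which is exactly what
+				// checkInvalid asserts.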
+ checkInvalid(t, itr) + } + }) + } +} + +func TestDBIteratorMany(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + + keys := make([][]byte, 100) + for i := 0; i < 100; i++ { + keys[i] = []byte{byte(i)} + } + + value := []byte{5} + for _, k := range keys { + db.Set(k, value) + } + + itr := db.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + assert.Equal(t, db.Get(itr.Key()), itr.Value()) + } + }) + } +} + +func TestDBIteratorEmpty(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator(nil, nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorEmptyBeginAfter(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator(bz("1"), nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorNonemptyBeginAfter(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator(bz("2"), nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBBatchWrite1(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.Write() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 3, mdb.calls["SetNoLock"]) + assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWrite2(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + batch.Write() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 3, mdb.calls["SetNoLock"]) + assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWriteSync1(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.WriteSync() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 2, mdb.calls["SetNoLock"]) + assert.Equal(t, 1, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWriteSync2(t *testing.T) { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + batch.WriteSync() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + 
assert.Equal(t, 3, mdb.calls["SetNoLock"])
+	assert.Equal(t, 0, mdb.calls["SetNoLockSync"])
+	assert.Equal(t, 0, mdb.calls["Delete"])
+	assert.Equal(t, 0, mdb.calls["DeleteSync"])
+	assert.Equal(t, 0, mdb.calls["DeleteNoLock"])
+	assert.Equal(t, 1, mdb.calls["DeleteNoLockSync"])
+}
diff --git a/libs/db/debug_db.go b/libs/db/debug_db.go
new file mode 100644
index 000000000..bb361a266
--- /dev/null
+++ b/libs/db/debug_db.go
@@ -0,0 +1,252 @@
+package db
+
+import (
+	"fmt"
+	"sync"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+//----------------------------------------
+// debugDB
+
+type debugDB struct {
+	label string
+	db    DB
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugDB(label string, db DB) debugDB {
+	return debugDB{
+		label: label,
+		db:    db,
+	}
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) Mutex() *sync.Mutex { return nil }
+
+// Implements DB.
+func (ddb debugDB) Get(key []byte) (value []byte) {
+	defer func() {
+		fmt.Printf("%v.Get(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue),
+			cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	}()
+	value = ddb.db.Get(key)
+	return
+}
+
+// Implements DB.
+func (ddb debugDB) Has(key []byte) (has bool) {
+	defer func() {
+		fmt.Printf("%v.Has(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), has)
+	}()
+	return ddb.db.Has(key)
+}
+
+// Implements DB.
+func (ddb debugDB) Set(key []byte, value []byte) {
+	fmt.Printf("%v.Set(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.Set(key, value)
+}
+
+// Implements DB.
+func (ddb debugDB) SetSync(key []byte, value []byte) {
+	fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.SetSync(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) SetNoLock(key []byte, value []byte) {
+	fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.(atomicSetDeleter).SetNoLock(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) SetNoLockSync(key []byte, value []byte) {
+	fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	ddb.db.(atomicSetDeleter).SetNoLockSync(key, value)
+}
+
+// Implements DB.
+func (ddb debugDB) Delete(key []byte) {
+	fmt.Printf("%v.Delete(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.Delete(key)
+}
+
+// Implements DB.
+func (ddb debugDB) DeleteSync(key []byte) {
+	fmt.Printf("%v.DeleteSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.DeleteSync(key)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) DeleteNoLock(key []byte) {
+	fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.(atomicSetDeleter).DeleteNoLock(key)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) DeleteNoLockSync(key []byte) {
+	fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	ddb.db.(atomicSetDeleter).DeleteNoLockSync(key)
+}
+
+// Implements DB.
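+// The wrappers below add the same tracing to every step of an iteration.
+// A quick sketch of the intended debugging flow (any concrete DB can be
+// wrapped, e.g. a MemDB):
+//
+//	ddb := NewDebugDB("state", db)
+//	itr := ddb.Iterator(nil, nil)
+//	for ; itr.Valid(); itr.Next() {
+//		_ = itr.Key() // every call is printed under the "state" label
+//	}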
+func (ddb debugDB) Iterator(start, end []byte) Iterator {
+	fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
+	return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end))
+}
+
+// Implements DB.
+func (ddb debugDB) ReverseIterator(start, end []byte) Iterator {
+	fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
+	return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end))
+}
+
+// Implements DB.
+// Panics if the underlying db is not an
+// atomicSetDeleter.
+func (ddb debugDB) NewBatch() Batch {
+	fmt.Printf("%v.NewBatch()\n", ddb.label)
+	return NewDebugBatch(ddb.label, ddb.db.NewBatch())
+}
+
+// Implements DB.
+func (ddb debugDB) Close() {
+	fmt.Printf("%v.Close()\n", ddb.label)
+	ddb.db.Close()
+}
+
+// Implements DB.
+func (ddb debugDB) Print() {
+	ddb.db.Print()
+}
+
+// Implements DB.
+func (ddb debugDB) Stats() map[string]string {
+	return ddb.db.Stats()
+}
+
+//----------------------------------------
+// debugIterator
+
+type debugIterator struct {
+	label string
+	itr   Iterator
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugIterator(label string, itr Iterator) debugIterator {
+	return debugIterator{
+		label: label,
+		itr:   itr,
+	}
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Domain() (start []byte, end []byte) {
+	defer func() {
+		fmt.Printf("%v.itr.Domain() (%X,%X)\n", ditr.label, start, end)
+	}()
+	start, end = ditr.itr.Domain()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Valid() (ok bool) {
+	defer func() {
+		fmt.Printf("%v.itr.Valid() %v\n", ditr.label, ok)
+	}()
+	ok = ditr.itr.Valid()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Next() {
+	fmt.Printf("%v.itr.Next()\n", ditr.label)
+	ditr.itr.Next()
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Key() (key []byte) {
+	key = ditr.itr.Key()
+	fmt.Printf("%v.itr.Key() %v\n", ditr.label,
+		cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue))
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Value() (value []byte) {
+	value = ditr.itr.Value()
+	fmt.Printf("%v.itr.Value() %v\n", ditr.label,
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Close() {
+	fmt.Printf("%v.itr.Close()\n", ditr.label)
+	ditr.itr.Close()
+}
+
+//----------------------------------------
+// debugBatch
+
+type debugBatch struct {
+	label string
+	bch   Batch
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugBatch(label string, bch Batch) debugBatch {
+	return debugBatch{
+		label: label,
+		bch:   bch,
+	}
+}
+
+// Implements Batch.
+func (dbch debugBatch) Set(key, value []byte) {
+	fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
+	dbch.bch.Set(key, value)
+}
+
+// Implements Batch.
+func (dbch debugBatch) Delete(key []byte) {
+	fmt.Printf("%v.batch.Delete(%v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
+	dbch.bch.Delete(key)
+}
+
+// Implements Batch.
+func (dbch debugBatch) Write() {
+	fmt.Printf("%v.batch.Write()\n", dbch.label)
+	dbch.bch.Write()
+}
+
+// Implements Batch.
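+// WriteSync traces the flush and delegates to the wrapped batch. In the
+// TestDBBatchWrite* tests above, that wrapped batch is a memBatch over a
+// mockDB, which is how the queued Set/Delete calls surface as the counted
+// SetNoLock/DeleteNoLock(Sync) operations.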
+func (dbch debugBatch) WriteSync() { + fmt.Printf("%v.batch.WriteSync()\n", dbch.label) + dbch.bch.WriteSync() +} diff --git a/libs/db/fsdb.go b/libs/db/fsdb.go new file mode 100644 index 000000000..fc861decc --- /dev/null +++ b/libs/db/fsdb.go @@ -0,0 +1,262 @@ +package db + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + keyPerm = os.FileMode(0600) + dirPerm = os.FileMode(0700) +) + +func init() { + registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) { + dbPath := filepath.Join(dir, name+".db") + return NewFSDB(dbPath), nil + }, false) +} + +var _ DB = (*FSDB)(nil) + +// It's slow. +type FSDB struct { + mtx sync.Mutex + dir string +} + +func NewFSDB(dir string) *FSDB { + err := os.MkdirAll(dir, dirPerm) + if err != nil { + panic(errors.Wrap(err, "Creating FSDB dir "+dir)) + } + database := &FSDB{ + dir: dir, + } + return database +} + +func (db *FSDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + key = escapeKey(key) + + path := db.nameToPath(key) + value, err := read(path) + if os.IsNotExist(err) { + return nil + } else if err != nil { + panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) + } + return value +} + +func (db *FSDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + key = escapeKey(key) + + path := db.nameToPath(key) + return cmn.FileExists(path) +} + +func (db *FSDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *FSDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// NOTE: Implements atomicSetDeleter. +func (db *FSDB) SetNoLock(key []byte, value []byte) { + key = escapeKey(key) + value = nonNilBytes(value) + path := db.nameToPath(key) + err := write(path, value) + if err != nil { + panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key)) + } +} + +func (db *FSDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *FSDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// NOTE: Implements atomicSetDeleter. +func (db *FSDB) DeleteNoLock(key []byte) { + key = escapeKey(key) + path := db.nameToPath(key) + err := remove(path) + if os.IsNotExist(err) { + return + } else if err != nil { + panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key)) + } +} + +func (db *FSDB) Close() { + // Nothing to do. +} + +func (db *FSDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Print not yet implemented") +} + +func (db *FSDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Stats not yet implemented") +} + +func (db *FSDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() + + // Not sure we would ever want to try... + // It doesn't seem easy for general filesystems. + panic("FSDB.NewBatch not yet implemented") +} + +func (db *FSDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +func (db *FSDB) Iterator(start, end []byte) Iterator { + return db.MakeIterator(start, end, false) +} + +func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator { + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. 
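+	// Keys are listed and sorted eagerly, so the iterator below walks a
+	// snapshot of the key set taken now rather than tracking later writes.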
+ keys, err := list(db.dir, start, end, isReversed)
+ if err != nil {
+ panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
+ }
+ if isReversed {
+ sort.Sort(sort.Reverse(sort.StringSlice(keys)))
+ } else {
+ sort.Strings(keys)
+ }
+ return newMemDBIterator(db, keys, start, end)
+}
+
+func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
+ return db.MakeIterator(start, end, true)
+}
+
+func (db *FSDB) nameToPath(name []byte) string {
+ n := url.PathEscape(string(name))
+ return filepath.Join(db.dir, n)
+}
+
+// Read some bytes from a file.
+// CONTRACT: returns os errors directly without wrapping.
+func read(path string) ([]byte, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ d, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+}
+
+// Write some bytes to a file.
+// CONTRACT: returns os errors directly without wrapping.
+func write(path string, d []byte) error {
+ f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ _, err = f.Write(d)
+ if err != nil {
+ return err
+ }
+ err = f.Sync()
+ return err
+}
+
+// Remove a file.
+// CONTRACT: returns os errors directly without wrapping.
+func remove(path string) error {
+ return os.Remove(path)
+}
+
+// List keys in a directory, stripping off escape sequences and dir portions.
+// CONTRACT: returns os errors directly without wrapping.
+func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) {
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+
+ names, err := dir.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+ var keys []string
+ for _, name := range names {
+ n, err := url.PathUnescape(name)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to unescape %s while listing", name)
+ }
+ key := unescapeKey([]byte(n))
+ if IsKeyInDomain(key, start, end, isReversed) {
+ keys = append(keys, string(key))
+ }
+ }
+ return keys, nil
+}
+
+// To support empty or nil keys, since the file system doesn't allow empty
+// filenames.
+func escapeKey(key []byte) []byte {
+ return []byte("k_" + string(key))
+}
+func unescapeKey(escKey []byte) []byte {
+ if len(escKey) < 2 {
+ panic(fmt.Sprintf("Invalid esc key: %x", escKey))
+ }
+ if string(escKey[:2]) != "k_" {
+ panic(fmt.Sprintf("Invalid esc key: %x", escKey))
+ }
+ return escKey[2:]
+}
diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go
new file mode 100644
index 000000000..349e447b2
--- /dev/null
+++ b/libs/db/go_level_db.go
@@ -0,0 +1,327 @@
+package db
+
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+
+ cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+func init() {
+ dbCreator := func(name string, dir string) (DB, error) {
+ return NewGoLevelDB(name, dir)
+ }
+ registerDBCreator(LevelDBBackend, dbCreator, false)
+ registerDBCreator(GoLevelDBBackend, dbCreator, false)
+}
+
+var _ DB = (*GoLevelDB)(nil)
+
+type GoLevelDB struct {
+ db *leveldb.DB
+}
+
+func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
+ dbPath := filepath.Join(dir, name+".db")
+ db, err := leveldb.OpenFile(dbPath, nil)
+ if err != nil {
+ return nil, err
+ }
+ database := &GoLevelDB{
+ db: db,
+ }
+ return database, nil
+}
+
+// Implements DB.
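+// Get returns nil if the key is absent, and panics on any other
+// database error. A minimal usage sketch (all names as defined in
+// this file):
+//
+//  db, err := NewGoLevelDB("mydb", "/tmp")
+//  if err != nil {
+//      panic(err)
+//  }
+//  db.Set([]byte("k"), []byte("v"))
+//  v := db.Get([]byte("k")) // []byte("v"); a missing key yields nil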
+func (db *GoLevelDB) Get(key []byte) []byte { + key = nonNilBytes(key) + res, err := db.db.Get(key, nil) + if err != nil { + if err == errors.ErrNotFound { + return nil + } + panic(err) + } + return res +} + +// Implements DB. +func (db *GoLevelDB) Has(key []byte) bool { + return db.Get(key) != nil +} + +// Implements DB. +func (db *GoLevelDB) Set(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(key, value, nil) + if err != nil { + cmn.PanicCrisis(err) + } +} + +// Implements DB. +func (db *GoLevelDB) SetSync(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) + if err != nil { + cmn.PanicCrisis(err) + } +} + +// Implements DB. +func (db *GoLevelDB) Delete(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(key, nil) + if err != nil { + cmn.PanicCrisis(err) + } +} + +// Implements DB. +func (db *GoLevelDB) DeleteSync(key []byte) { + key = nonNilBytes(key) + err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) + if err != nil { + cmn.PanicCrisis(err) + } +} + +func (db *GoLevelDB) DB() *leveldb.DB { + return db.db +} + +// Implements DB. +func (db *GoLevelDB) Close() { + db.db.Close() +} + +// Implements DB. +func (db *GoLevelDB) Print() { + str, _ := db.db.GetProperty("leveldb.stats") + fmt.Printf("%v\n", str) + + itr := db.db.NewIterator(nil, nil) + for itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} + +// Implements DB. +func (db *GoLevelDB) Stats() map[string]string { + keys := []string{ + "leveldb.num-files-at-level{n}", + "leveldb.stats", + "leveldb.sstables", + "leveldb.blockpool", + "leveldb.cachedblock", + "leveldb.openedtables", + "leveldb.alivesnaps", + "leveldb.aliveiters", + } + + stats := make(map[string]string) + for _, key := range keys { + str, err := db.db.GetProperty(key) + if err == nil { + stats[key] = str + } + } + return stats +} + +//---------------------------------------- +// Batch + +// Implements DB. +func (db *GoLevelDB) NewBatch() Batch { + batch := new(leveldb.Batch) + return &goLevelDBBatch{db, batch} +} + +type goLevelDBBatch struct { + db *GoLevelDB + batch *leveldb.Batch +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) Write() { + err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false}) + if err != nil { + panic(err) + } +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) WriteSync() { + err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true}) + if err != nil { + panic(err) + } +} + +//---------------------------------------- +// Iterator +// NOTE This is almost identical to db/c_level_db.Iterator +// Before creating a third version, refactor. + +// Implements DB. +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, false) +} + +// Implements DB. 
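+// ReverseIterator iterates in descending key order: start is the
+// (inclusive) upper bound and end is the (exclusive) lower bound,
+// mirroring Iterator's half-open [start, end) domain.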
+func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, true) +} + +type goLevelDBIterator struct { + source iterator.Iterator + start []byte + end []byte + isReverse bool + isInvalid bool +} + +var _ Iterator = (*goLevelDBIterator)(nil) + +func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { + if isReverse { + if start == nil { + source.Last() + } else { + valid := source.Seek(start) + if valid { + soakey := source.Key() // start or after key + if bytes.Compare(start, soakey) < 0 { + source.Prev() + } + } else { + source.Last() + } + } + } else { + if start == nil { + source.First() + } else { + source.Seek(start) + } + } + return &goLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, + } +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { + return false + } + + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false + } + + // If key is end or past it, invalid. + var end = itr.end + var key = itr.source.Key() + + if itr.isReverse { + if end != nil && bytes.Compare(key, end) <= 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } + } + + // Valid + return true +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Key() []byte { + // Key returns a copy of the current key. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 + itr.assertNoError() + itr.assertIsValid() + return cp(itr.source.Key()) +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Value() []byte { + // Value returns a copy of the current value. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 + itr.assertNoError() + itr.assertIsValid() + return cp(itr.source.Value()) +} + +// Implements Iterator. +func (itr *goLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } +} + +// Implements Iterator. 
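+// Close releases the underlying goleveldb iterator; the iterator
+// must not be used after this call.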
+func (itr *goLevelDBIterator) Close() { + itr.source.Release() +} + +func (itr *goLevelDBIterator) assertNoError() { + if err := itr.source.Error(); err != nil { + panic(err) + } +} + +func (itr goLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("goLevelDBIterator is invalid") + } +} diff --git a/libs/db/go_level_db_test.go b/libs/db/go_level_db_test.go new file mode 100644 index 000000000..47be216a6 --- /dev/null +++ b/libs/db/go_level_db_test.go @@ -0,0 +1,83 @@ +package db + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func BenchmarkRandomReadsWrites(b *testing.B) { + b.StopTimer() + + numItems := int64(1000000) + internal := map[int64]int64{} + for i := 0; i < int(numItems); i++ { + internal[int64(i)] = int64(0) + } + db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + if err != nil { + b.Fatal(err.Error()) + return + } + + fmt.Println("ok, starting") + b.StartTimer() + + for i := 0; i < b.N; i++ { + // Write something + { + idx := (int64(cmn.RandInt()) % numItems) + internal[idx]++ + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := int642Bytes(int64(val)) + //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) + db.Set( + idxBytes, + valBytes, + ) + } + // Read something + { + idx := (int64(cmn.RandInt()) % numItems) + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := db.Get(idxBytes) + //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) + if val == 0 { + if !bytes.Equal(valBytes, nil) { + b.Errorf("Expected %v for %v, got %X", + nil, idx, valBytes) + break + } + } else { + if len(valBytes) != 8 { + b.Errorf("Expected length 8 for %v, got %X", + idx, valBytes) + break + } + valGot := bytes2Int64(valBytes) + if val != valGot { + b.Errorf("Expected %v for %v, got %v", + val, idx, valGot) + break + } + } + } + } + + db.Close() +} + +func int642Bytes(i int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(i)) + return buf +} + +func bytes2Int64(buf []byte) int64 { + return int64(binary.BigEndian.Uint64(buf)) +} diff --git a/libs/db/mem_batch.go b/libs/db/mem_batch.go new file mode 100644 index 000000000..5c5d0c13a --- /dev/null +++ b/libs/db/mem_batch.go @@ -0,0 +1,72 @@ +package db + +import ( + "sync" +) + +type atomicSetDeleter interface { + Mutex() *sync.Mutex + SetNoLock(key, value []byte) + SetNoLockSync(key, value []byte) + DeleteNoLock(key []byte) + DeleteNoLockSync(key []byte) +} + +type memBatch struct { + db atomicSetDeleter + ops []operation +} + +type opType int + +const ( + opTypeSet opType = 1 + opTypeDelete opType = 2 +) + +type operation struct { + opType + key []byte + value []byte +} + +func (mBatch *memBatch) Set(key, value []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) +} + +func (mBatch *memBatch) Delete(key []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) +} + +func (mBatch *memBatch) Write() { + mBatch.write(false) +} + +func (mBatch *memBatch) WriteSync() { + mBatch.write(true) +} + +func (mBatch *memBatch) write(doSync bool) { + if mtx := mBatch.db.Mutex(); mtx != nil { + mtx.Lock() + defer mtx.Unlock() + } + + for i, op := range mBatch.ops { + if doSync && i == (len(mBatch.ops)-1) { + switch op.opType { + case opTypeSet: + mBatch.db.SetNoLockSync(op.key, op.value) + case opTypeDelete: + mBatch.db.DeleteNoLockSync(op.key) + } + break // we're done. 
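+ // Earlier operations in the batch were applied (unsynced) in
+ // previous iterations, so syncing just this final one persists
+ // the whole batch.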
+ } + switch op.opType { + case opTypeSet: + mBatch.db.SetNoLock(op.key, op.value) + case opTypeDelete: + mBatch.db.DeleteNoLock(op.key) + } + } +} diff --git a/libs/db/mem_db.go b/libs/db/mem_db.go new file mode 100644 index 000000000..580123017 --- /dev/null +++ b/libs/db/mem_db.go @@ -0,0 +1,255 @@ +package db + +import ( + "fmt" + "sort" + "sync" +) + +func init() { + registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) { + return NewMemDB(), nil + }, false) +} + +var _ DB = (*MemDB)(nil) + +type MemDB struct { + mtx sync.Mutex + db map[string][]byte +} + +func NewMemDB() *MemDB { + database := &MemDB{ + db: make(map[string][]byte), + } + return database +} + +// Implements atomicSetDeleter. +func (db *MemDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +// Implements DB. +func (db *MemDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + key = nonNilBytes(key) + + value := db.db[string(key)] + return value +} + +// Implements DB. +func (db *MemDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + key = nonNilBytes(key) + + _, ok := db.db[string(key)] + return ok +} + +// Implements DB. +func (db *MemDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// Implements DB. +func (db *MemDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// Implements atomicSetDeleter. +func (db *MemDB) SetNoLock(key []byte, value []byte) { + db.SetNoLockSync(key, value) +} + +// Implements atomicSetDeleter. +func (db *MemDB) SetNoLockSync(key []byte, value []byte) { + key = nonNilBytes(key) + value = nonNilBytes(value) + + db.db[string(key)] = value +} + +// Implements DB. +func (db *MemDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// Implements DB. +func (db *MemDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// Implements atomicSetDeleter. +func (db *MemDB) DeleteNoLock(key []byte) { + db.DeleteNoLockSync(key) +} + +// Implements atomicSetDeleter. +func (db *MemDB) DeleteNoLockSync(key []byte) { + key = nonNilBytes(key) + + delete(db.db, string(key)) +} + +// Implements DB. +func (db *MemDB) Close() { + // Close is a noop since for an in-memory + // database, we don't have a destination + // to flush contents to nor do we want + // any data loss on invoking Close() + // See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 +} + +// Implements DB. +func (db *MemDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + for key, value := range db.db { + fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) + } +} + +// Implements DB. +func (db *MemDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + stats := make(map[string]string) + stats["database.type"] = "memDB" + stats["database.size"] = fmt.Sprintf("%d", len(db.db)) + return stats +} + +// Implements DB. +func (db *MemDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() + + return &memBatch{db, nil} +} + +//---------------------------------------- +// Iterator + +// Implements DB. +func (db *MemDB) Iterator(start, end []byte) Iterator { + db.mtx.Lock() + defer db.mtx.Unlock() + + keys := db.getSortedKeys(start, end, false) + return newMemDBIterator(db, keys, start, end) +} + +// Implements DB. 
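+// ReverseIterator returns the matching keys in descending order. Like
+// Iterator, it copies and sorts the matching keys up front (see
+// getSortedKeys), so it iterates over a snapshot of the key set taken
+// at creation time.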
+func (db *MemDB) ReverseIterator(start, end []byte) Iterator { + db.mtx.Lock() + defer db.mtx.Unlock() + + keys := db.getSortedKeys(start, end, true) + return newMemDBIterator(db, keys, start, end) +} + +// We need a copy of all of the keys. +// Not the best, but probably not a bottleneck depending. +type memDBIterator struct { + db DB + cur int + keys []string + start []byte + end []byte +} + +var _ Iterator = (*memDBIterator)(nil) + +// Keys is expected to be in reverse order for reverse iterators. +func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { + return &memDBIterator{ + db: db, + cur: 0, + keys: keys, + start: start, + end: end, + } +} + +// Implements Iterator. +func (itr *memDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Implements Iterator. +func (itr *memDBIterator) Valid() bool { + return 0 <= itr.cur && itr.cur < len(itr.keys) +} + +// Implements Iterator. +func (itr *memDBIterator) Next() { + itr.assertIsValid() + itr.cur++ +} + +// Implements Iterator. +func (itr *memDBIterator) Key() []byte { + itr.assertIsValid() + return []byte(itr.keys[itr.cur]) +} + +// Implements Iterator. +func (itr *memDBIterator) Value() []byte { + itr.assertIsValid() + key := []byte(itr.keys[itr.cur]) + return itr.db.Get(key) +} + +// Implements Iterator. +func (itr *memDBIterator) Close() { + itr.keys = nil + itr.db = nil +} + +func (itr *memDBIterator) assertIsValid() { + if !itr.Valid() { + panic("memDBIterator is invalid") + } +} + +//---------------------------------------- +// Misc. + +func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { + keys := []string{} + for key := range db.db { + inDomain := IsKeyInDomain([]byte(key), start, end, reverse) + if inDomain { + keys = append(keys, key) + } + } + sort.Strings(keys) + if reverse { + nkeys := len(keys) + for i := 0; i < nkeys/2; i++ { + temp := keys[i] + keys[i] = keys[nkeys-i-1] + keys[nkeys-i-1] = temp + } + } + return keys +} diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go new file mode 100644 index 000000000..5bb53ebd9 --- /dev/null +++ b/libs/db/prefix_db.go @@ -0,0 +1,355 @@ +package db + +import ( + "bytes" + "fmt" + "sync" +) + +// IteratePrefix is a convenience function for iterating over a key domain +// restricted by prefix. +func IteratePrefix(db DB, prefix []byte) Iterator { + var start, end []byte + if len(prefix) == 0 { + start = nil + end = nil + } else { + start = cp(prefix) + end = cpIncr(prefix) + } + return db.Iterator(start, end) +} + +/* +TODO: Make test, maybe rename. +// Like IteratePrefix but the iterator strips the prefix from the keys. +func IteratePrefixStripped(db DB, prefix []byte) Iterator { + start, end := ... + return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix)) +} +*/ + +//---------------------------------------- +// prefixDB + +type prefixDB struct { + mtx sync.Mutex + prefix []byte + db DB +} + +// NewPrefixDB lets you namespace multiple DBs within a single DB. +func NewPrefixDB(db DB, prefix []byte) *prefixDB { + return &prefixDB{ + prefix: prefix, + db: db, + } +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) Mutex() *sync.Mutex { + return &(pdb.mtx) +} + +// Implements DB. +func (pdb *prefixDB) Get(key []byte) []byte { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pkey := pdb.prefixed(key) + value := pdb.db.Get(pkey) + return value +} + +// Implements DB. 
+func (pdb *prefixDB) Has(key []byte) bool { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return pdb.db.Has(pdb.prefixed(key)) +} + +// Implements DB. +func (pdb *prefixDB) Set(key []byte, value []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pkey := pdb.prefixed(key) + pdb.db.Set(pkey, value) +} + +// Implements DB. +func (pdb *prefixDB) SetSync(key []byte, value []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.SetSync(pdb.prefixed(key), value) +} + +// Implements DB. +func (pdb *prefixDB) Delete(key []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.Delete(pdb.prefixed(key)) +} + +// Implements DB. +func (pdb *prefixDB) DeleteSync(key []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.DeleteSync(pdb.prefixed(key)) +} + +// Implements DB. +func (pdb *prefixDB) Iterator(start, end []byte) Iterator { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + var pstart, pend []byte + pstart = append(cp(pdb.prefix), start...) + if end == nil { + pend = cpIncr(pdb.prefix) + } else { + pend = append(cp(pdb.prefix), end...) + } + return newPrefixIterator( + pdb.prefix, + start, + end, + pdb.db.Iterator( + pstart, + pend, + ), + ) +} + +// Implements DB. +func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + var pstart, pend []byte + if start == nil { + // This may cause the underlying iterator to start with + // an item which doesn't start with prefix. We will skip + // that item later in this function. See 'skipOne'. + pstart = cpIncr(pdb.prefix) + } else { + pstart = append(cp(pdb.prefix), start...) + } + if end == nil { + // This may cause the underlying iterator to end with an + // item which doesn't start with prefix. The + // prefixIterator will terminate iteration + // automatically upon detecting this. + pend = cpDecr(pdb.prefix) + } else { + pend = append(cp(pdb.prefix), end...) + } + ritr := pdb.db.ReverseIterator(pstart, pend) + if start == nil { + skipOne(ritr, cpIncr(pdb.prefix)) + } + return newPrefixIterator( + pdb.prefix, + start, + end, + ritr, + ) +} + +// Implements DB. +// Panics if the underlying DB is not an +// atomicSetDeleter. +func (pdb *prefixDB) NewBatch() Batch { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) +} + +/* NOTE: Uncomment to use memBatch instead of prefixBatch +// Implements atomicSetDeleter. +func (pdb *prefixDB) SetNoLock(key []byte, value []byte) { + pdb.db.(atomicSetDeleter).SetNoLock(pdb.prefixed(key), value) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) { + pdb.db.(atomicSetDeleter).SetNoLockSync(pdb.prefixed(key), value) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) DeleteNoLock(key []byte) { + pdb.db.(atomicSetDeleter).DeleteNoLock(pdb.prefixed(key)) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) DeleteNoLockSync(key []byte) { + pdb.db.(atomicSetDeleter).DeleteNoLockSync(pdb.prefixed(key)) +} +*/ + +// Implements DB. +func (pdb *prefixDB) Close() { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.Close() +} + +// Implements DB. +func (pdb *prefixDB) Print() { + fmt.Printf("prefix: %X\n", pdb.prefix) + + itr := pdb.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} + +// Implements DB. 
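+// Stats reports the prefix itself (as string and hex) plus the
+// underlying DB's stats, re-keyed under "prefixdb.source.".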
+func (pdb *prefixDB) Stats() map[string]string {
+ stats := make(map[string]string)
+ stats["prefixdb.prefix.string"] = string(pdb.prefix)
+ stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix)
+ source := pdb.db.Stats()
+ for key, value := range source {
+ stats["prefixdb.source."+key] = value
+ }
+ return stats
+}
+
+func (pdb *prefixDB) prefixed(key []byte) []byte {
+ return append(cp(pdb.prefix), key...)
+}
+
+//----------------------------------------
+// prefixBatch
+
+type prefixBatch struct {
+ prefix []byte
+ source Batch
+}
+
+func newPrefixBatch(prefix []byte, source Batch) prefixBatch {
+ return prefixBatch{
+ prefix: prefix,
+ source: source,
+ }
+}
+
+func (pb prefixBatch) Set(key, value []byte) {
+ pkey := append(cp(pb.prefix), key...)
+ pb.source.Set(pkey, value)
+}
+
+func (pb prefixBatch) Delete(key []byte) {
+ pkey := append(cp(pb.prefix), key...)
+ pb.source.Delete(pkey)
+}
+
+func (pb prefixBatch) Write() {
+ pb.source.Write()
+}
+
+func (pb prefixBatch) WriteSync() {
+ pb.source.WriteSync()
+}
+
+//----------------------------------------
+// prefixIterator
+
+// Strips the prefix while iterating over the underlying Iterator.
+type prefixIterator struct {
+ prefix []byte
+ start []byte
+ end []byte
+ source Iterator
+ valid bool
+}
+
+func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterator {
+ if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) {
+ return prefixIterator{
+ prefix: prefix,
+ start: start,
+ end: end,
+ source: source,
+ valid: false,
+ }
+ } else {
+ return prefixIterator{
+ prefix: prefix,
+ start: start,
+ end: end,
+ source: source,
+ valid: true,
+ }
+ }
+}
+
+func (itr prefixIterator) Domain() (start []byte, end []byte) {
+ return itr.start, itr.end
+}
+
+func (itr prefixIterator) Valid() bool {
+ return itr.valid && itr.source.Valid()
+}
+
+func (itr prefixIterator) Next() {
+ if !itr.valid {
+ panic("prefixIterator invalid, cannot call Next()")
+ }
+ itr.source.Next()
+ if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) {
+ itr.source.Close()
+ itr.valid = false
+ return
+ }
+}
+
+func (itr prefixIterator) Key() (key []byte) {
+ if !itr.valid {
+ panic("prefixIterator invalid, cannot call Key()")
+ }
+ return stripPrefix(itr.source.Key(), itr.prefix)
+}
+
+func (itr prefixIterator) Value() (value []byte) {
+ if !itr.valid {
+ panic("prefixIterator invalid, cannot call Value()")
+ }
+ return itr.source.Value()
+}
+
+func (itr prefixIterator) Close() {
+ itr.source.Close()
+}
+
+//----------------------------------------
+
+func stripPrefix(key []byte, prefix []byte) (stripped []byte) {
+ if len(key) < len(prefix) {
+ panic("should not happen")
+ }
+ if !bytes.Equal(key[:len(prefix)], prefix) {
+ panic("should not happen")
+ }
+ return key[len(prefix):]
+}
+
+// If the first iterator item is skipKey, then
+// skip it.
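+// (Needed by prefixDB.ReverseIterator: when start is nil it positions
+// the underlying iterator at cpIncr(prefix), the first key just past
+// the prefix domain, and that key must not be yielded.)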
+func skipOne(itr Iterator, skipKey []byte) { + if itr.Valid() { + if bytes.Equal(itr.Key(), skipKey) { + itr.Next() + } + } +} diff --git a/libs/db/prefix_db_test.go b/libs/db/prefix_db_test.go new file mode 100644 index 000000000..60809f157 --- /dev/null +++ b/libs/db/prefix_db_test.go @@ -0,0 +1,147 @@ +package db + +import "testing" + +func mockDBWithStuff() DB { + db := NewMemDB() + // Under "key" prefix + db.Set(bz("key"), bz("value")) + db.Set(bz("key1"), bz("value1")) + db.Set(bz("key2"), bz("value2")) + db.Set(bz("key3"), bz("value3")) + db.Set(bz("something"), bz("else")) + db.Set(bz(""), bz("")) + db.Set(bz("k"), bz("val")) + db.Set(bz("ke"), bz("valu")) + db.Set(bz("kee"), bz("valuu")) + return db +} + +func TestPrefixDBSimple(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + checkValue(t, pdb, bz("key"), nil) + checkValue(t, pdb, bz(""), bz("value")) + checkValue(t, pdb, bz("key1"), nil) + checkValue(t, pdb, bz("1"), bz("value1")) + checkValue(t, pdb, bz("key2"), nil) + checkValue(t, pdb, bz("2"), bz("value2")) + checkValue(t, pdb, bz("key3"), nil) + checkValue(t, pdb, bz("3"), bz("value3")) + checkValue(t, pdb, bz("something"), nil) + checkValue(t, pdb, bz("k"), nil) + checkValue(t, pdb, bz("ke"), nil) + checkValue(t, pdb, bz("kee"), nil) +} + +func TestPrefixDBIterator1(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(nil, nil) + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator2(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator3(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(bz(""), nil) + checkDomain(t, itr, bz(""), nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator4(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(bz(""), bz("")) + checkDomain(t, itr, bz(""), bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator1(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(nil, nil) + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator2(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + 
checkNext(t, itr, true)
+ checkItem(t, itr, bz("1"), bz("value1"))
+ checkNext(t, itr, false)
+ checkInvalid(t, itr)
+ itr.Close()
+}
+
+func TestPrefixDBReverseIterator3(t *testing.T) {
+ db := mockDBWithStuff()
+ pdb := NewPrefixDB(db, bz("key"))
+
+ itr := pdb.ReverseIterator(bz(""), nil)
+ checkDomain(t, itr, bz(""), nil)
+ checkItem(t, itr, bz(""), bz("value"))
+ checkNext(t, itr, false)
+ checkInvalid(t, itr)
+ itr.Close()
+}
+
+func TestPrefixDBReverseIterator4(t *testing.T) {
+ db := mockDBWithStuff()
+ pdb := NewPrefixDB(db, bz("key"))
+
+ itr := pdb.ReverseIterator(bz(""), bz(""))
+ checkInvalid(t, itr)
+ itr.Close()
+}
diff --git a/libs/db/remotedb/doc.go b/libs/db/remotedb/doc.go
new file mode 100644
index 000000000..07c95a56a
--- /dev/null
+++ b/libs/db/remotedb/doc.go
@@ -0,0 +1,37 @@
+/*
+remotedb is a package for connecting to distributed Tendermint db.DB
+instances. The purpose is to detach difficult deployments, such as
+CLevelDB, which requires gcc, as well as databases that require
+custom configuration such as extra disk space. It also eases the
+burden and cost of deploying database dependencies for Tendermint
+developers. Most importantly, it is built on the highly performant
+gRPC transport.
+
+remotedb's RemoteDB implements db.DB, so it can be used like any
+other database. One just has to explicitly connect to the remote
+database with a client setup such as:
+
+ client, err := remotedb.NewInsecure(addr)
+ // Make sure to invoke InitRemote!
+ if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
+ log.Fatalf("Failed to initialize the remote db")
+ }
+
+ client.Set(key1, value)
+ gv1 := client.SetSync(k2, v2)
+
+ client.Delete(k1)
+ gv2 := client.Get(k1)
+
+ for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() {
+ ik, iv := itr.Key(), itr.Value()
+ ds, de := itr.Domain()
+ }
+
+ stats := client.Stats()
+
+ if !client.Has(dk1) {
+ client.SetSync(dk1, dv1)
+ }
+*/
+package remotedb
diff --git a/libs/db/remotedb/grpcdb/client.go b/libs/db/remotedb/grpcdb/client.go
new file mode 100644
index 000000000..e11b7839b
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/client.go
@@ -0,0 +1,30 @@
+package grpcdb
+
+import (
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
+)
+
+// Security defines how the client will talk to the gRPC server.
+type Security uint
+
+const (
+ Insecure Security = iota
+ Secure
+)
+
+// NewClient creates a gRPC client connected to the gRPC server at serverAddr,
+// using the TLS certificate at serverCert to secure the transport.
+func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {
+ creds, err := credentials.NewClientTLSFromFile(serverCert, "")
+ if err != nil {
+ return nil, err
+ }
+ cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
+ if err != nil {
+ return nil, err
+ }
+ return protodb.NewDBClient(cc), nil
+}
diff --git a/libs/db/remotedb/grpcdb/doc.go b/libs/db/remotedb/grpcdb/doc.go
new file mode 100644
index 000000000..0d8e380ce
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/doc.go
@@ -0,0 +1,32 @@
+/*
+grpcdb serves Tendermint's db.DB instances over the gRPC transport,
+decoupling local db.DB usage from applications so that they can be
+used over a network in a highly performant manner.
+
+grpcdb allows users to initialize a database's server like
+they would locally and invoke the respective methods of db.DB.
+
+Most users shouldn't use this package, but should instead use
+remotedb. Only lower-level users and database server deployers
+should use it, for functionality such as:
+
+ ln, err := net.Listen("tcp", "0.0.0.0:0")
+ srv, err := grpcdb.NewServer(cert, key)
+ defer srv.Stop()
+ go func() {
+ if err := srv.Serve(ln); err != nil {
+ log.Fatalf("BindServer: %v", err)
+ }
+ }()
+
+or
+ addr := ":8998"
+ cert := "server.crt"
+ key := "server.key"
+ go func() {
+ if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
+ log.Fatalf("BindServer: %v", err)
+ }
+ }()
+*/
+package grpcdb
diff --git a/libs/db/remotedb/grpcdb/example_test.go b/libs/db/remotedb/grpcdb/example_test.go
new file mode 100644
index 000000000..eba0d6914
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/example_test.go
@@ -0,0 +1,52 @@
+package grpcdb_test
+
+import (
+ "bytes"
+ "context"
+ "log"
+
+ grpcdb "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
+ protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
+)
+
+func Example() {
+ addr := ":8998"
+ cert := "server.crt"
+ key := "server.key"
+ go func() {
+ if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
+ log.Fatalf("BindServer: %v", err)
+ }
+ }()
+
+ client, err := grpcdb.NewClient(addr, cert)
+ if err != nil {
+ log.Fatalf("Failed to create grpcDB client: %v", err)
+ }
+
+ ctx := context.Background()
+ // 1. Initialize the DB
+ in := &protodb.Init{
+ Type: "leveldb",
+ Name: "grpc-uno-test",
+ Dir: ".",
+ }
+ if _, err := client.Init(ctx, in); err != nil {
+ log.Fatalf("Init error: %v", err)
+ }
+
+ // 2. Now it can be used!
+ query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")}
+ if _, err := client.SetSync(ctx, query1); err != nil {
+ log.Fatalf("SetSync err: %v", err)
+ }
+
+ query2 := &protodb.Entity{Key: []byte("Project")}
+ read, err := client.Get(ctx, query2)
+ if err != nil {
+ log.Fatalf("Get err: %v", err)
+ }
+ if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) {
+ log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w)
+ }
+}
diff --git a/libs/db/remotedb/grpcdb/server.go b/libs/db/remotedb/grpcdb/server.go
new file mode 100644
index 000000000..3a9955ddf
--- /dev/null
+++ b/libs/db/remotedb/grpcdb/server.go
@@ -0,0 +1,197 @@
+package grpcdb
+
+import (
+ "context"
+ "net"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "github.com/tendermint/tendermint/libs/db"
+ protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
+)
+
+// ListenAndServe is a blocking function that sets up a gRPC based
+// server at the address supplied, with the gRPC options passed in.
+// In normal usage, invoke it in a goroutine, as you would http.ListenAndServe.
+func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error {
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ return err
+ }
+ srv, err := NewServer(cert, key, opts...)
+ if err != nil {
+ return err
+ }
+ return srv.Serve(ln)
+}
+
+func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) {
+ creds, err := credentials.NewServerTLSFromFile(cert, key)
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts, grpc.Creds(creds))
+ srv := grpc.NewServer(opts...)
+ protodb.RegisterDBServer(srv, new(server))
+ return srv, nil
+}
+
+type server struct {
+ mu sync.Mutex
+ db db.DB
+}
+
+var _ protodb.DBServer = (*server)(nil)
+
+// Init initializes the server's database. Only one type of database
+// can be initialized per server.
+//
+// Dir is the directory on the file system in which the DB will be stored (if backed by disk) (TODO: remove)
+//
+// Name is the representative filesystem entry's basepath
+//
+// Type can be one of:
+// * cleveldb (if built with gcc enabled)
+// * fsdb
+// * memdb
+// * leveldb
+// See https://godoc.org/github.com/tendermint/tendermint/libs/db#DBBackendType
+func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir)
+ return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil
+}
+
+func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+ s.db.Delete(in.Key)
+ return nothing, nil
+}
+
+var nothing = new(protodb.Nothing)
+
+func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+ s.db.DeleteSync(in.Key)
+ return nothing, nil
+}
+
+func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
+ value := s.db.Get(in.Key)
+ return &protodb.Entity{Value: value}, nil
+}
+
+func (s *server) GetStream(ds protodb.DB_GetStreamServer) error {
+ // Receive routine
+ responsesChan := make(chan *protodb.Entity)
+ go func() {
+ defer close(responsesChan)
+ ctx := context.Background()
+ for {
+ in, err := ds.Recv()
+ if err != nil {
+ responsesChan <- &protodb.Entity{Err: err.Error()}
+ return
+ }
+ out, err := s.Get(ctx, in)
+ if err != nil {
+ if out == nil {
+ out = new(protodb.Entity)
+ out.Key = in.Key
+ }
+ out.Err = err.Error()
+ responsesChan <- out
+ return
+ }
+
+ // Otherwise continue on
+ responsesChan <- out
+ }
+ }()
+
+ // Send routine, block until we return
+ for out := range responsesChan {
+ if err := ds.Send(out); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
+ exists := s.db.Has(in.Key)
+ return &protodb.Entity{Exists: exists}, nil
+}
+
+func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+ s.db.Set(in.Key, in.Value)
+ return nothing, nil
+}
+
+func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
+ s.db.SetSync(in.Key, in.Value)
+ return nothing, nil
+}
+
+func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error {
+ it := s.db.Iterator(query.Start, query.End)
+ return s.handleIterator(it, dis.Send)
+}
+
+func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error {
+ for it.Valid() {
+ start, end := it.Domain()
+ out := &protodb.Iterator{
+ Domain: &protodb.Domain{Start: start, End: end},
+ Valid: it.Valid(),
+ Key: it.Key(),
+ Value: it.Value(),
+ }
+ if err := sendFunc(out); err != nil {
+ return err
+ }
+
+ // Finally move the iterator forward
+ it.Next()
+ }
+ return nil
+}
+
+func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error {
+ it := s.db.ReverseIterator(query.Start, query.End)
+ return s.handleIterator(it, dis.Send)
+}
+
+func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) {
+ stats := s.db.Stats()
+ return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil
+}
+
+func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
+ return s.batchWrite(c, b, false)
+}
+
+func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
+ return s.batchWrite(c, b,
true) +} + +func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) { + bat := s.db.NewBatch() + for _, op := range b.Ops { + switch op.Type { + case protodb.Operation_SET: + bat.Set(op.Entity.Key, op.Entity.Value) + case protodb.Operation_DELETE: + bat.Delete(op.Entity.Key) + } + } + if sync { + bat.WriteSync() + } else { + bat.Write() + } + return nothing, nil +} diff --git a/libs/db/remotedb/proto/defs.pb.go b/libs/db/remotedb/proto/defs.pb.go new file mode 100644 index 000000000..4d9f0b272 --- /dev/null +++ b/libs/db/remotedb/proto/defs.pb.go @@ -0,0 +1,914 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: defs.proto + +/* +Package protodb is a generated protocol buffer package. + +It is generated from these files: + defs.proto + +It has these top-level messages: + Batch + Operation + Entity + Nothing + Domain + Iterator + Stats + Init +*/ +package protodb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Operation_Type int32 + +const ( + Operation_SET Operation_Type = 0 + Operation_DELETE Operation_Type = 1 +) + +var Operation_Type_name = map[int32]string{ + 0: "SET", + 1: "DELETE", +} +var Operation_Type_value = map[string]int32{ + "SET": 0, + "DELETE": 1, +} + +func (x Operation_Type) String() string { + return proto.EnumName(Operation_Type_name, int32(x)) +} +func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +type Batch struct { + Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` +} + +func (m *Batch) Reset() { *m = Batch{} } +func (m *Batch) String() string { return proto.CompactTextString(m) } +func (*Batch) ProtoMessage() {} +func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Batch) GetOps() []*Operation { + if m != nil { + return m.Ops + } + return nil +} + +type Operation struct { + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Operation) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *Operation) GetType() Operation_Type { + if m != nil { + return m.Type + } + return Operation_SET +} + +type Entity struct { + Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` + Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` + End []byte 
`protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` + Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` + CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Entity) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Entity) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *Entity) GetExists() bool { + if m != nil { + return m.Exists + } + return false +} + +func (m *Entity) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *Entity) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +func (m *Entity) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +func (m *Entity) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +type Nothing struct { +} + +func (m *Nothing) Reset() { *m = Nothing{} } +func (m *Nothing) String() string { return proto.CompactTextString(m) } +func (*Nothing) ProtoMessage() {} +func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type Domain struct { + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (m *Domain) Reset() { *m = Domain{} } +func (m *Domain) String() string { return proto.CompactTextString(m) } +func (*Domain) ProtoMessage() {} +func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Domain) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *Domain) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +type Iterator struct { + Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` + Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Iterator) Reset() { *m = Iterator{} } +func (m *Iterator) String() string { return proto.CompactTextString(m) } +func (*Iterator) ProtoMessage() {} +func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Iterator) GetDomain() *Domain { + if m != nil { + return m.Domain + } + return nil +} + +func (m *Iterator) GetValid() bool { + if m != nil { + return m.Valid + } + return false +} + +func (m *Iterator) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Iterator) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type Stats struct { + Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` +} + +func (m *Stats) Reset() { *m = Stats{} } +func (m *Stats) String() string { return proto.CompactTextString(m) } +func (*Stats) ProtoMessage() {} +func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Stats) GetData() 
map[string]string { + if m != nil { + return m.Data + } + return nil +} + +func (m *Stats) GetTimeAt() int64 { + if m != nil { + return m.TimeAt + } + return 0 +} + +type Init struct { + Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` +} + +func (m *Init) Reset() { *m = Init{} } +func (m *Init) String() string { return proto.CompactTextString(m) } +func (*Init) ProtoMessage() {} +func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Init) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Init) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Init) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func init() { + proto.RegisterType((*Batch)(nil), "protodb.Batch") + proto.RegisterType((*Operation)(nil), "protodb.Operation") + proto.RegisterType((*Entity)(nil), "protodb.Entity") + proto.RegisterType((*Nothing)(nil), "protodb.Nothing") + proto.RegisterType((*Domain)(nil), "protodb.Domain") + proto.RegisterType((*Iterator)(nil), "protodb.Iterator") + proto.RegisterType((*Stats)(nil), "protodb.Stats") + proto.RegisterType((*Init)(nil), "protodb.Init") + proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DB service + +type DBClient interface { + Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) + Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) + GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) + Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) + Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) + ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) + // rpc print(Nothing) returns (Entity) {} + Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) + BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) + BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) +} + +type dBClient struct { + cc *grpc.ClientConn +} + +func NewDBClient(cc *grpc.ClientConn) DBClient { + return &dBClient{cc} +} + +func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...) + if err != nil { + return nil, err + } + x := &dBGetStreamClient{stream} + return x, nil +} + +type DB_GetStreamClient interface { + Send(*Entity) error + Recv() (*Entity, error) + grpc.ClientStream +} + +type dBGetStreamClient struct { + grpc.ClientStream +} + +func (x *dBGetStreamClient) Send(m *Entity) error { + return x.ClientStream.SendMsg(m) +} + +func (x *dBGetStreamClient) Recv() (*Entity, error) { + m := new(Entity) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...) + if err != nil { + return nil, err + } + x := &dBIteratorClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DB_IteratorClient interface { + Recv() (*Iterator, error) + grpc.ClientStream +} + +type dBIteratorClient struct { + grpc.ClientStream +} + +func (x *dBIteratorClient) Recv() (*Iterator, error) { + m := new(Iterator) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...) 
+ if err != nil { + return nil, err + } + x := &dBReverseIteratorClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DB_ReverseIteratorClient interface { + Recv() (*Iterator, error) + grpc.ClientStream +} + +type dBReverseIteratorClient struct { + grpc.ClientStream +} + +func (x *dBReverseIteratorClient) Recv() (*Iterator, error) { + m := new(Iterator) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) { + out := new(Stats) + err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DB service + +type DBServer interface { + Init(context.Context, *Init) (*Entity, error) + Get(context.Context, *Entity) (*Entity, error) + GetStream(DB_GetStreamServer) error + Has(context.Context, *Entity) (*Entity, error) + Set(context.Context, *Entity) (*Nothing, error) + SetSync(context.Context, *Entity) (*Nothing, error) + Delete(context.Context, *Entity) (*Nothing, error) + DeleteSync(context.Context, *Entity) (*Nothing, error) + Iterator(*Entity, DB_IteratorServer) error + ReverseIterator(*Entity, DB_ReverseIteratorServer) error + // rpc print(Nothing) returns (Entity) {} + Stats(context.Context, *Nothing) (*Stats, error) + BatchWrite(context.Context, *Batch) (*Nothing, error) + BatchWriteSync(context.Context, *Batch) (*Nothing, error) +} + +func RegisterDBServer(s *grpc.Server, srv DBServer) { + s.RegisterService(&_DB_serviceDesc, srv) +} + +func _DB_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Init) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Init(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Init", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Init(ctx, req.(*Init)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Get(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(DBServer).GetStream(&dBGetStreamServer{stream}) +} + +type 
DB_GetStreamServer interface { + Send(*Entity) error + Recv() (*Entity, error) + grpc.ServerStream +} + +type dBGetStreamServer struct { + grpc.ServerStream +} + +func (x *dBGetStreamServer) Send(m *Entity) error { + return x.ServerStream.SendMsg(m) +} + +func (x *dBGetStreamServer) Recv() (*Entity, error) { + m := new(Entity) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _DB_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Has(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Has", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Has(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Set(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_SetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).SetSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/SetSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).SetSync(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Delete(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).DeleteSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/DeleteSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).DeleteSync(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Iterator_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Entity) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DBServer).Iterator(m, &dBIteratorServer{stream}) +} + +type DB_IteratorServer interface { + 
Send(*Iterator) error + grpc.ServerStream +} + +type dBIteratorServer struct { + grpc.ServerStream +} + +func (x *dBIteratorServer) Send(m *Iterator) error { + return x.ServerStream.SendMsg(m) +} + +func _DB_ReverseIterator_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Entity) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DBServer).ReverseIterator(m, &dBReverseIteratorServer{stream}) +} + +type DB_ReverseIteratorServer interface { + Send(*Iterator) error + grpc.ServerStream +} + +type dBReverseIteratorServer struct { + grpc.ServerStream +} + +func (x *dBReverseIteratorServer) Send(m *Iterator) error { + return x.ServerStream.SendMsg(m) +} + +func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Nothing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Stats(ctx, req.(*Nothing)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_BatchWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Batch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).BatchWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/BatchWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).BatchWrite(ctx, req.(*Batch)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_BatchWriteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Batch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).BatchWriteSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/BatchWriteSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).BatchWriteSync(ctx, req.(*Batch)) + } + return interceptor(ctx, in, info, handler) +} + +var _DB_serviceDesc = grpc.ServiceDesc{ + ServiceName: "protodb.DB", + HandlerType: (*DBServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "init", + Handler: _DB_Init_Handler, + }, + { + MethodName: "get", + Handler: _DB_Get_Handler, + }, + { + MethodName: "has", + Handler: _DB_Has_Handler, + }, + { + MethodName: "set", + Handler: _DB_Set_Handler, + }, + { + MethodName: "setSync", + Handler: _DB_SetSync_Handler, + }, + { + MethodName: "delete", + Handler: _DB_Delete_Handler, + }, + { + MethodName: "deleteSync", + Handler: _DB_DeleteSync_Handler, + }, + { + MethodName: "stats", + Handler: _DB_Stats_Handler, + }, + { + MethodName: "batchWrite", + Handler: _DB_BatchWrite_Handler, + }, + { + MethodName: "batchWriteSync", + Handler: _DB_BatchWriteSync_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "getStream", + Handler: _DB_GetStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "iterator", + Handler: _DB_Iterator_Handler, + ServerStreams: true, + }, + { + StreamName: "reverseIterator", + Handler: 
_DB_ReverseIterator_Handler, + ServerStreams: true, + }, + }, + Metadata: "defs.proto", +} + +func init() { proto.RegisterFile("defs.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 606 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b, + 0x09, 0x43, 0x69, 0x14, 0x52, 0x24, 0xfe, 0x9c, 0x68, 0x95, 0x1c, 0x2a, 0xa1, 0x22, 0x39, 0x95, + 0x38, 0xa2, 0x6d, 0x3d, 0x34, 0x2b, 0x1a, 0x3b, 0xac, 0x87, 0x8a, 0x5c, 0xb8, 0xf2, 0x79, 0xf8, + 0x7c, 0x5c, 0xd0, 0xae, 0x1d, 0x87, 0x36, 0x39, 0x84, 0x53, 0x76, 0x66, 0xde, 0x7b, 0xb3, 0xf3, + 0x32, 0x5e, 0x80, 0x94, 0x3e, 0x17, 0xfd, 0xb9, 0xce, 0x39, 0xc7, 0x96, 0xfd, 0x49, 0x2f, 0xa2, + 0x43, 0x68, 0x9e, 0x48, 0xbe, 0x9c, 0xe2, 0x63, 0x70, 0xf3, 0x79, 0x11, 0x8a, 0x9e, 0x1b, 0xef, + 0x0c, 0xb1, 0x5f, 0xd5, 0xfb, 0x1f, 0xe6, 0xa4, 0x25, 0xab, 0x3c, 0x4b, 0x4c, 0x39, 0xfa, 0x01, + 0x41, 0x9d, 0xc1, 0x27, 0xe0, 0x53, 0xc6, 0x8a, 0x17, 0xa1, 0xe8, 0x89, 0x78, 0x67, 0xb8, 0x5b, + 0xb3, 0xc6, 0x36, 0x9d, 0x54, 0x65, 0x3c, 0x00, 0x8f, 0x17, 0x73, 0x0a, 0x9d, 0x9e, 0x88, 0x3b, + 0xc3, 0xbd, 0x75, 0xf1, 0xfe, 0xf9, 0x62, 0x4e, 0x89, 0x05, 0x45, 0x0f, 0xc1, 0x33, 0x11, 0xb6, + 0xc0, 0x9d, 0x8c, 0xcf, 0xbb, 0x0d, 0x04, 0xf0, 0x47, 0xe3, 0xf7, 0xe3, 0xf3, 0x71, 0x57, 0x44, + 0xbf, 0x04, 0xf8, 0xa5, 0x38, 0x76, 0xc0, 0x51, 0xa9, 0xed, 0xdc, 0x4c, 0x1c, 0x95, 0x62, 0x17, + 0xdc, 0x2f, 0xb4, 0xb0, 0x3d, 0xfe, 0x4b, 0xcc, 0x11, 0xef, 0x43, 0xf3, 0x46, 0x5e, 0x7f, 0xa3, + 0xd0, 0xb5, 0xb9, 0x32, 0xc0, 0x07, 0xe0, 0xd3, 0x77, 0x55, 0x70, 0x11, 0x7a, 0x3d, 0x11, 0xb7, + 0x93, 0x2a, 0x32, 0xe8, 0x82, 0xa5, 0xe6, 0xb0, 0x59, 0xa2, 0x6d, 0x60, 0x54, 0x29, 0x4b, 0x43, + 0xbf, 0x54, 0xa5, 0xcc, 0xf6, 0x21, 0xad, 0xc3, 0x56, 0x4f, 0xc4, 0x41, 0x62, 0x8e, 0xf8, 0x08, + 0xe0, 0x52, 0x93, 0x64, 0x4a, 0x3f, 0x49, 0x0e, 0xdb, 0x3d, 0x11, 0xbb, 0x49, 0x50, 0x65, 0x8e, + 0x39, 0x0a, 0xa0, 0x75, 0x96, 0xf3, 0x54, 0x65, 0x57, 0xd1, 0x00, 0xfc, 0x51, 0x3e, 0x93, 0x2a, + 0x5b, 0x75, 0x13, 0x1b, 0xba, 0x39, 0x75, 0xb7, 0xe8, 0x2b, 0xb4, 0x4f, 0xd9, 0xb8, 0x94, 0x6b, + 0xe3, 0x77, 0x6a, 0xd9, 0x6b, 0x7e, 0x97, 0xa2, 0x49, 0x55, 0xae, 0x06, 0x57, 0xa5, 0x50, 0x3b, + 0x29, 0x83, 0xa5, 0x41, 0xee, 0x06, 0x83, 0xbc, 0xbf, 0x0c, 0x8a, 0x7e, 0x0a, 0x68, 0x4e, 0x58, + 0x72, 0x81, 0xcf, 0xc1, 0x4b, 0x25, 0xcb, 0x6a, 0x29, 0xc2, 0xba, 0x9d, 0xad, 0xf6, 0x47, 0x92, + 0xe5, 0x38, 0x63, 0xbd, 0x48, 0x2c, 0x0a, 0xf7, 0xa0, 0xc5, 0x6a, 0x46, 0xc6, 0x03, 0xc7, 0x7a, + 0xe0, 0x9b, 0xf0, 0x98, 0xf7, 0x5f, 0x41, 0x50, 0x63, 0x97, 0xb7, 0x10, 0xa5, 0x7d, 0xb7, 0x6e, + 0xe1, 0xd8, 0x5c, 0x19, 0xbc, 0x75, 0x5e, 0x8b, 0xe8, 0x1d, 0x78, 0xa7, 0x99, 0x62, 0xc4, 0x72, + 0x25, 0x2a, 0x52, 0xb9, 0x1e, 0x08, 0xde, 0x99, 0x9c, 0x2d, 0x49, 0xf6, 0x6c, 0xb4, 0x47, 0x4a, + 0xdb, 0x09, 0x83, 0xc4, 0x1c, 0x87, 0xbf, 0x3d, 0x70, 0x46, 0x27, 0x18, 0x83, 0xa7, 0x8c, 0xd0, + 0xff, 0xf5, 0x08, 0x46, 0x77, 0xff, 0xee, 0xc2, 0x46, 0x0d, 0x7c, 0x0a, 0xee, 0x15, 0x31, 0xde, + 0xad, 0x6c, 0x82, 0x1e, 0x41, 0x70, 0x45, 0x3c, 0x61, 0x4d, 0x72, 0xb6, 0x0d, 0x21, 0x16, 0x03, + 0x61, 0xf4, 0xa7, 0xb2, 0xd8, 0x4a, 0xff, 0x19, 0xb8, 0xc5, 0xa6, 0xab, 0x74, 0xeb, 0xc4, 0x72, + 0xad, 0x1a, 0xd8, 0x87, 0x56, 0x41, 0x3c, 0x59, 0x64, 0x97, 0xdb, 0xe1, 0x0f, 0xc1, 0x4f, 0xe9, + 0x9a, 0x98, 0xb6, 0x83, 0xbf, 0x30, 0x8f, 0x87, 0x81, 0x6f, 0xdf, 0x61, 0x08, 0x6d, 0xb5, 0x5c, + 0xdc, 0x35, 0xc2, 0xbd, 0xd5, 0xff, 0x50, 0x61, 0xa2, 0xc6, 0x40, 0xe0, 0x1b, 
0xd8, 0xd5, 0x74, + 0x43, 0xba, 0xa0, 0xd3, 0x7f, 0xa5, 0x1e, 0xd8, 0xef, 0x89, 0x0b, 0x5c, 0xbb, 0xcb, 0x7e, 0xe7, + 0xf6, 0xde, 0x46, 0x0d, 0x1c, 0x00, 0x5c, 0x98, 0x47, 0xef, 0xa3, 0x56, 0x4c, 0xb8, 0xaa, 0xdb, + 0x97, 0x70, 0xe3, 0x34, 0x2f, 0xa1, 0xb3, 0x62, 0x58, 0x13, 0xb6, 0x60, 0x5d, 0xf8, 0x36, 0x75, + 0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf4, 0xe3, 0x82, 0x7a, 0x05, 0x00, 0x00, +} diff --git a/libs/db/remotedb/proto/defs.proto b/libs/db/remotedb/proto/defs.proto new file mode 100644 index 000000000..70471f234 --- /dev/null +++ b/libs/db/remotedb/proto/defs.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package protodb; + +message Batch { + repeated Operation ops = 1; +} + +message Operation { + Entity entity = 1; + enum Type { + SET = 0; + DELETE = 1; + } + Type type = 2; +} + +message Entity { + int32 id = 1; + bytes key = 2; + bytes value = 3; + bool exists = 4; + bytes start = 5; + bytes end = 6; + string err = 7; + int64 created_at = 8; +} + +message Nothing { +} + +message Domain { + bytes start = 1; + bytes end = 2; +} + +message Iterator { + Domain domain = 1; + bool valid = 2; + bytes key = 3; + bytes value = 4; +} + +message Stats { + map<string, string> data = 1; + int64 time_at = 2; +} + +message Init { + string Type = 1; + string Name = 2; + string Dir = 3; +} + +service DB { + rpc init(Init) returns (Entity) {} + rpc get(Entity) returns (Entity) {} + rpc getStream(stream Entity) returns (stream Entity) {} + + rpc has(Entity) returns (Entity) {} + rpc set(Entity) returns (Nothing) {} + rpc setSync(Entity) returns (Nothing) {} + rpc delete(Entity) returns (Nothing) {} + rpc deleteSync(Entity) returns (Nothing) {} + rpc iterator(Entity) returns (stream Iterator) {} + rpc reverseIterator(Entity) returns (stream Iterator) {} + // rpc print(Nothing) returns (Entity) {} + rpc stats(Nothing) returns (Stats) {} + rpc batchWrite(Batch) returns (Nothing) {} + rpc batchWriteSync(Batch) returns (Nothing) {} +} diff --git a/libs/db/remotedb/remotedb.go b/libs/db/remotedb/remotedb.go new file mode 100644 index 000000000..2b60d8159 --- /dev/null +++ b/libs/db/remotedb/remotedb.go @@ -0,0 +1,262 @@ +package remotedb + +import ( + "context" + "fmt" + + "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" + protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" +) + +type RemoteDB struct { + ctx context.Context + dc protodb.DBClient +} + +func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) { + return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey)) +} + +func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) { + if err != nil { + return nil, err + } + return &RemoteDB{dc: gdc, ctx: context.Background()}, nil +} + +type Init struct { + Dir string + Name string + Type string +} + +func (rd *RemoteDB) InitRemote(in *Init) error { + _, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name}) + return err +} + +var _ db.DB = (*RemoteDB)(nil) + +// Close is a noop currently +func (rd *RemoteDB) Close() { +} + +func (rd *RemoteDB) Delete(key []byte) { + if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil { + panic(fmt.Sprintf("RemoteDB.Delete: %v", err)) + } +} + +func (rd *RemoteDB) DeleteSync(key []byte) { + if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil { + panic(fmt.Sprintf("RemoteDB.DeleteSync: %v", err)) + } +} + +func (rd *RemoteDB) Set(key, value []byte) { + if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, 
Value: value}); err != nil { + panic(fmt.Sprintf("RemoteDB.Set: %v", err)) + } +} + +func (rd *RemoteDB) SetSync(key, value []byte) { + if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil { + panic(fmt.Sprintf("RemoteDB.SetSync: %v", err)) + } +} + +func (rd *RemoteDB) Get(key []byte) []byte { + res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Get error: %v", err)) + } + return res.Value +} + +func (rd *RemoteDB) Has(key []byte) bool { + res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Has error: %v", err)) + } + return res.Exists +} + +func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator { + dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.ReverseIterator error: %v", err)) + } + return makeReverseIterator(dic) +} + +func (rd *RemoteDB) NewBatch() db.Batch { + return &batch{ + db: rd, + ops: nil, + } +} + +// TODO: Implement Print when db.DB implements a method +// to print to a string and not db.Print to stdout. +func (rd *RemoteDB) Print() { + panic("Unimplemented") +} + +func (rd *RemoteDB) Stats() map[string]string { + stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Stats error: %v", err)) + } + if stats == nil { + return nil + } + return stats.Data +} + +func (rd *RemoteDB) Iterator(start, end []byte) db.Iterator { + dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err)) + } + return makeIterator(dic) +} + +func makeIterator(dic protodb.DB_IteratorClient) db.Iterator { + return &iterator{dic: dic} +} + +func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator { + return &reverseIterator{dric: dric} +} + +type reverseIterator struct { + dric protodb.DB_ReverseIteratorClient + cur *protodb.Iterator +} + +var _ db.Iterator = (*reverseIterator)(nil) + +func (rItr *reverseIterator) Valid() bool { + return rItr.cur != nil && rItr.cur.Valid +} + +func (rItr *reverseIterator) Domain() (start, end []byte) { + if rItr.cur == nil || rItr.cur.Domain == nil { + return nil, nil + } + return rItr.cur.Domain.Start, rItr.cur.Domain.End +} + +// Next advances the current reverseIterator +func (rItr *reverseIterator) Next() { + var err error + rItr.cur, err = rItr.dric.Recv() + if err != nil { + panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err)) + } +} + +func (rItr *reverseIterator) Key() []byte { + if rItr.cur == nil { + return nil + } + return rItr.cur.Key +} + +func (rItr *reverseIterator) Value() []byte { + if rItr.cur == nil { + return nil + } + return rItr.cur.Value +} + +func (rItr *reverseIterator) Close() { +} + +// iterator implements db.Iterator by retrieving +// streamed results from the remote backend as +// needed. It is NOT safe for concurrent usage, +// matching the behavior of other iterators. 
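+// +// A usage sketch mirroring remotedb_test.go (client here is an assumed +// *RemoteDB): +// +//   itr := client.Iterator(nil, nil) +//   itr.Next() // fetch the first streamed entry +//   k, v := itr.Key(), itr.Value() +//   itr.Close() +// +// Note that Next panics once the stream is exhausted, as exercised by +// require.Panics in remotedb_test.go.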
+type iterator struct { + dic protodb.DB_IteratorClient + cur *protodb.Iterator +} + +var _ db.Iterator = (*iterator)(nil) + +func (itr *iterator) Valid() bool { + return itr.cur != nil && itr.cur.Valid +} + +func (itr *iterator) Domain() (start, end []byte) { + if itr.cur == nil || itr.cur.Domain == nil { + return nil, nil + } + return itr.cur.Domain.Start, itr.cur.Domain.End +} + +// Next advances the current iterator +func (itr *iterator) Next() { + var err error + itr.cur, err = itr.dic.Recv() + if err != nil { + panic(fmt.Sprintf("RemoteDB.Iterator.Next error: %v", err)) + } +} + +func (itr *iterator) Key() []byte { + if itr.cur == nil { + return nil + } + return itr.cur.Key +} + +func (itr *iterator) Value() []byte { + if itr.cur == nil { + return nil + } + return itr.cur.Value +} + +func (itr *iterator) Close() { + err := itr.dic.CloseSend() + if err != nil { + panic(fmt.Sprintf("Error closing iterator: %v", err)) + } +} + +type batch struct { + db *RemoteDB + ops []*protodb.Operation +} + +var _ db.Batch = (*batch)(nil) + +func (bat *batch) Set(key, value []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key, Value: value}, + Type: protodb.Operation_SET, + } + bat.ops = append(bat.ops, op) +} + +func (bat *batch) Delete(key []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key}, + Type: protodb.Operation_DELETE, + } + bat.ops = append(bat.ops, op) +} + +func (bat *batch) Write() { + if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { + panic(fmt.Sprintf("RemoteDB.BatchWrite: %v", err)) + } +} + +func (bat *batch) WriteSync() { + if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { + panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err)) + } +} diff --git a/libs/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go new file mode 100644 index 000000000..0e7319971 --- /dev/null +++ b/libs/db/remotedb/remotedb_test.go @@ -0,0 +1,123 @@ +package remotedb_test + +import ( + "net" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/db/remotedb" + "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" +) + +func TestRemoteDB(t *testing.T) { + cert := "test.crt" + key := "test.key" + ln, err := net.Listen("tcp", "0.0.0.0:0") + require.Nil(t, err, "expecting a port to have been assigned on which we can listen") + srv, err := grpcdb.NewServer(cert, key) + require.Nil(t, err) + defer srv.Stop() + go func() { + if err := srv.Serve(ln); err != nil { + t.Fatalf("BindServer: %v", err) + } + }() + + client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert) + require.Nil(t, err, "expecting a successful client creation") + dbName := "test-remote-db" + require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"})) + defer func() { + err := os.RemoveAll(dbName + ".db") + if err != nil { + panic(err) + } + }() + + k1 := []byte("key-1") + v1 := client.Get(k1) + require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1) + vv1 := []byte("value-1") + client.Set(k1, vv1) + gv1 := client.Get(k1) + require.Equal(t, gv1, vv1) + + // Simple iteration + itr := client.Iterator(nil, nil) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-1")) + require.Equal(t, itr.Value(), []byte("value-1")) + require.Panics(t, itr.Next) + itr.Close() + + // Set some more keys + k2 := []byte("key-2") + v2 := []byte("value-2") + client.SetSync(k2, v2) + has := client.Has(k2) + require.True(t, 
has) + gv2 := client.Get(k2) + require.Equal(t, gv2, v2) + + // More iteration + itr = client.Iterator(nil, nil) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-1")) + require.Equal(t, itr.Value(), []byte("value-1")) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-2")) + require.Equal(t, itr.Value(), []byte("value-2")) + require.Panics(t, itr.Next) + itr.Close() + + // Deletion + client.Delete(k1) + client.DeleteSync(k2) + gv1 = client.Get(k1) + gv2 = client.Get(k2) + require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore") + require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore") + + // Batch tests - set + k3 := []byte("key-3") + k4 := []byte("key-4") + k5 := []byte("key-5") + v3 := []byte("value-3") + v4 := []byte("value-4") + v5 := []byte("value-5") + bat := client.NewBatch() + bat.Set(k3, v3) + bat.Set(k4, v4) + rv3 := client.Get(k3) + require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored") + rv4 := client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored") + bat.Write() + rv3 = client.Get(k3) + require.Equal(t, rv3, v3, "expecting k3 to have been stored") + rv4 = client.Get(k4) + require.Equal(t, rv4, v4, "expecting k4 to have been stored") + + // Batch tests - deletion + bat = client.NewBatch() + bat.Delete(k4) + bat.Delete(k3) + bat.WriteSync() + rv3 = client.Get(k3) + require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted") + rv4 = client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") + + // Batch tests - set and delete + bat = client.NewBatch() + bat.Set(k4, v4) + bat.Set(k5, v5) + bat.Delete(k4) + bat.WriteSync() + rv4 = client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") + rv5 := client.Get(k5) + require.Equal(t, rv5, v5, "expecting k5 to have been stored") +} diff --git a/libs/db/remotedb/test.crt b/libs/db/remotedb/test.crt new file mode 100644 index 000000000..bdc8a0f29 --- /dev/null +++ b/libs/db/remotedb/test.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEQTCCAimgAwIBAgIRANqF1HD19i/uvQ3n62TAKTwwDQYJKoZIhvcNAQELBQAw +GTEXMBUGA1UEAxMOdGVuZGVybWludC5jb20wHhcNMTgwNzAyMDMwNzMyWhcNMjAw +MTAyMDMwNzMwWjANMQswCQYDVQQDEwI6OjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAOuWUMCSzYJmvKU1vsouDTe7OxnPWO3oV0FjSH8vKYoi2zpZQX35 +dQDPtLDF2/v/ANZJ5pzMJR8yMMtEQ4tWxKuGzJw1ZgTgHtASPbj/M5fDnDO7Hqg4 +D09eLTkZAUfiBf6BzDyQIHn22CUexhaS70TbIT9AOAoOsGXMZz9d+iImKIm+gbzf +pR52LNbBGesHWGjwIuGF4InstIMsKSwGv2DctzhWI+i/m5Goi3rd1V8z/lzUbsf1 +0uXqQcSfTyv3ee6YiCWj2W8vcdc5H+B6KzSlGjAR4sRcHTHOQJYO9BgA9evQ3qsJ +Pp00iez13RdheJWPtbfUqQy4gdpu8HFeZx8CAwEAAaOBjzCBjDAOBgNVHQ8BAf8E +BAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRc +XBo+bJILrLcJiGkTWeMPpXb1TDAfBgNVHSMEGDAWgBQqk1Xu65Ww7EBCROw4KLGw +KuToaDAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAAAAAAMA0GCSqGSIb3DQEBCwUA +A4ICAQAbGsIMhL8clczNmhGl9xZhmyNz6FbLq6g163x9LTgfvwHPt+7urthtd++O +uy4Ut8zFurh/yk7eooPlzf8jO7QUJBAFVy4vj8IcsvpWbFa7cuEOIulbjIzyAm/v +lgy7vUQ6xrWn8x8O9K1ww9z7wugwCyl22BD0wSHZKclJz++AwpL6vUVOD76IIuJO ++S6bE6z26/0ndpundh2AkA++2eIleD6ygnTeTl0PWu6aGoCggBmos50f8KgYHZF/ +OZVef203kDls9xCaOiMzaU91VsgLqq/gNcT+2cBd5r3IZTY3C8Rve6EEHS+/4zxf +PKlmiLN7lU9GFZogKecYzY+zPT7OArY7OVFnGTo4qdhdmxnXzHsI+anMCjxLOgEJ +381hyplQGPQOouEupCBxFcwa7oMYoGu20+1nLWYEqFcIXCeyH+s77MyteJSsseqL +xivG5PT+jKJn9hrnFb39bBmht9Vsa+Th6vk953zi5wCSe1j2wXsxFaENDq6BQZOK +f86Kp86M2elYnv3lJ3j2DE2ZTMpw+PA5ThYUnB+HVqYeeB2Y3ErRS8P1FOp1LBE8 ++eTz7yXQO5OM2wdYhNNL1zDri/41fHXi9b6337PZVqc39GM+N74x/O4Q7xEBiWgQ 
+T0dT8SNwf55kv63MeZh63ImxFV0FNRkCteYLcJMle3ohIY4zyQ== +-----END CERTIFICATE----- diff --git a/libs/db/remotedb/test.key b/libs/db/remotedb/test.key new file mode 100644 index 000000000..14d285584 --- /dev/null +++ b/libs/db/remotedb/test.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA65ZQwJLNgma8pTW+yi4NN7s7Gc9Y7ehXQWNIfy8piiLbOllB +ffl1AM+0sMXb+/8A1knmnMwlHzIwy0RDi1bEq4bMnDVmBOAe0BI9uP8zl8OcM7se +qDgPT14tORkBR+IF/oHMPJAgefbYJR7GFpLvRNshP0A4Cg6wZcxnP136IiYoib6B +vN+lHnYs1sEZ6wdYaPAi4YXgiey0gywpLAa/YNy3OFYj6L+bkaiLet3VXzP+XNRu +x/XS5epBxJ9PK/d57piIJaPZby9x1zkf4HorNKUaMBHixFwdMc5Alg70GAD169De +qwk+nTSJ7PXdF2F4lY+1t9SpDLiB2m7wcV5nHwIDAQABAoIBAQCB2/ilPgaUE8d2 +ldqWHa5hgw4/2uCdO04ll/GVUczm/PG1BxAnvYL2MIfcTSRGkrjGZjP9SDZKLONi +mD1XKDv+hK5yiKi0lUnGzddCC0JILKYEieeLOGOQD0yERblEA13kfW20EIomUJ+y +TnVIajQD03pPIDoDqTco1fQvpMDFYw5Q//UhH7VBC261GO1akvhT2Gqdb4aKLaYQ +iDW9IEButL5cRKIJuRxToB/JbmPVEF7xIZtm0sf9dtYVOlBQLeID0uHXgaci0enc +de6GMajmj7NFqc36ypb+Ct18fqEwQBYD+TSQdKs7/lMsAXwRjd5HW4RbYiMZyYnf +Dxgh7QVBAoGBAP9aLLIUcIG7+gk1x7xd+8wRhfo+dhsungeCluSigI9AsfDr6dpR +G9/0lEJH56noZZKQueACTmj7shmRB40xFFLc8w0IDRZCnofsl+Z15k9K84uFPA3W +hdZH9nMieU/mRKdcUYK7pHGqbicHTaJQ5ydZ+xb2E+zYQHOzYpQacHv/AoGBAOwv +TjDZSiassnAPYmmfcHtkUF4gf7PTpiZfH0hXHGAb0mJX4cXAoktAeDeHSi2tz3LW +dAc0ReP8Pdf3uSNv7wkJ1KpNRxAhU5bhnDFmjRc7gMZknVOU+az2M+4yGOn/SOiJ +I6uMHgQDS/VsI+N583n6gbGxVHbQfr9TOc4bLpThAoGBAKin0JmWMnEdzRnEMbZS +hPrWIB2Wn794XNws/qjoQ+1aF60+xGhz5etXyYy1nWd1nZDekkZIf62LgKiuR8ST +xA6u7MGQrcQkID06oWGQQZvhr1ZZm76wEBnl0ftdq66AMpwvt46XjReeL78LbdVl +hidRoSwbQDHQ61EADH4xsFXVAoGBAISXqhXSZsZ/fU1b1avmTod3MYcmR4r07vnr +vOwnu05ZUCrVm3IhSvtkHhlOYl5yjVuy+UByICp1mWJ9N/qlBFTWqAVTjOmJTBwQ +XFd/cwXv6cN3CLu7js+DCHRYu5PiNVQWaWgNKWynTSViqGM0O3PnJphTLU/mjMFs +P69toyEBAoGBALh9YsqxHdYdS5WK9chzDfGlaTQ79jwN+gEzQuP1ooLF0JkMgh5W +//2C6kCrgBsGTm1gfHAjEfC04ZDZLFbKLm56YVKUGL6JJNapm6e5kfiZGjbRKWAg +ViCeRS2qQnVbH74GfHyimeTPDI9cJMiJfDDTPbfosqWSsPEcg2jfsySJ +-----END RSA PRIVATE KEY----- diff --git a/libs/db/types.go b/libs/db/types.go new file mode 100644 index 000000000..ad78859a7 --- /dev/null +++ b/libs/db/types.go @@ -0,0 +1,134 @@ +package db + +// DBs are goroutine safe. +type DB interface { + + // Get returns nil iff key doesn't exist. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte + Get([]byte) []byte + + // Has checks if a key exists. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte + Has(key []byte) bool + + // Set sets the key. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte + Set([]byte, []byte) + SetSync([]byte, []byte) + + // Delete deletes the key. + // A nil key is interpreted as an empty byteslice. + // CONTRACT: key readonly []byte + Delete([]byte) + DeleteSync([]byte) + + // Iterate over a domain of keys in ascending order. End is exclusive. + // Start must be less than end, or the Iterator is invalid. + // A nil start is interpreted as an empty byteslice. + // If end is nil, iterates up to the last item (inclusive). + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte + Iterator(start, end []byte) Iterator + + // Iterate over a domain of keys in descending order. End is exclusive. + // Start must be greater than end, or the Iterator is invalid. + // If start is nil, iterates from the last/greatest item (inclusive). + // If end is nil, iterates up to the first/least item (inclusive). 
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte + ReverseIterator(start, end []byte) Iterator + + // Closes the connection. + Close() + + // Creates a batch for atomic updates. + NewBatch() Batch + + // For debugging + Print() + + // Stats returns a map of property values for all keys and the size of the cache. + Stats() map[string]string +} + +//---------------------------------------- +// Batch + +type Batch interface { + SetDeleter + Write() + WriteSync() +} + +type SetDeleter interface { + Set(key, value []byte) // CONTRACT: key, value readonly []byte + Delete(key []byte) // CONTRACT: key readonly []byte +} + +//---------------------------------------- +// Iterator + +/* + Usage: + + var itr Iterator = ... + defer itr.Close() + + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + // ... + } +*/ +type Iterator interface { + + // The start & end (exclusive) limits to iterate over. + // If end < start, then the Iterator goes in reverse order. + // + // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate + // over anything with the prefix []byte{12, 13}. + // + // The smallest key is the empty byte array []byte{} - see BeginningKey(). + // The largest key is the nil byte array []byte(nil) - see EndingKey(). + // CONTRACT: start, end readonly []byte + Domain() (start []byte, end []byte) + + // Valid returns whether the current position is valid. + // Once invalid, an Iterator is forever invalid. + Valid() bool + + // Next moves the iterator to the next sequential key in the database, as + // defined by order of iteration. + // + // If Valid returns false, this method will panic. + Next() + + // Key returns the key of the cursor. + // If Valid returns false, this method will panic. + // CONTRACT: key readonly []byte + Key() (key []byte) + + // Value returns the value of the cursor. + // If Valid returns false, this method will panic. + // CONTRACT: value readonly []byte + Value() (value []byte) + + // Close releases the Iterator. + Close() +} + +// For testing convenience. +func bz(s string) []byte { + return []byte(s) +} + +// We defensively turn nil keys or values into []byte{} for +// most operations. +func nonNilBytes(bz []byte) []byte { + if bz == nil { + return []byte{} + } + return bz +} diff --git a/libs/db/util.go b/libs/db/util.go new file mode 100644 index 000000000..51277ac42 --- /dev/null +++ b/libs/db/util.go @@ -0,0 +1,78 @@ +package db + +import ( + "bytes" +) + +func cp(bz []byte) (ret []byte) { + ret = make([]byte, len(bz)) + copy(ret, bz) + return ret +} + +// Returns a slice of the same length (big endian) +// except incremented by one. +// Returns nil on overflow (e.g. if bz bytes are all 0xFF) +// CONTRACT: len(bz) > 0 +func cpIncr(bz []byte) (ret []byte) { + if len(bz) == 0 { + panic("cpIncr expects non-zero bz length") + } + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i]++ + return + } + ret[i] = byte(0x00) + if i == 0 { + // Overflow + return nil + } + } + return nil +} + +// Returns a slice of the same length (big endian) +// except decremented by one. +// Returns nil on underflow (e.g. 
if bz bytes are all 0x00) +// CONTRACT: len(bz) > 0 +func cpDecr(bz []byte) (ret []byte) { + if len(bz) == 0 { + panic("cpDecr expects non-zero bz length") + } + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] > byte(0x00) { + ret[i]-- + return + } + ret[i] = byte(0xFF) + if i == 0 { + // Underflow + return nil + } + } + return nil +} + +// See DB interface documentation for more information. +func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { + if !isReverse { + if bytes.Compare(key, start) < 0 { + return false + } + if end != nil && bytes.Compare(end, key) <= 0 { + return false + } + return true + } else { + if start != nil && bytes.Compare(start, key) < 0 { + return false + } + if end != nil && bytes.Compare(key, end) <= 0 { + return false + } + return true + } +} diff --git a/libs/db/util_test.go b/libs/db/util_test.go new file mode 100644 index 000000000..44f1f9f73 --- /dev/null +++ b/libs/db/util_test.go @@ -0,0 +1,93 @@ +package db + +import ( + "fmt" + "testing" +) + +// Empty iterator for empty db. +func TestPrefixIteratorNoMatchNil(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + + checkInvalid(t, itr) + }) + } +} + +// Empty iterator for db populated after iterator created. +func TestPrefixIteratorNoMatch1(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + db.SetSync(bz("1"), bz("value_1")) + + checkInvalid(t, itr) + }) + } +} + +// Empty iterator for prefix starting after db entry. +func TestPrefixIteratorNoMatch2(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("3"), bz("value_3")) + itr := IteratePrefix(db, []byte("4")) + + checkInvalid(t, itr) + }) + } +} + +// Iterator with single val for db with single val, starting from that val. +func TestPrefixIteratorMatch1(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("2"), bz("value_2")) + itr := IteratePrefix(db, bz("2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("2"), bz("value_2")) + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Iterator with prefix iterates over everything with same prefix. +func TestPrefixIteratorMatches1N(t *testing.T) { + for backend := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + + // prefixed + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + + // not + db.SetSync(bz("b/3"), bz("value_3")) + db.SetSync(bz("a-3"), bz("value_3")) + db.SetSync(bz("a.3"), bz("value_3")) + db.SetSync(bz("abcdefg"), bz("value_3")) + itr := IteratePrefix(db, bz("a/")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + //Once invalid... 
+ checkInvalid(t, itr) + }) + } +} diff --git a/libs/events/events.go b/libs/events/events.go index 075f9b42b..9c7f0fd05 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -6,7 +6,7 @@ package events import ( "sync" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Generic event data can be typed and registered with tendermint/go-amino diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 4995ae730..a01fbbb77 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -2,11 +2,11 @@ package events import ( "fmt" - "math/rand" "testing" "time" "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" ) // TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single @@ -306,8 +306,8 @@ func TestRemoveListenersAsync(t *testing.T) { // collect received events for event2 go sumReceivedNumbers(numbers2, doneSum2) addListenersStress := func() { - s1 := rand.NewSource(time.Now().UnixNano()) - r1 := rand.New(s1) + r1 := cmn.NewRand() + r1.Seed(time.Now().UnixNano()) for k := uint16(0); k < 400; k++ { listenerNumber := r1.Intn(100) + 3 eventNumber := r1.Intn(3) + 1 @@ -317,8 +317,8 @@ func TestRemoveListenersAsync(t *testing.T) { } } removeListenersStress := func() { - s2 := rand.NewSource(time.Now().UnixNano()) - r2 := rand.New(s2) + r2 := cmn.NewRand() + r2.Seed(time.Now().UnixNano()) for k := uint16(0); k < 80; k++ { listenerNumber := r2.Intn(100) + 3 go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber)) diff --git a/libs/flowrate/README.md b/libs/flowrate/README.md new file mode 100644 index 000000000..db428090c --- /dev/null +++ b/libs/flowrate/README.md @@ -0,0 +1,10 @@ +Data Flow Rate Control +====================== + +To download and install this package run: + +go get github.com/mxk/go-flowrate/flowrate + +The documentation is available at: + +http://godoc.org/github.com/mxk/go-flowrate/flowrate diff --git a/libs/flowrate/flowrate.go b/libs/flowrate/flowrate.go new file mode 100644 index 000000000..e233eae0f --- /dev/null +++ b/libs/flowrate/flowrate.go @@ -0,0 +1,275 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. 
windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. +func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// Hack to set the current rEMA. +func (m *Monitor) SetREMA(rEMA float64) { + m.mu.Lock() + m.rEMA = rEMA + m.samples++ + m.mu.Unlock() +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. +type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. 
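+// +// For example, a caller might report progress from the snapshot (a sketch; the +// Monitor m and an fmt import are assumed): +// +//   s := m.Status() +//   fmt.Printf("%d bytes, cur %d B/s, peak %d B/s", s.Bytes, s.CurRate, s.PeakRate)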
+func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. If block == true, the +// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. +func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. +func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. 
The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/libs/flowrate/io.go b/libs/flowrate/io.go new file mode 100644 index 000000000..fbe090972 --- /dev/null +++ b/libs/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. +type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. 
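+// +// A hypothetical sketch, copying src to dst at roughly 50 KB/s (src, dst and +// an io import are assumed): +// +//   w := flowrate.NewWriter(dst, 50*1024) +//   _, err := io.Copy(w, src) // error handling elided +//   total := w.Done() // total bytes written; stops further limiting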
+func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. +func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/libs/flowrate/io_test.go b/libs/flowrate/io_test.go new file mode 100644 index 000000000..c84029d5e --- /dev/null +++ b/libs/flowrate/io_test.go @@ -0,0 +1,194 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "bytes" + "testing" + "time" +) + +const ( + _50ms = 50 * time.Millisecond + _100ms = 100 * time.Millisecond + _200ms = 200 * time.Millisecond + _300ms = 300 * time.Millisecond + _400ms = 400 * time.Millisecond + _500ms = 500 * time.Millisecond +) + +func nextStatus(m *Monitor) Status { + samples := m.samples + for i := 0; i < 30; i++ { + if s := m.Status(); s.Samples != samples { + return s + } + time.Sleep(5 * time.Millisecond) + } + return m.Status() +} + +func TestReader(t *testing.T) { + in := make([]byte, 100) + for i := range in { + in[i] = byte(i) + } + b := make([]byte, 100) + r := NewReader(bytes.NewReader(in), 100) + start := time.Now() + + // Make sure r implements Limiter + _ = Limiter(r) + + // 1st read of 10 bytes is performed immediately + if n, err := r.Read(b); n != 10 || err != nil { + t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + // No new Reads allowed in the current sample + r.SetBlocking(false) + if n, err := r.Read(b); n != 0 || err != nil { + t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + status := [6]Status{0: r.Status()} // No samples in the first status + + // 2nd read of 10 bytes blocks until the next sample + r.SetBlocking(true) + if n, err := r.Read(b[10:]); n != 10 || err != nil { + t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _100ms { + t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) + } + + status[1] = r.Status() // 1st sample + status[2] = nextStatus(r.Monitor) // 2nd sample + status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample + + if n := r.Done(); n != 20 { + t.Fatalf("r.Done() expected 20; got %v", n) + } + + status[4] = r.Status() + status[5] = nextStatus(r.Monitor) // Timeout + 
start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + } + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) + } + } + if !bytes.Equal(b[:20], in[:20]) { + t.Errorf("r.Read() input doesn't match output") + } +} + +func TestWriter(t *testing.T) { + b := make([]byte, 100) + for i := range b { + b[i] = byte(i) + } + w := NewWriter(&bytes.Buffer{}, 200) + start := time.Now() + + // Make sure w implements Limiter + _ = Limiter(w) + + // Non-blocking 20-byte write for the first sample returns ErrLimit + w.SetBlocking(false) + if n, err := w.Write(b); n != 20 || err != ErrLimit { + t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("w.Write(b) took too long (%v)", rt) + } + + // Blocking 80-byte write + w.SetBlocking(true) + if n, err := w.Write(b[20:]); n != 80 || err != nil { + t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _300ms { + // Explanation for `rt < _300ms` (as opposed to `< _400ms`) + // + // |<-- start | | + // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms + // sends: 20|20 |20 |20 |20# + // + // NOTE: The '#' symbol can thus happen before 400ms is up. + // Thus, we can only panic if rt < _300ms. + t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) + } + + w.SetTransferSize(100) + status := []Status{w.Status(), nextStatus(w.Monitor)} + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000}, + Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, + } + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) + } + } + if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { + t.Errorf("w.Write() input doesn't match output") + } +} + +const maxDeviationForDuration = 50 * time.Millisecond +const maxDeviationForRate int64 = 50 + +// statusesAreEqual returns true if s1 is equal to s2. Equality here means +// general equality of fields except for the duration and rates, which can +// drift due to unpredictable delays (e.g. thread wakes up 25ms after +// `time.Sleep` has ended). 
+func statusesAreEqual(s1 *Status, s2 *Status) bool { + if s1.Active == s2.Active && + s1.Start == s2.Start && + durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && + s1.Idle == s2.Idle && + s1.Bytes == s2.Bytes && + s1.Samples == s2.Samples && + ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && + ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && + ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && + ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && + s1.BytesRem == s2.BytesRem && + durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && + s1.Progress == s2.Progress { + return true + } + return false +} + +func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { + return d2-d1 <= maxDeviation +} + +func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { + sub := r1 - r2 + if sub < 0 { + sub = -sub + } + if sub <= maxDeviation { + return true + } + return false +} diff --git a/libs/flowrate/util.go b/libs/flowrate/util.go new file mode 100644 index 000000000..b33ddc701 --- /dev/null +++ b/libs/flowrate/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Now().Round(clockRate) + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Now().Round(clockRate).Sub(czero) +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. +func clockToTime(c time.Duration) time.Time { + return czero.Add(c) +} + +// clockRound returns d rounded to the nearest clockRate increment. +func clockRound(d time.Duration) time.Duration { + return (d + clockRate>>1) / clockRate * clockRate +} + +// round returns x rounded to the nearest int64 (non-negative values only). +func round(x float64) int64 { + if _, frac := math.Modf(x); frac >= 0.5 { + return int64(math.Ceil(x)) + } + return int64(math.Floor(x)) +} + +// Percent represents a percentage in increments of 1/1000th of a percent. +type Percent uint32 + +// percentOf calculates what percent of the total is x. +func percentOf(x, total float64) Percent { + if x < 0 || total <= 0 { + return 0 + } else if p := round(x / total * 1e5); p <= math.MaxUint32 { + return Percent(p) + } + return Percent(math.MaxUint32) +} + +func (p Percent) Float() float64 { + return float64(p) * 1e-3 +} + +func (p Percent) String() string { + var buf [12]byte + b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) + n := len(b) + b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) + b[n] = '.' + return string(append(b, '%')) +} diff --git a/libs/log/filter.go b/libs/log/filter.go new file mode 100644 index 000000000..768c09b85 --- /dev/null +++ b/libs/log/filter.go @@ -0,0 +1,158 @@ +package log + +import "fmt" + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelError +) + +type filter struct { + next Logger + allowed level // OR'd levels for the default case + allowedKeyvals map[keyval]level // When a key-value pair matches, use this level +} + +type keyval struct { + key interface{} + value interface{} +} + +// NewFilter wraps next and implements filtering. See the commentary on the +// Option functions for a detailed description of how to configure levels. 
+// If no options are provided, all leveled log events created with Debug, Info
+// or Error helper methods are squelched.
+func NewFilter(next Logger, options ...Option) Logger {
+	l := &filter{
+		next:           next,
+		allowedKeyvals: make(map[keyval]level),
+	}
+	for _, option := range options {
+		option(l)
+	}
+	return l
+}
+
+func (l *filter) Info(msg string, keyvals ...interface{}) {
+	levelAllowed := l.allowed&levelInfo != 0
+	if !levelAllowed {
+		return
+	}
+	l.next.Info(msg, keyvals...)
+}
+
+func (l *filter) Debug(msg string, keyvals ...interface{}) {
+	levelAllowed := l.allowed&levelDebug != 0
+	if !levelAllowed {
+		return
+	}
+	l.next.Debug(msg, keyvals...)
+}
+
+func (l *filter) Error(msg string, keyvals ...interface{}) {
+	levelAllowed := l.allowed&levelError != 0
+	if !levelAllowed {
+		return
+	}
+	l.next.Error(msg, keyvals...)
+}
+
+// With implements Logger by constructing a new filter with keyvals appended
+// to the logger.
+//
+// If a custom level was set for a keyval pair using one of the
+// Allow*With methods, it is used as the logger's level.
+//
+// Examples:
+//     logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
+//     logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto"
+//
+//     logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
+//     logger.With("module", "crypto", "user", "Sam").Info("Hello") # produces no output
+//
+//     logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
+//     logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam"
+func (l *filter) With(keyvals ...interface{}) Logger {
+	for i := len(keyvals) - 2; i >= 0; i -= 2 {
+		for kv, allowed := range l.allowedKeyvals {
+			if keyvals[i] == kv.key && keyvals[i+1] == kv.value {
+				return &filter{next: l.next.With(keyvals...), allowed: allowed, allowedKeyvals: l.allowedKeyvals}
+			}
+		}
+	}
+	return &filter{next: l.next.With(keyvals...), allowed: l.allowed, allowedKeyvals: l.allowedKeyvals}
+}
+
+//--------------------------------------------------------------------------------
+
+// Option sets a parameter for the filter.
+type Option func(*filter)
+
+// AllowLevel returns an option for the given level, or an error if no option
+// exists for that level.
+func AllowLevel(lvl string) (Option, error) {
+	switch lvl {
+	case "debug":
+		return AllowDebug(), nil
+	case "info":
+		return AllowInfo(), nil
+	case "error":
+		return AllowError(), nil
+	case "none":
+		return AllowNone(), nil
+	default:
+		return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl)
+	}
+}
+
+// AllowAll is an alias for AllowDebug.
+func AllowAll() Option {
+	return AllowDebug()
+}
+
+// AllowDebug allows error, info and debug level log events to pass.
+func AllowDebug() Option {
+	return allowed(levelError | levelInfo | levelDebug)
+}
+
+// AllowInfo allows error and info level log events to pass.
+func AllowInfo() Option {
+	return allowed(levelError | levelInfo)
+}
+
+// AllowError allows only error level log events to pass.
+func AllowError() Option {
+	return allowed(levelError)
+}
+
+// AllowNone allows no leveled log events to pass.
+func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *filter) { l.allowed = allowed } +} + +// AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair. +func AllowDebugWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug } +} + +// AllowInfoWith allows error and info level log events to pass for a specific key value pair. +func AllowInfoWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo } +} + +// AllowErrorWith allows only error level log events to pass for a specific key value pair. +func AllowErrorWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError } +} + +// AllowNoneWith allows no leveled log events to pass for a specific key value pair. +func AllowNoneWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } +} diff --git a/libs/log/filter_test.go b/libs/log/filter_test.go new file mode 100644 index 000000000..f9957f043 --- /dev/null +++ b/libs/log/filter_test.go @@ -0,0 +1,118 @@ +package log_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/tendermint/tendermint/libs/log" +) + +func TestVariousLevels(t *testing.T) { + testCases := []struct { + name string + allowed log.Option + want string + }{ + { + "AllowAll", + log.AllowAll(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowDebug", + log.AllowDebug(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowInfo", + log.AllowInfo(), + strings.Join([]string{ + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowError", + log.AllowError(), + strings.Join([]string{ + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowNone", + log.AllowNone(), + ``, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + logger := log.NewFilter(log.NewTMJSONLogger(&buf), tc.allowed) + + logger.Debug("here", "this is", "debug log") + logger.Info("here", "this is", "info log") + logger.Error("here", "this is", "error log") + + if want, have := tc.want, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant:\n%s\nhave:\n%s", want, have) + } + }) + } +} + +func TestLevelContext(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + logger = log.NewFilter(logger, log.AllowError()) + logger = logger.With("context", "value") + + logger.Error("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + logger.Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestVariousAllowWith(t *testing.T) { + var buf 
bytes.Buffer
+
+	logger := log.NewTMJSONLogger(&buf)
+
+	logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"))
+	logger1.With("context", "value").Info("foo", "bar", "baz")
+	if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have {
+		t.Errorf("\nwant '%s'\nhave '%s'", want, have)
+	}
+
+	buf.Reset()
+
+	logger2 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam"))
+	logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz")
+	if want, have := ``, strings.TrimSpace(buf.String()); want != have {
+		t.Errorf("\nwant '%s'\nhave '%s'", want, have)
+	}
+
+	buf.Reset()
+
+	logger3 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam"))
+	logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz")
+	if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have {
+		t.Errorf("\nwant '%s'\nhave '%s'", want, have)
+	}
+}
diff --git a/libs/log/logger.go b/libs/log/logger.go
new file mode 100644
index 000000000..ddb187bc7
--- /dev/null
+++ b/libs/log/logger.go
@@ -0,0 +1,30 @@
+package log
+
+import (
+	"io"
+
+	kitlog "github.com/go-kit/kit/log"
+)
+
+// Logger is what any Tendermint library should take.
+type Logger interface {
+	Debug(msg string, keyvals ...interface{})
+	Info(msg string, keyvals ...interface{})
+	Error(msg string, keyvals ...interface{})
+
+	With(keyvals ...interface{}) Logger
+}
+
+// NewSyncWriter returns a new writer that is safe for concurrent use by
+// multiple goroutines. Writes to the returned writer are passed on to w. If
+// another write is already in progress, the calling goroutine blocks until
+// the writer is available.
+//
+// If w implements the following interface, so does the returned writer.
+//
+//	interface {
+//		Fd() uintptr
+//	}
+func NewSyncWriter(w io.Writer) io.Writer {
+	return kitlog.NewSyncWriter(w)
+}
diff --git a/libs/log/nop_logger.go b/libs/log/nop_logger.go
new file mode 100644
index 000000000..12d75abe6
--- /dev/null
+++ b/libs/log/nop_logger.go
@@ -0,0 +1,17 @@
+package log
+
+type nopLogger struct{}
+
+// Interface assertions
+var _ Logger = (*nopLogger)(nil)
+
+// NewNopLogger returns a logger that doesn't do anything.
+func NewNopLogger() Logger { return &nopLogger{} }
+
+func (nopLogger) Info(string, ...interface{})  {}
+func (nopLogger) Debug(string, ...interface{}) {}
+func (nopLogger) Error(string, ...interface{}) {}
+
+func (l *nopLogger) With(...interface{}) Logger {
+	return l
+}
diff --git a/libs/log/testing_logger.go b/libs/log/testing_logger.go
new file mode 100644
index 000000000..81482bef5
--- /dev/null
+++ b/libs/log/testing_logger.go
@@ -0,0 +1,49 @@
+package log
+
+import (
+	"os"
+	"testing"
+
+	"github.com/go-kit/kit/log/term"
+)
+
+var (
+	// reuse the same logger across all tests
+	_testingLogger Logger
+)
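Taken together, the new `libs/log` pieces compose as shown below; a hedged usage sketch (the level string "info" stands in for whatever a config file would supply, and is not taken from this diff):

```go
package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	// Parse a level string (typically from config) into a filter option.
	opt, err := log.AllowLevel("info")
	if err != nil {
		panic(err)
	}

	// NewSyncWriter serializes concurrent writes to os.Stdout, so the logger
	// can safely be shared across goroutines.
	logger := log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), opt)

	logger.Debug("dropped by the filter")
	logger.Info("kept", "height", 1)
}
```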
+
+// TestingLogger returns a TMLogger which writes to STDOUT when tests are run
+// with the verbose (-v) flag, and a NopLogger otherwise.
+//
+// Note that the call to TestingLogger() must be made
+// inside a test (not in the init func) because
+// the verbose flag is only set once testing is running.
+func TestingLogger() Logger {
+	if _testingLogger != nil {
+		return _testingLogger
+	}
+
+	if testing.Verbose() {
+		_testingLogger = NewTMLogger(NewSyncWriter(os.Stdout))
+	} else {
+		_testingLogger = NewNopLogger()
+	}
+
+	return _testingLogger
+}
+
+// TestingLoggerWithColorFn allows you to provide your own color function. See
+// TestingLogger for documentation.
+func TestingLoggerWithColorFn(colorFn func(keyvals ...interface{}) term.FgBgColor) Logger {
+	if _testingLogger != nil {
+		return _testingLogger
+	}
+
+	if testing.Verbose() {
+		_testingLogger = NewTMLoggerWithColorFn(NewSyncWriter(os.Stdout), colorFn)
+	} else {
+		_testingLogger = NewNopLogger()
+	}
+
+	return _testingLogger
+}
diff --git a/libs/log/tm_json_logger.go b/libs/log/tm_json_logger.go
new file mode 100644
index 000000000..a71ac1034
--- /dev/null
+++ b/libs/log/tm_json_logger.go
@@ -0,0 +1,15 @@
+package log
+
+import (
+	"io"
+
+	kitlog "github.com/go-kit/kit/log"
+)
+
+// NewTMJSONLogger returns a Logger that encodes keyvals to the Writer as a
+// single JSON object. Each log event produces no more than one call to
+// w.Write. The passed Writer must be safe for concurrent use by multiple
+// goroutines if the returned Logger will be used concurrently.
+func NewTMJSONLogger(w io.Writer) Logger {
+	return &tmLogger{kitlog.NewJSONLogger(w)}
+}
diff --git a/libs/log/tm_logger.go b/libs/log/tm_logger.go
new file mode 100644
index 000000000..d49e8d22b
--- /dev/null
+++ b/libs/log/tm_logger.go
@@ -0,0 +1,83 @@
+package log
+
+import (
+	"fmt"
+	"io"
+
+	kitlog "github.com/go-kit/kit/log"
+	kitlevel "github.com/go-kit/kit/log/level"
+	"github.com/go-kit/kit/log/term"
+)
+
+const (
+	msgKey    = "_msg" // "_" prefixed to avoid collisions
+	moduleKey = "module"
+)
+
+type tmLogger struct {
+	srcLogger kitlog.Logger
+}
+
+// Interface assertions
+var _ Logger = (*tmLogger)(nil)
+
+// NewTMLogger returns a logger that encodes msg and keyvals to the Writer
+// using go-kit's log as an underlying logger and our custom formatter. Note
+// that the underlying logger could be swapped with something else.
+func NewTMLogger(w io.Writer) Logger {
+	// Color by level value
+	colorFn := func(keyvals ...interface{}) term.FgBgColor {
+		if keyvals[0] != kitlevel.Key() {
+			panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0]))
+		}
+		switch keyvals[1].(kitlevel.Value).String() {
+		case "debug":
+			return term.FgBgColor{Fg: term.DarkGray}
+		case "error":
+			return term.FgBgColor{Fg: term.Red}
+		default:
+			return term.FgBgColor{}
+		}
+	}
+
+	return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)}
+}
+
+// NewTMLoggerWithColorFn allows you to provide your own color function. See
+// NewTMLogger for documentation.
+func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger {
+	return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)}
+}
+
+// Info logs a message at level Info.
+func (l *tmLogger) Info(msg string, keyvals ...interface{}) {
+	lWithLevel := kitlevel.Info(l.srcLogger)
+	if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil {
+		errLogger := kitlevel.Error(l.srcLogger)
+		kitlog.With(errLogger, msgKey, msg).Log("err", err)
+	}
+}
+
+// Debug logs a message at level Debug.
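As an aside before the remaining leveled methods: `NewTMLoggerWithColorFn` accepts any function over the event's keyvals. A sketch of a custom color function, assuming we want to highlight one module (the module name "consensus" is illustrative; the level pair always comes first in keyvals):

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log/term"

	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	// Color whole lines yellow when they carry module=consensus.
	colorFn := func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "module" && keyvals[i+1] == "consensus" {
				return term.FgBgColor{Fg: term.Yellow}
			}
		}
		return term.FgBgColor{} // default colors otherwise
	}

	logger := log.NewTMLoggerWithColorFn(log.NewSyncWriter(os.Stdout), colorFn)
	logger.Info("enterNewRound", "module", "consensus")
}
```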
+func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Debug(l.srcLogger) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) + } +} + +// Error logs a message at level Error. +func (l *tmLogger) Error(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Error(l.srcLogger) + lWithMsg := kitlog.With(lWithLevel, msgKey, msg) + if err := lWithMsg.Log(keyvals...); err != nil { + lWithMsg.Log("err", err) + } +} + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Info, Debug or Error. +func (l *tmLogger) With(keyvals ...interface{}) Logger { + return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} +} diff --git a/libs/log/tm_logger_test.go b/libs/log/tm_logger_test.go new file mode 100644 index 000000000..1f890cef1 --- /dev/null +++ b/libs/log/tm_logger_test.go @@ -0,0 +1,44 @@ +package log_test + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" + + "github.com/go-logfmt/logfmt" + "github.com/tendermint/tendermint/libs/log" +) + +func TestLoggerLogsItsErrors(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMLogger(&buf) + logger.Info("foo", "baz baz", "bar") + msg := strings.TrimSpace(buf.String()) + if !strings.Contains(msg, logfmt.ErrInvalidKey.Error()) { + t.Errorf("Expected logger msg to contain ErrInvalidKey, got %s", msg) + } +} + +func BenchmarkTMLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) +} + +func BenchmarkTMLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), withInfoMessage) +} + +func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { + lc := logger.With("common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseInfoMessage = func(logger log.Logger) { logger.Info("foo_message", "foo_key", "foo_value") } + withInfoMessage = func(logger log.Logger) { logger.With("a", "b").Info("c", "d", "f") } +) diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go new file mode 100644 index 000000000..d03979718 --- /dev/null +++ b/libs/log/tmfmt_logger.go @@ -0,0 +1,127 @@ +package log + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + kitlog "github.com/go-kit/kit/log" + kitlevel "github.com/go-kit/kit/log/level" + "github.com/go-logfmt/logfmt" +) + +type tmfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *tmfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var tmfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc tmfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type tmfmtLogger struct { + w io.Writer +} + +// NewTMFmtLogger returns a logger that encodes keyvals to the Writer in +// Tendermint custom format. Note complex types (structs, maps, slices) +// formatted as "%+v". +// +// Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. 
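The `tmfmtEncoderPool` above is the standard `sync.Pool` trick: reuse encoder-plus-buffer pairs across log calls so hot logging paths do not allocate per event. A generic, self-contained sketch of that pattern (the names here are illustrative, not the logger's API):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func formatLine(msg string) string {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()            // pooled objects keep old state; always reset first
	defer bufPool.Put(buf) // hand the buffer back for the next caller

	fmt.Fprintf(buf, "msg=%s\n", msg)
	return buf.String()
}

func main() { fmt.Print(formatLine("hello")) }
```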
+func NewTMFmtLogger(w io.Writer) kitlog.Logger { + return &tmfmtLogger{w} +} + +func (l tmfmtLogger) Log(keyvals ...interface{}) error { + enc := tmfmtEncoderPool.Get().(*tmfmtEncoder) + enc.Reset() + defer tmfmtEncoderPool.Put(enc) + + const unknown = "unknown" + lvl := "none" + msg := unknown + module := unknown + + // indexes of keys to skip while encoding later + excludeIndexes := make([]int, 0) + + for i := 0; i < len(keyvals)-1; i += 2 { + // Extract level + if keyvals[i] == kitlevel.Key() { + excludeIndexes = append(excludeIndexes, i) + switch keyvals[i+1].(type) { + case string: + lvl = keyvals[i+1].(string) + case kitlevel.Value: + lvl = keyvals[i+1].(kitlevel.Value).String() + default: + panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) + } + // and message + } else if keyvals[i] == msgKey { + excludeIndexes = append(excludeIndexes, i) + msg = keyvals[i+1].(string) + // and module (could be multiple keyvals; if such case last keyvalue wins) + } else if keyvals[i] == moduleKey { + excludeIndexes = append(excludeIndexes, i) + module = keyvals[i+1].(string) + } + } + + // Form a custom Tendermint line + // + // Example: + // D[05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) + // + // Description: + // D - first character of the level, uppercase (ASCII only) + // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) + // Stopping ... - message + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) + + if module != unknown { + enc.buf.WriteString("module=" + module + " ") + } + +KeyvalueLoop: + for i := 0; i < len(keyvals)-1; i += 2 { + for _, j := range excludeIndexes { + if i == j { + continue KeyvalueLoop + } + } + + err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) + if err == logfmt.ErrUnsupportedValueType { + enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) + } else if err != nil { + return err + } + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. 
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go new file mode 100644 index 000000000..d6f039ce4 --- /dev/null +++ b/libs/log/tmfmt_logger_test.go @@ -0,0 +1,118 @@ +package log_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "regexp" + "testing" + + kitlog "github.com/go-kit/kit/log" + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/libs/log" +) + +func TestTMFmtLogger(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewTMFmtLogger(buf) + + if err := logger.Log("hello", "world"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hello=world\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ std_map=map\[1:2\] my_map=special_behavior\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("level", "error"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`E\[.+\] unknown \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("_msg", "Hello"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("module", "main", "module", "crypto", "module", "wire"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) +} + +func BenchmarkTMFmtLoggerSimple(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), baseMessage) +} + +func BenchmarkTMFmtLoggerContextual(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), withMessage) +} + +func TestTMFmtLoggerConcurrency(t *testing.T) { + t.Parallel() + testConcurrency(t, log.NewTMFmtLogger(ioutil.Discard), 10000) +} + +func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { + lc := kitlog.With(logger, "common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } + withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } +) + +// These test are designed to be run with the race detector. 
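For readers skimming the regexps above: they encode the custom line layout built in `tmfmt_logger.go`. A small sketch of what `NewTMFmtLogger` output looks like (the message text and keyvals are illustrative; the timestamp obviously varies):

```go
package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	logger := log.NewTMFmtLogger(os.Stdout)
	logger.Log("level", "info", "_msg", "Committed block", "module", "consensus", "height", 42)
	// I[07-23|10:15:04.322] Committed block                              module=consensus height=42
	// ^ level initial, UTC time, the 44-column padded message, module, then remaining keyvals.
}
```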
+ +func testConcurrency(t *testing.T, logger kitlog.Logger, total int) { + n := int(math.Sqrt(float64(total))) + share := total / n + + errC := make(chan error, n) + + for i := 0; i < n; i++ { + go func() { + errC <- spam(logger, share) + }() + } + + for i := 0; i < n; i++ { + err := <-errC + if err != nil { + t.Fatalf("concurrent logging error: %v", err) + } + } +} + +func spam(logger kitlog.Logger, count int) error { + for i := 0; i < count; i++ { + err := logger.Log("key", i) + if err != nil { + return err + } + } + return nil +} + +type mymap map[int]int + +func (m mymap) String() string { return "special_behavior" } diff --git a/libs/log/tracing_logger.go b/libs/log/tracing_logger.go new file mode 100644 index 000000000..d2a6ff44e --- /dev/null +++ b/libs/log/tracing_logger.go @@ -0,0 +1,76 @@ +package log + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// NewTracingLogger enables tracing by wrapping all errors (if they +// implement stackTracer interface) in tracedError. +// +// All errors returned by https://github.com/pkg/errors implement stackTracer +// interface. +// +// For debugging purposes only as it doubles the amount of allocations. +func NewTracingLogger(next Logger) Logger { + return &tracingLogger{ + next: next, + } +} + +type stackTracer interface { + error + StackTrace() errors.StackTrace +} + +type tracingLogger struct { + next Logger +} + +func (l *tracingLogger) Info(msg string, keyvals ...interface{}) { + l.next.Info(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) { + l.next.Debug(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Error(msg string, keyvals ...interface{}) { + l.next.Error(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) With(keyvals ...interface{}) Logger { + return &tracingLogger{next: l.next.With(formatErrors(keyvals)...)} +} + +func formatErrors(keyvals []interface{}) []interface{} { + newKeyvals := make([]interface{}, len(keyvals)) + copy(newKeyvals, keyvals) + for i := 0; i < len(newKeyvals)-1; i += 2 { + if err, ok := newKeyvals[i+1].(stackTracer); ok { + newKeyvals[i+1] = tracedError{err} + } + } + return newKeyvals +} + +// tracedError wraps a stackTracer and just makes the Error() result +// always return a full stack trace. 
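To make the tracing wrapper concrete: errors produced by github.com/pkg/errors satisfy the `stackTracer` interface above, so the wrapper re-renders them with `%+v`. A hedged usage sketch (the error text is illustrative):

```go
package main

import (
	"os"

	"github.com/pkg/errors"

	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	logger := log.NewTracingLogger(log.NewTMJSONLogger(os.Stdout))

	// errors.New from github.com/pkg/errors records a stack trace at the
	// call site; the tracing logger prints it instead of just the message.
	err := errors.New("connection reset")
	logger.Error("dial failed", "err", err)
}
```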
+type tracedError struct { + wrapped stackTracer +} + +var _ stackTracer = tracedError{} + +func (t tracedError) StackTrace() errors.StackTrace { + return t.wrapped.StackTrace() +} + +func (t tracedError) Cause() error { + return t.wrapped +} + +func (t tracedError) Error() string { + return fmt.Sprintf("%+v", t.wrapped) +} diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go new file mode 100644 index 000000000..1abc6440f --- /dev/null +++ b/libs/log/tracing_logger_test.go @@ -0,0 +1,41 @@ +package log_test + +import ( + "bytes" + stderr "errors" + "fmt" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/log" +) + +func TestTracingLogger(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + + logger1 := log.NewTracingLogger(logger) + err1 := errors.New("Courage is grace under pressure.") + err2 := errors.New("It does not matter how slowly you go, so long as you do not stop.") + logger1.With("err1", err1).Info("foo", "err2", err2) + have := strings.Replace(strings.Replace(strings.TrimSpace(buf.String()), "\\n", "", -1), "\\t", "", -1) + if want := strings.Replace(strings.Replace(`{"_msg":"foo","err1":"`+fmt.Sprintf("%+v", err1)+`","err2":"`+fmt.Sprintf("%+v", err2)+`","level":"info"}`, "\t", "", -1), "\n", "", -1); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger.With("err1", stderr.New("Opportunities don't happen. You create them.")).Info("foo", "err2", stderr.New("Once you choose hope, anything's possible.")) + if want, have := `{"_msg":"foo","err1":"Opportunities don't happen. You create them.","err2":"Once you choose hope, anything's possible.","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index 260521cd9..4e4634de5 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 776e0653b..4c0d97e2f 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -16,7 +16,7 @@ import ( "errors" "sync" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type operation int @@ -163,6 +163,8 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou return nil case <-ctx.Done(): return ctx.Err() + case <-s.Quit(): + return nil } } @@ -190,6 +192,8 @@ func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) return nil case <-ctx.Done(): return ctx.Err() + case <-s.Quit(): + return nil } } @@ -211,6 +215,8 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { return nil case <-ctx.Done(): return ctx.Err() + case <-s.Quit(): + return nil } } @@ -229,6 +235,8 @@ func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagM return nil case <-ctx.Done(): return 
ctx.Err() + case <-s.Quit(): + return nil } } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index fd6c11cf4..5e9931e40 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/libs/pubsub/query/Makefile b/libs/pubsub/query/Makefile index 91030ef09..aef42b2df 100644 --- a/libs/pubsub/query/Makefile +++ b/libs/pubsub/query/Makefile @@ -1,10 +1,10 @@ gen_query_parser: - @go get github.com/pointlander/peg + go get -u -v github.com/pointlander/peg peg -inline -switch query.peg fuzzy_test: - @go get github.com/dvyukov/go-fuzz/go-fuzz - @go get github.com/dvyukov/go-fuzz/go-fuzz-build + go get -u -v github.com/dvyukov/go-fuzz/go-fuzz + go get -u -v github.com/dvyukov/go-fuzz/go-fuzz-build go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output diff --git a/libs/pubsub/query/query.peg.go b/libs/pubsub/query/query.peg.go index c86e4a47f..c1cc60aa9 100644 --- a/libs/pubsub/query/query.peg.go +++ b/libs/pubsub/query/query.peg.go @@ -1,6 +1,8 @@ // nolint package query +//go:generate peg -inline -switch query.peg + import ( "fmt" "math" diff --git a/libs/test.sh b/libs/test.sh new file mode 100755 index 000000000..ecf17fc45 --- /dev/null +++ b/libs/test.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -e + +# run the linter +# make metalinter_test + +# setup certs +make gen_certs + +# run the unit tests with coverage +echo "" > coverage.txt +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done + +# cleanup certs +make clean_certs diff --git a/libs/test/assert.go b/libs/test/assert.go new file mode 100644 index 000000000..a6ffed0ce --- /dev/null +++ b/libs/test/assert.go @@ -0,0 +1,14 @@ +package test + +import ( + "testing" +) + +func AssertPanics(t *testing.T, msg string, f func()) { + defer func() { + if err := recover(); err == nil { + t.Errorf("Should have panic'd, but didn't: %v", msg) + } + }() + f() +} diff --git a/libs/test/mutate.go b/libs/test/mutate.go new file mode 100644 index 000000000..3bbbbd217 --- /dev/null +++ b/libs/test/mutate.go @@ -0,0 +1,28 @@ +package test + +import ( + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Contract: !bytes.Equal(input, output) && len(input) >= len(output) +func MutateByteSlice(bytez []byte) []byte { + // If bytez is empty, panic + if len(bytez) == 0 { + panic("Cannot mutate an empty bytez") + } + + // Copy bytez + mBytez := make([]byte, len(bytez)) + copy(mBytez, bytez) + bytez = mBytez + + // Try a random mutation + switch cmn.RandInt() % 2 { + case 0: // Mutate a single byte + bytez[cmn.RandInt()%len(bytez)] += byte(cmn.RandInt()%255 + 1) + case 1: // Remove an arbitrary byte + pos := cmn.RandInt() % len(bytez) + bytez = append(bytez[:pos], bytez[pos+1:]...) 
+ } + return bytez +} diff --git a/libs/version/version.go b/libs/version/version.go new file mode 100644 index 000000000..6e73a937d --- /dev/null +++ b/libs/version/version.go @@ -0,0 +1,3 @@ +package version + +const Version = "0.9.0" diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go index e0235ba29..2891e5809 100644 --- a/lite/files/commit_test.go +++ b/lite/files/commit_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite" ) diff --git a/lite/files/wire.go b/lite/files/wire.go index 3a207744a..f45e4c630 100644 --- a/lite/files/wire.go +++ b/lite/files/wire.go @@ -2,11 +2,11 @@ package files import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc = amino.NewCodec() func init() { - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) } diff --git a/lite/helpers.go b/lite/helpers.go index 695f6fb9b..9f404f247 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -4,6 +4,8 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/types" ) @@ -23,7 +25,7 @@ type ValKeys []crypto.PrivKey func GenValKeys(n int) ValKeys { res := make(ValKeys, n) for i := range res { - res[i] = crypto.GenPrivKeyEd25519() + res[i] = ed25519.GenPrivKey() } return res } @@ -32,7 +34,7 @@ func GenValKeys(n int) ValKeys { func (v ValKeys) Change(i int) ValKeys { res := make(ValKeys, len(v)) copy(res, v) - res[i] = crypto.GenPrivKeyEd25519() + res[i] = ed25519.GenPrivKey() return res } @@ -46,7 +48,7 @@ func (v ValKeys) Extend(n int) ValKeys { func GenSecpValKeys(n int) ValKeys { res := make(ValKeys, n) for i := range res { - res[i] = crypto.GenPrivKeySecp256k1() + res[i] = secp256k1.GenPrivKey() } return res } diff --git a/lite/performance_test.go b/lite/performance_test.go index 8cd522cbb..3b805a418 100644 --- a/lite/performance_test.go +++ b/lite/performance_test.go @@ -2,13 +2,12 @@ package lite import ( "fmt" - "math/rand" "sync" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - + cmn "github.com/tendermint/tendermint/libs/common" liteErr "github.com/tendermint/tendermint/lite/errors" ) @@ -280,7 +279,11 @@ func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) } -var rng = rand.New(rand.NewSource(10)) +var rng = cmn.NewRand() + +func init() { + rng.Seed(10) +} func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { lazyGenerateFullCommits(b) diff --git a/lite/proxy/block.go b/lite/proxy/block.go index 4cff9ee68..00b8c87fb 100644 --- a/lite/proxy/block.go +++ b/lite/proxy/block.go @@ -15,14 +15,14 @@ func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error { return errors.New("expecting a non-nil BlockMeta") } // TODO: check the BlockID?? 
- return ValidateHeader(meta.Header, check) + return ValidateHeader(&meta.Header, check) } func ValidateBlock(meta *types.Block, check lite.Commit) error { if meta == nil { return errors.New("expecting a non-nil Block") } - err := ValidateHeader(meta.Header, check) + err := ValidateHeader(&meta.Header, check) if err != nil { return err } diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 2f068f160..0294ddf68 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -4,7 +4,7 @@ import ( "net/http" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 9c9557f8f..0ca5be174 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -3,7 +3,7 @@ package proxy import ( "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite/client" diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index 782a6aabb..47c0ff6de 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -18,7 +18,7 @@ var ( testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC) ) -var hdrHeight11 = &types.Header{ +var hdrHeight11 = types.Header{ Height: 11, Time: testTime1, ValidatorsHash: []byte("Tendermint"), @@ -34,21 +34,18 @@ func TestValidateBlock(t *testing.T) { block: nil, wantErr: "non-nil Block", }, { - block: &types.Block{}, wantErr: "nil Header", - }, - { - block: &types.Block{Header: new(types.Header)}, + block: &types.Block{}, }, // Start Header.Height mismatch test { - block: &types.Block{Header: &types.Header{Height: 10}}, + block: &types.Block{Header: types.Header{Height: 10}}, commit: lite.Commit{Header: &types.Header{Height: 11}}, wantErr: "don't match - 10 vs 11", }, { - block: &types.Block{Header: &types.Header{Height: 11}}, + block: &types.Block{Header: types.Header{Height: 11}}, commit: lite.Commit{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test @@ -62,15 +59,15 @@ func TestValidateBlock(t *testing.T) { { block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, + commit: lite.Commit{Header: &hdrHeight11}, }, // End Header.Hash mismatch test // Start Header.Data hash mismatch test { block: &types.Block{ - Header: &types.Header{Height: 11}, - Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, + Header: types.Header{Height: 11}, + Data: types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, }, commit: lite.Commit{ Header: &types.Header{Height: 11}, @@ -80,8 +77,8 @@ func TestValidateBlock(t *testing.T) { }, { block: &types.Block{ - Header: &types.Header{Height: 11, DataHash: deadBeefHash}, - Data: &types.Data{Txs: deadBeefTxs}, + Header: types.Header{Height: 11, DataHash: deadBeefHash}, + Data: types.Data{Txs: deadBeefTxs}, }, commit: lite.Commit{ Header: &types.Header{Height: 11}, @@ -116,21 +113,18 @@ func TestValidateBlockMeta(t *testing.T) { meta: nil, wantErr: "non-nil BlockMeta", }, { - meta: &types.BlockMeta{}, wantErr: "non-nil Header", - }, - { - meta: &types.BlockMeta{Header: new(types.Header)}, + meta: &types.BlockMeta{}, }, // Start Header.Height mismatch test { - meta: &types.BlockMeta{Header: &types.Header{Height: 10}}, + meta: &types.BlockMeta{Header: types.Header{Height: 10}}, commit: 
lite.Commit{Header: &types.Header{Height: 11}}, wantErr: "don't match - 10 vs 11", }, { - meta: &types.BlockMeta{Header: &types.Header{Height: 11}}, + meta: &types.BlockMeta{Header: types.Header{Height: 11}}, commit: lite.Commit{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test @@ -144,12 +138,12 @@ func TestValidateBlockMeta(t *testing.T) { { meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, + commit: lite.Commit{Header: &hdrHeight11}, }, { meta: &types.BlockMeta{ - Header: &types.Header{ + Header: types.Header{ Height: 11, ValidatorsHash: []byte("lite-test"), // TODO: should be able to use empty time after Amino upgrade @@ -164,7 +158,7 @@ func TestValidateBlockMeta(t *testing.T) { { meta: &types.BlockMeta{ - Header: &types.Header{ + Header: types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint"), Time: testTime1, @@ -183,7 +177,7 @@ func TestValidateBlockMeta(t *testing.T) { { meta: &types.BlockMeta{ - Header: &types.Header{ + Header: types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint"), Time: testTime2, diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 5fb12a40a..f0eb6b41e 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -1,7 +1,7 @@ package proxy import ( - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite" certclient "github.com/tendermint/tendermint/lite/client" diff --git a/mempool/mempool.go b/mempool/mempool.go index 8c9e41d59..e1ed4f260 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -11,10 +11,10 @@ import ( "github.com/pkg/errors" abci "github.com/tendermint/tendermint/abci/types" - auto "github.com/tendermint/tmlibs/autofile" - "github.com/tendermint/tmlibs/clist" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + auto "github.com/tendermint/tendermint/libs/autofile" + "github.com/tendermint/tendermint/libs/clist" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/proxy" @@ -57,6 +57,11 @@ var ( ErrMempoolIsFull = errors.New("Mempool is full") ) +// TxID is the hex encoded hash of the bytes as a types.Tx. +func TxID(tx []byte) string { + return fmt.Sprintf("%X", types.Tx(tx).Hash()) +} + // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus // round. Transaction validity is checked using the CheckTx abci message before the transaction is // added to the pool. The Mempool uses a concurrent list structure for storing transactions that @@ -73,7 +78,7 @@ type Mempool struct { recheckCursor *clist.CElement // next expected response recheckEnd *clist.CElement // re-checking stops here notifiedTxsAvailable bool - txsAvailable chan int64 // fires the next height once for each height, when the mempool is not empty + txsAvailable chan struct{} // fires once for each height, when the mempool is not empty // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -125,7 +130,7 @@ func NewMempool( // ensuring it will trigger once every height when transactions are available. // NOTE: not thread safe - should only be called once, on startup func (mem *Mempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan int64, 1) + mem.txsAvailable = make(chan struct{}, 1) } // SetLogger sets the Logger. 
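The switch from `chan int64` to `chan struct{}` in the mempool above makes the signal purely edge-triggered: consumers only learn *that* transactions exist, not for which height. A sketch of the consumer side under that assumption (`onTxs` is a hypothetical callback, not part of this diff):

```go
package demo

import mempl "github.com/tendermint/tendermint/mempool"

// waitForTxs drains the availability signal; the 1-buffered channel fires at
// most once per height, so a plain range loop is enough.
func waitForTxs(mem *mempl.Mempool, onTxs func()) {
	mem.EnableTxsAvailable() // must be called once, before the mempool starts
	go func() {
		for range mem.TxsAvailable() {
			onTxs() // e.g. wake the proposer: the mempool is no longer empty
		}
	}()
}
```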
@@ -288,11 +293,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { tx: tx, } mem.txs.PushBack(memTx) - mem.logger.Info("Added good transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) + mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "total", mem.Size()) mem.notifyTxsAvailable() } else { // ignore bad transaction - mem.logger.Info("Rejected bad transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) + mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r) // remove from cache (it might be good later) mem.cache.Remove(tx) @@ -343,7 +348,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { // TxsAvailable returns a channel which fires once for every height, // and only when transactions are available in the mempool. // NOTE: the returned channel may be nil if EnableTxsAvailable was not called. -func (mem *Mempool) TxsAvailable() <-chan int64 { +func (mem *Mempool) TxsAvailable() <-chan struct{} { return mem.txsAvailable } @@ -353,11 +358,11 @@ func (mem *Mempool) notifyTxsAvailable() { } if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { // channel cap is 1, so this will send once + mem.notifiedTxsAvailable = true select { - case mem.txsAvailable <- mem.height + 1: + case mem.txsAvailable <- struct{}{}: default: } - mem.notifiedTxsAvailable = true } } diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index fb664ddec..acaa17eec 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/proxy" @@ -38,7 +38,7 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool { return mempool } -func ensureNoFire(t *testing.T, ch <-chan int64, timeoutMS int) { +func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) select { case <-ch: @@ -47,7 +47,7 @@ func ensureNoFire(t *testing.T, ch <-chan int64, timeoutMS int) { } } -func ensureFire(t *testing.T, ch <-chan int64, timeoutMS int) { +func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) select { case <-ch: diff --git a/mempool/reactor.go b/mempool/reactor.go index d6cebfbf3..96988be78 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -5,10 +5,10 @@ import ( "reflect" "time" - abci "github.com/tendermint/tendermint/abci/types" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/clist" - "github.com/tendermint/tmlibs/log" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" @@ -78,7 +78,7 @@ func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received transactions to the mempool. 
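The reordering in `notifyTxsAvailable` above (flag set before the non-blocking send) follows the usual coalescing-signal idiom on a capacity-1 channel. A stripped-down sketch of that idiom, with names shortened for illustration:

```go
package demo

var (
	notified bool
	signal   = make(chan struct{}, 1) // capacity 1 coalesces repeat signals
)

func notify() {
	if notified {
		return
	}
	notified = true // mark before sending, then signal
	select {
	case signal <- struct{}{}:
	default: // a signal is already pending; dropping this one is safe
	}
}
```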
func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := DecodeMessage(msgBytes) + msg, err := decodeMsg(msgBytes) if err != nil { memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) memR.Switch.StopPeerForError(src, err) @@ -90,7 +90,7 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *TxMessage: err := memR.Mempool.CheckTx(msg.Tx, nil) if err != nil { - memR.Logger.Info("Could not check tx", "tx", msg.Tx, "err", err) + memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err) } // broadcasting happens from go routines per peer default: @@ -174,11 +174,9 @@ func RegisterMempoolMessages(cdc *amino.Codec) { cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil) } -// DecodeMessage decodes a byte-array into a MempoolMessage. -func DecodeMessage(bz []byte) (msg MempoolMessage, err error) { +func decodeMsg(bz []byte) (msg MempoolMessage, err error) { if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", - len(bz), maxMsgSize) + return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) } err = cdc.UnmarshalBinaryBare(bz, &msg) return diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index c6844dbb0..b4362032a 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -13,7 +13,7 @@ import ( "github.com/go-kit/kit/log/term" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git a/networks/local/README.md b/networks/local/README.md index 554abdf4b..09a0b12cb 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -2,7 +2,7 @@ ## Requirements -- [Install tendermint](/docs/install.rst) +- [Install tendermint](/docs/install.md) - [Install docker](https://docs.docker.com/engine/installation/) - [Install docker-compose](https://docs.docker.com/compose/install/) diff --git a/networks/remote/README.md b/networks/remote/README.md index 090f6da16..2094fcc98 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](/docs/terraform-and-ansible.rst) +See the [docs](/docs/terraform-and-ansible.md) diff --git a/node/node.go b/node/node.go index bd3ac4645..e333c667a 100644 --- a/node/node.go +++ b/node/node.go @@ -8,18 +8,19 @@ import ( "net" "net/http" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - abci "github.com/tendermint/tendermint/abci/types" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" cs "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/evidence" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" @@ -196,7 +197,7 @@ func NewNode(config *cfg.Config, var ( // 
TODO: persist this key so external signer // can actually authenticate us - privKey = crypto.GenPrivKeyEd25519() + privKey = ed25519.GenPrivKey() pvsc = privval.NewSocketPV( logger.With("module", "privval"), config.PrivValidatorListenAddr, @@ -321,9 +322,9 @@ func NewNode(config *cfg.Config, // TODO persistent peers ? so we can have their DNS addrs saved pexReactor := pex.NewPEXReactor(addrBook, &pex.PEXReactorConfig{ - Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "), - SeedMode: config.P2P.SeedMode, - PrivatePeerIDs: cmn.SplitAndTrim(config.P2P.PrivatePeerIDs, ",", " ")}) + Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "), + SeedMode: config.P2P.SeedMode, + }) pexReactor.SetLogger(p2pLogger) sw.AddReactor("PEX", pexReactor) } @@ -426,8 +427,11 @@ func (n *Node) OnStart() error { } // Create & add listener - protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress) - l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p")) + l := p2p.NewDefaultListener( + n.config.P2P.ListenAddress, + n.config.P2P.ExternalAddress, + n.config.P2P.UPNP, + n.Logger.With("module", "p2p")) n.sw.AddListener(l) // Generate node PrivKey @@ -445,6 +449,9 @@ func (n *Node) OnStart() error { // Add ourselves to addrbook to prevent dialing ourselves n.addrBook.AddOurAddress(nodeInfo.NetAddress()) + // Add private IDs to addrbook to block those peers being added + n.addrBook.AddPrivateIDs(cmn.SplitAndTrim(n.config.P2P.PrivatePeerIDs, ",", " ")) + // Start the RPC server before the P2P server // so we can eg. receive txs for the first block if n.config.RPC.ListenAddress != "" { @@ -482,9 +489,16 @@ func (n *Node) OnStop() { n.BaseService.OnStop() n.Logger.Info("Stopping Node") + + // first stop the non-reactor services + n.eventBus.Stop() + n.indexerService.Stop() + + // now stop the reactors // TODO: gracefully disconnect from peers. n.sw.Stop() + // finally stop the listeners / external services for _, l := range n.rpcListeners { n.Logger.Info("Closing rpc listener", "listener", l) if err := l.Close(); err != nil { @@ -492,9 +506,6 @@ func (n *Node) OnStop() { } } - n.eventBus.Stop() - n.indexerService.Stop() - if pvsc, ok := n.privValidator.(*privval.SocketPV); ok { if err := pvsc.Stop(); err != nil { n.Logger.Error("Error stopping priv validator socket client", "err", err) @@ -596,8 +607,13 @@ func (n *Node) startRPC() ([]net.Listener, error) { // collectors on addr. 
func (n *Node) startPrometheusServer(addr string) *http.Server { srv := &http.Server{ - Addr: addr, - Handler: promhttp.Handler(), + Addr: addr, + Handler: promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, promhttp.HandlerFor( + prometheus.DefaultGatherer, + promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, + ), + ), } go func() { if err := srv.ListenAndServe(); err != http.ErrServerClosed { @@ -696,7 +712,7 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo { } p2pListener := n.sw.Listeners()[0] - p2pHost := p2pListener.ExternalAddress().IP.String() + p2pHost := p2pListener.ExternalAddressHost() p2pPort := p2pListener.ExternalAddress().Port nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort) diff --git a/node/node_test.go b/node/node_test.go index cdabdbb3f..ca074e1bc 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -2,12 +2,15 @@ package node import ( "context" + "fmt" + "os" + "syscall" "testing" "time" "github.com/stretchr/testify/assert" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" @@ -43,6 +46,13 @@ func TestNodeStartStop(t *testing.T) { select { case <-n.Quit(): case <-time.After(5 * time.Second): + pid := os.Getpid() + p, err := os.FindProcess(pid) + if err != nil { + panic(err) + } + err = p.Signal(syscall.SIGABRT) + fmt.Println(err) t.Fatal("timed out waiting for shutdown") } } diff --git a/node/wire.go b/node/wire.go index 8b3ae8950..3bd6de2fc 100644 --- a/node/wire.go +++ b/node/wire.go @@ -2,11 +2,11 @@ package node import ( amino "github.com/tendermint/go-amino" - crypto "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc = amino.NewCodec() func init() { - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) } diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index 83c8efa4b..da1296da0 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -2,7 +2,7 @@ package p2p import ( "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type Reactor interface { diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index c56507296..9672e0117 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -12,13 +12,13 @@ import ( "time" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - flow "github.com/tendermint/tmlibs/flowrate" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + flow "github.com/tendermint/tendermint/libs/flowrate" + "github.com/tendermint/tendermint/libs/log" ) const ( - defaultMaxPacketMsgSize = 1024 + defaultMaxPacketMsgPayloadSize = 1024 numBatchPacketMsgs = 10 minReadBufferSize = 1024 @@ -96,7 +96,7 @@ type MConnection struct { created time.Time // time of creation - emptyPacketMsgSize int + _maxPacketMsgSize int } // MConnConfig is a MConnection configuration. @@ -105,7 +105,7 @@ type MConnConfig struct { RecvRate int64 `mapstructure:"recv_rate"` // Maximum payload size - MaxPacketMsgSize int `mapstructure:"max_packet_msg_size"` + MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` // Interval to flush writes (throttled) FlushThrottle time.Duration `mapstructure:"flush_throttle"` @@ -120,12 +120,12 @@ type MConnConfig struct { // DefaultMConnConfig returns the default config. 
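The Prometheus handler change above bounds concurrent scrapes via `promhttp.HandlerOpts.MaxRequestsInFlight`. A minimal standalone sketch of the same wiring (the listen address and the limit of 3 are illustrative; 3 matches the config default mentioned in this release):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	h := promhttp.InstrumentMetricHandler(
		prometheus.DefaultRegisterer, // also instruments the scrape endpoint itself
		promhttp.HandlerFor(
			prometheus.DefaultGatherer,
			promhttp.HandlerOpts{MaxRequestsInFlight: 3}, // excess scrapes are rejected
		),
	)
	http.ListenAndServe(":26660", h) // sketch only: error ignored
}
```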
func DefaultMConnConfig() MConnConfig { return MConnConfig{ - SendRate: defaultSendRate, - RecvRate: defaultRecvRate, - MaxPacketMsgSize: defaultMaxPacketMsgSize, - FlushThrottle: defaultFlushThrottle, - PingInterval: defaultPingInterval, - PongTimeout: defaultPongTimeout, + SendRate: defaultSendRate, + RecvRate: defaultRecvRate, + MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize, + FlushThrottle: defaultFlushThrottle, + PingInterval: defaultPingInterval, + PongTimeout: defaultPongTimeout, } } @@ -146,17 +146,16 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec } mconn := &MConnection{ - conn: conn, - bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), - bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), - sendMonitor: flow.New(0, 0), - recvMonitor: flow.New(0, 0), - send: make(chan struct{}, 1), - pong: make(chan struct{}, 1), - onReceive: onReceive, - onError: onError, - config: config, - emptyPacketMsgSize: emptyPacketMsgSize(), + conn: conn, + bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), + bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), + sendMonitor: flow.New(0, 0), + recvMonitor: flow.New(0, 0), + send: make(chan struct{}, 1), + pong: make(chan struct{}, 1), + onReceive: onReceive, + onError: onError, + config: config, } // Create channels @@ -173,6 +172,9 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn) + // maxPacketMsgSize() is a bit heavy, so call just once + mconn._maxPacketMsgSize = mconn.maxPacketMsgSize() + return mconn } @@ -397,7 +399,7 @@ func (c *MConnection) sendSomePacketMsgs() bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. - c.sendMonitor.Limit(c.config.MaxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) + c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) // Now send some PacketMsgs. for i := 0; i < numBatchPacketMsgs; i++ { @@ -455,7 +457,7 @@ func (c *MConnection) recvRoutine() { FOR_LOOP: for { // Block until .recvMonitor says we can read. - c.recvMonitor.Limit(c.config.MaxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true) + c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true) // Peek into bufConnReader for debugging /* @@ -475,7 +477,7 @@ FOR_LOOP: var packet Packet var _n int64 var err error - _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.MaxPacketMsgSize)) + _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) c.recvMonitor.Update(int(_n)) if err != nil { if c.IsRunning() { @@ -548,6 +550,16 @@ func (c *MConnection) stopPongTimer() { } } +// maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead +// of amino encoding. 
+func (c *MConnection) maxPacketMsgSize() int { + return len(cdc.MustMarshalBinary(PacketMsg{ + ChannelID: 0x01, + EOF: 1, + Bytes: make([]byte, c.config.MaxPacketMsgPayloadSize), + })) + 10 // leave room for changes in amino +} + type ConnectionStatus struct { Duration time.Duration SendMonitor flow.Status @@ -631,7 +643,7 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { desc: desc, sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity), - maxPacketMsgPayloadSize: conn.config.MaxPacketMsgSize, + maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize, } } @@ -694,7 +706,7 @@ func (ch *Channel) isSendPending() bool { func (ch *Channel) nextPacketMsg() PacketMsg { packet := PacketMsg{} packet.ChannelID = byte(ch.desc.ID) - maxSize := ch.maxPacketMsgPayloadSize - ch.conn.emptyPacketMsgSize + maxSize := ch.maxPacketMsgPayloadSize packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))] if len(ch.sending) <= maxSize { packet.EOF = byte(0x01) @@ -780,25 +792,3 @@ type PacketMsg struct { func (mp PacketMsg) String() string { return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF) } - -// - Uvarint length of MustMarshalBinary(packet) = 1 or 2 bytes -// (as long as it's less than 16,384 bytes) -// - Prefix bytes = 4 bytes -// - ChannelID field key + byte = 2 bytes -// - EOF field key + byte = 2 bytes -// - Bytes field key = 1 bytes -// - Uvarint length of MustMarshalBinary(bytes) = 1 or 2 bytes -// - Struct terminator = 1 byte -// = up to 14 bytes overhead for the packet. - -func emptyPacketMsgSize() int { - emptyPacketMsgSize := len(cdc.MustMarshalBinary(PacketMsg{ - ChannelID: 0x01, - EOF: 1, - Bytes: make([]byte, 1), - })) - // -1 byte of data - // +1 byte because uvarint length of MustMarshalBinary(bytes) will be 2 bytes for big packets - // +1 byte because uvarint length of MustMarshalBinary(packet) will be 2 bytes for big packets - return emptyPacketMsgSize - 1 + 1 + 1 -} diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 34b37ab8a..19e05fbc7 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) const maxPingPongPacketSize = 1024 // bytes @@ -426,7 +426,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { var packet = PacketMsg{ ChannelID: 0x01, EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgSize-emptyPacketMsgSize()), + Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), } _, err = cdc.MarshalBinaryWriter(buf, packet) assert.Nil(t, err) @@ -440,7 +440,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { packet = PacketMsg{ ChannelID: 0x01, EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgSize+1), + Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+100), } _, err = cdc.MarshalBinaryWriter(buf, packet) assert.Nil(t, err) diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index 43f84f0bf..a2cbe008d 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -21,7 +21,7 @@ import ( "golang.org/x/crypto/ripemd160" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // 4 + 1024 == 1028 total frame size diff --git a/p2p/conn/secret_connection_test.go 
b/p2p/conn/secret_connection_test.go index 7f862fecd..27dca034a 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -7,8 +7,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" ) type kvstoreConn struct { @@ -35,9 +35,9 @@ func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) { func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) { var fooConn, barConn = makeKVStoreConnPair() - var fooPrvKey = crypto.GenPrivKeyEd25519() + var fooPrvKey = ed25519.GenPrivKey() var fooPubKey = fooPrvKey.PubKey() - var barPrvKey = crypto.GenPrivKeyEd25519() + var barPrvKey = ed25519.GenPrivKey() var barPubKey = barPrvKey.PubKey() // Make connections from both sides in parallel. @@ -105,7 +105,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task { return func(_ int) (interface{}, error, bool) { // Initiate cryptographic private key and secret connection through nodeConn. - nodePrvKey := crypto.GenPrivKeyEd25519() + nodePrvKey := ed25519.GenPrivKey() nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) if err != nil { t.Errorf("Failed to establish SecretConnection for node: %v", err) diff --git a/p2p/conn/wire.go b/p2p/conn/wire.go index 3182fde38..4bd778c74 100644 --- a/p2p/conn/wire.go +++ b/p2p/conn/wire.go @@ -2,12 +2,12 @@ package conn import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc *amino.Codec = amino.NewCodec() func init() { - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) RegisterPacket(cdc) } diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go index fc2242366..bb6e822fc 100644 --- a/p2p/dummy/peer.go +++ b/p2p/dummy/peer.go @@ -3,9 +3,9 @@ package dummy import ( "net" + cmn "github.com/tendermint/tendermint/libs/common" p2p "github.com/tendermint/tendermint/p2p" tmconn "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" ) type peer struct { @@ -78,3 +78,8 @@ func (p *peer) Get(key string) interface{} { } return nil } + +// OriginalAddr always returns nil.
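// [Editor's note, not part of the diff: OriginalAddr is the new method this
// change adds to the p2p.Peer interface. The dummy peer, like any inbound
// peer, was never dialed by us, so it has no original dial address to report.]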
+func (p *peer) OriginalAddr() *p2p.NetAddress { + return nil +} diff --git a/p2p/fuzz.go b/p2p/fuzz.go index 8d00ba40d..80e4fed6a 100644 --- a/p2p/fuzz.go +++ b/p2p/fuzz.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/config" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // FuzzedConnection wraps any net.Conn and depending on the mode either delays diff --git a/p2p/key.go b/p2p/key.go index 7e242bfc3..4d1ecd82f 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -7,7 +7,8 @@ import ( "io/ioutil" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" ) // ID is a hex-encoded crypto.Address @@ -70,7 +71,7 @@ func LoadNodeKey(filePath string) (*NodeKey, error) { } func genNodeKey(filePath string) (*NodeKey, error) { - privKey := crypto.GenPrivKeyEd25519() + privKey := ed25519.GenPrivKey() nodeKey := &NodeKey{ PrivKey: privKey, } diff --git a/p2p/key_test.go b/p2p/key_test.go index c2e1f3e0e..51e1c0787 100644 --- a/p2p/key_test.go +++ b/p2p/key_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestLoadOrGenNodeKey(t *testing.T) { diff --git a/p2p/listener.go b/p2p/listener.go index e698765cd..3509ec69c 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -4,22 +4,30 @@ import ( "fmt" "net" "strconv" + "strings" "time" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p/upnp" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" ) +// Listener is a network listener for stream-oriented protocols, providing +// convenient methods to get listener's internal and external addresses. +// Clients are supposed to read incoming connections from a channel, returned +// by Connections() method. type Listener interface { Connections() <-chan net.Conn InternalAddress() *NetAddress ExternalAddress() *NetAddress + ExternalAddressHost() string String() string Stop() error } -// Implements Listener +// DefaultListener is a cmn.Service, running net.Listener underneath. +// Optionally, UPnP is used upon calling NewDefaultListener to resolve external +// address. type DefaultListener struct { cmn.BaseService @@ -29,6 +37,8 @@ type DefaultListener struct { connections chan net.Conn } +var _ Listener = (*DefaultListener)(nil) + const ( numBufferedConnections = 10 defaultExternalPort = 8770 @@ -47,9 +57,16 @@ func splitHostPort(addr string) (host string, port int) { return host, port } -// skipUPNP: If true, does not try getUPNPExternalAddress() -func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log.Logger) Listener { - // Local listen IP & port +// NewDefaultListener creates a new DefaultListener on lAddr, optionally trying +// to determine external address using UPnP. +func NewDefaultListener( + fullListenAddrString string, + externalAddrString string, + useUPnP bool, + logger log.Logger) Listener { + + // Split protocol, address, and port. + protocol, lAddr := cmn.ProtocolAndAddress(fullListenAddrString) lAddrIP, lAddrPort := splitHostPort(lAddr) // Create listener @@ -77,17 +94,28 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log panic(err) } - // Determine external address... 
+ inAddrAny := lAddrIP == "" || lAddrIP == "0.0.0.0" + + // Determine external address. var extAddr *NetAddress - if !skipUPNP { - // If the lAddrIP is INADDR_ANY, try UPnP - if lAddrIP == "" || lAddrIP == "0.0.0.0" { - extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) + + if externalAddrString != "" { + var err error + extAddr, err = NewNetAddressStringWithOptionalID(externalAddrString) + if err != nil { + panic(fmt.Sprintf("Error in ExternalAddress: %v", err)) } } - // Otherwise just use the local address... + + // If the lAddrIP is INADDR_ANY, try UPnP. + if extAddr == nil && useUPnP && inAddrAny { + extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) + } + + // Otherwise just use the local address. if extAddr == nil { - extAddr = getNaiveExternalAddress(listenerPort, false, logger) + defaultToIPv4 := inAddrAny + extAddr = getNaiveExternalAddress(defaultToIPv4, listenerPort, false, logger) } if extAddr == nil { panic("Could not determine external address!") @@ -107,6 +135,8 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log return dl } +// OnStart implements cmn.Service by spinning a goroutine, listening for new +// connections. func (l *DefaultListener) OnStart() error { if err := l.BaseService.OnStart(); err != nil { return err @@ -115,6 +145,7 @@ func (l *DefaultListener) OnStart() error { return nil } +// OnStop implements cmn.Service by closing the listener. func (l *DefaultListener) OnStop() { l.BaseService.OnStop() l.listener.Close() // nolint: errcheck @@ -145,24 +176,33 @@ func (l *DefaultListener) listenRoutine() { } } -// A channel of inbound connections. +// Connections returns a channel of inbound connections. // It gets closed when the listener closes. func (l *DefaultListener) Connections() <-chan net.Conn { return l.connections } +// InternalAddress returns the internal NetAddress (address used for +// listening). func (l *DefaultListener) InternalAddress() *NetAddress { return l.intAddr } +// ExternalAddress returns the external NetAddress (publicly available, +// determined using either UPnP or local resolver). func (l *DefaultListener) ExternalAddress() *NetAddress { return l.extAddr } -// NOTE: The returned listener is already Accept()'ing. -// So it's not suitable to pass into http.Serve(). -func (l *DefaultListener) NetListener() net.Listener { - return l.listener +// ExternalAddressHost returns the external NetAddress IP string. If an IP is +// IPv6, it's wrapped in brackets ("[2001:db8:1f70::999:de8:7648:6e8]"). 
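// [Editor's note, not part of the diff: the bracket rule implemented below
// matches the standard library's net.JoinHostPort, which wraps any host
// containing a colon, so the two formats stay dial-compatible:
//
//	net.JoinHostPort("10.0.0.1", "26656")    // "10.0.0.1:26656"
//	net.JoinHostPort("2001:db8::1", "26656") // "[2001:db8::1]:26656"
// ]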
+func (l *DefaultListener) ExternalAddressHost() string { + ip := l.ExternalAddress().IP + if isIpv6(ip) { + // Means it's ipv6, so format it with brackets + return "[" + ip.String() + "]" + } + return ip.String() } func (l *DefaultListener) String() string { @@ -201,8 +241,20 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) * return NewNetAddressIPPort(ext, uint16(externalPort)) } +func isIpv6(ip net.IP) bool { + v4 := ip.To4() + if v4 != nil { + return false + } + + ipString := ip.String() + + // Extra check just to be sure it's IPv6 + return (strings.Contains(ipString, ":") && !strings.Contains(ipString, ".")) +} + // TODO: use syscalls: see issue #712 -func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress { +func getNaiveExternalAddress(defaultToIPv4 bool, port int, settleForLocal bool, logger log.Logger) *NetAddress { addrs, err := net.InterfaceAddrs() if err != nil { panic(cmn.Fmt("Could not fetch interface addresses: %v", err)) @@ -213,14 +265,20 @@ func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) * if !ok { continue } - v4 := ipnet.IP.To4() - if v4 == nil || (!settleForLocal && v4[0] == 127) { + if defaultToIPv4 || !isIpv6(ipnet.IP) { + v4 := ipnet.IP.To4() + if v4 == nil || (!settleForLocal && v4[0] == 127) { + // loopback + continue + } + } else if !settleForLocal && ipnet.IP.IsLoopback() { + // IPv6, check for loopback continue - } // loopback + } return NewNetAddressIPPort(ipnet.IP, uint16(port)) } // try again, but settle for local logger.Info("Node may not be connected to internet. Settling for local address") - return getNaiveExternalAddress(port, true, logger) + return getNaiveExternalAddress(defaultToIPv4, port, true, logger) } diff --git a/p2p/listener_test.go b/p2p/listener_test.go index 92018e0aa..f87b5d6f5 100644 --- a/p2p/listener_test.go +++ b/p2p/listener_test.go @@ -2,14 +2,17 @@ package p2p import ( "bytes" + "net" + "strings" "testing" - "github.com/tendermint/tmlibs/log" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) func TestListener(t *testing.T) { // Create a listener - l := NewDefaultListener("tcp", ":8001", true, log.TestingLogger()) + l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) // Dial the listener lAddr := l.ExternalAddress() @@ -45,3 +48,32 @@ func TestListener(t *testing.T) { // Close the server, no longer needed. l.Stop() } + +func TestExternalAddress(t *testing.T) { + { + // Create a listener with no external addr. Should default + // to local ipv4. + l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) + lAddr := l.ExternalAddress().String() + _, _, err := net.SplitHostPort(lAddr) + require.Nil(t, err) + spl := strings.Split(lAddr, ".") + require.Equal(t, len(spl), 4) + l.Stop() + } + + { + // Create a listener with set external ipv4 addr. 
+ setExAddr := "8.8.8.8:8080" + l := NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger()) + lAddr := l.ExternalAddress().String() + require.Equal(t, lAddr, setExAddr) + l.Stop() + } + + { + // Invalid external addr causes panic + setExAddr := "awrlsckjnal:8080" + require.Panics(t, func() { NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger()) }) + } +} diff --git a/p2p/netaddress.go b/p2p/netaddress.go index 3e0d99d69..ebac8cc82 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -13,7 +13,7 @@ import ( "strings" "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // NetAddress defines information about a peer on the network diff --git a/p2p/node_info.go b/p2p/node_info.go index 60383bc5e..fa1333b27 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -2,8 +2,9 @@ package p2p import ( "fmt" - cmn "github.com/tendermint/tmlibs/common" "strings" + + cmn "github.com/tendermint/tendermint/libs/common" ) const ( @@ -81,8 +82,8 @@ func (info NodeInfo) Validate() error { // CONTRACT: two nodes are compatible if the major version matches and network match // and they have at least one channel in common. func (info NodeInfo) CompatibleWith(other NodeInfo) error { - iMajor, iMinor, _, iErr := splitVersion(info.Version) - oMajor, oMinor, _, oErr := splitVersion(other.Version) + iMajor, _, _, iErr := splitVersion(info.Version) + oMajor, _, _, oErr := splitVersion(other.Version) // if our own version number is not formatted right, we messed up if iErr != nil { @@ -99,11 +100,6 @@ func (info NodeInfo) CompatibleWith(other NodeInfo) error { return fmt.Errorf("Peer is on a different major version. Got %v, expected %v", oMajor, iMajor) } - // minor version can differ - if iMinor != oMinor { - // ok - } - // nodes must be on the same network if info.Network != other.Network { return fmt.Errorf("Peer is on a different network. Got %v, expected %v", other.Network, info.Network) diff --git a/p2p/peer.go b/p2p/peer.go index cf96354e6..4f59fef76 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -7,8 +7,8 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" tmconn "github.com/tendermint/tendermint/p2p/conn" @@ -26,6 +26,7 @@ type Peer interface { IsPersistent() bool // do we redial this peer when we disconnect NodeInfo() NodeInfo // peer's info Status() tmconn.ConnectionStatus + OriginalAddr() *NetAddress Send(byte, []byte) bool TrySend(byte, []byte) bool @@ -38,11 +39,12 @@ type Peer interface { // peerConn contains the raw connection and its config. type peerConn struct { - outbound bool - persistent bool - config *config.P2PConfig - conn net.Conn // source connection - ip net.IP + outbound bool + persistent bool + config *config.P2PConfig + conn net.Conn // source connection + ip net.IP + originalAddr *NetAddress // nil for inbound connections } // ID only exists for SecretConnection. 
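// [Editor's sketch, not part of the diff. A standalone illustration of why
// this change threads the originally dialed address through peerConn: a
// persistent peer behind NAT self-reports its internal listen address, which
// is undialable from outside, so the switch's reconnect logic (further below
// in this diff) must prefer what we actually dialed. All addresses here are
// made up.]
package main

import "fmt"

// reconnectAddr mirrors the fallback the switch performs: use the dialed
// address when we have one (outbound peers), else the peer's own claim.
func reconnectAddr(originalAddr, selfReported string) string {
	if originalAddr != "" {
		return originalAddr
	}
	return selfReported
}

func main() {
	// Outbound: the public address from our config wins over the NAT-internal view.
	fmt.Println(reconnectAddr("1.2.3.4:26656", "192.168.0.5:26656"))
	// Inbound: no dial address exists, so fall back to the self-reported one.
	fmt.Println(reconnectAddr("", "5.6.7.8:26656"))
}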
@@ -139,7 +141,7 @@ func newOutboundPeerConn( return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") } - pc, err := newPeerConn(conn, config, true, persistent, ourNodePrivKey) + pc, err := newPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) if err != nil { if cerr := conn.Close(); cerr != nil { return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) @@ -166,7 +168,7 @@ func newInboundPeerConn( // TODO: issue PoW challenge - return newPeerConn(conn, config, false, false, ourNodePrivKey) + return newPeerConn(conn, config, false, false, ourNodePrivKey, nil) } func newPeerConn( @@ -174,6 +176,7 @@ func newPeerConn( cfg *config.P2PConfig, outbound, persistent bool, ourNodePrivKey crypto.PrivKey, + originalAddr *NetAddress, ) (pc peerConn, err error) { conn := rawConn @@ -200,10 +203,11 @@ func newPeerConn( // Only the information we already have return peerConn{ - config: cfg, - outbound: outbound, - persistent: persistent, - conn: conn, + config: cfg, + outbound: outbound, + persistent: persistent, + conn: conn, + originalAddr: originalAddr, }, nil } @@ -254,6 +258,15 @@ func (p *peer) NodeInfo() NodeInfo { return p.nodeInfo } +// OriginalAddr returns the original address, which was used to connect with +// the peer. Returns nil for inbound peers. +func (p *peer) OriginalAddr() *NetAddress { + if p.peerConn.outbound { + return p.peerConn.originalAddr + } + return nil +} + // Status returns the peer's ConnectionStatus. func (p *peer) Status() tmconn.ConnectionStatus { return p.mconn.Status() diff --git a/p2p/peer_set.go b/p2p/peer_set.go index e048cf4e3..257856156 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -55,8 +55,8 @@ func (ps *PeerSet) Add(peer Peer) error { return nil } -// Has returns true iff the PeerSet contains -// the peer referred to by this peerKey. +// Has returns true if the set contains the peer referred to by this +// peerKey, otherwise false. func (ps *PeerSet) Has(peerKey ID) bool { ps.mtx.Lock() _, ok := ps.lookup[peerKey] @@ -64,8 +64,8 @@ func (ps *PeerSet) Has(peerKey ID) bool { return ok } -// HasIP returns true if the PeerSet contains the peer referred to by this IP -// address. +// HasIP returns true if the set contains the peer referred to by this IP +// address, otherwise false. func (ps *PeerSet) HasIP(peerIP net.IP) bool { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -85,7 +85,8 @@ func (ps *PeerSet) hasIP(peerIP net.IP) bool { return false } -// Get looks up a peer by the provided peerKey. +// Get looks up a peer by the provided peerKey. Returns nil if peer is not +// found. 
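// [Editor's note, not part of the diff: callers must handle the nil return;
// the pex crawler fix later in this change adds exactly this guard before
// calling RequestAddrs on the looked-up peer.]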
func (ps *PeerSet) Get(peerKey ID) Peer { ps.mtx.Lock() defer ps.mtx.Unlock() diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 32d312437..4582ab644 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -1,15 +1,14 @@ package p2p import ( - "math/rand" "net" "sync" "testing" "github.com/stretchr/testify/assert" - crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" ) // Returns an empty kvstore peer @@ -18,11 +17,11 @@ func randPeer(ip net.IP) *peer { ip = net.IP{127, 0, 0, 1} } - nodeKey := NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} + nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} p := &peer{ nodeInfo: NodeInfo{ ID: nodeKey.ID(), - ListenAddr: cmn.Fmt("%v.%v.%v.%v:26656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256), + ListenAddr: cmn.Fmt("%v.%v.%v.%v:26656", cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256), }, } diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 73c0db825..f0e915328 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -10,8 +10,9 @@ import ( "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" tmconn "github.com/tendermint/tendermint/p2p/conn" @@ -23,7 +24,7 @@ func TestPeerBasic(t *testing.T) { assert, require := assert.New(t), require.New(t) // simulate remote peer - rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg} + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() defer rp.Stop() @@ -49,7 +50,7 @@ func TestPeerSend(t *testing.T) { config := cfg // simulate remote peer - rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config} + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config} rp.Start() defer rp.Stop() @@ -74,7 +75,7 @@ func createOutboundPeerAndPerformHandshake( {ID: testCh, Priority: 1}, } reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} - pk := crypto.GenPrivKeyEd25519() + pk := ed25519.GenPrivKey() pc, err := newOutboundPeerConn(addr, config, false, pk) if err != nil { return nil, err diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 592269957..c630d14c3 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -13,8 +13,8 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" ) const ( @@ -34,6 +34,8 @@ type AddrBook interface { // Check if it is our address OurAddress(*p2p.NetAddress) bool + AddPrivateIDs([]string) + // Add and remove an address AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error RemoveAddress(*p2p.NetAddress) @@ -82,6 +84,7 @@ type addrBook struct { mtx sync.Mutex rand *cmn.Rand ourAddrs map[string]struct{} + privateIDs map[p2p.ID]struct{} addrLookup map[p2p.ID]*knownAddress // new & old bucketsOld []map[string]*knownAddress bucketsNew []map[string]*knownAddress @@ -97,6 +100,7 @@ func NewAddrBook(filePath string, routabilityStrict bool) *addrBook { am := &addrBook{ rand: cmn.NewRand(), ourAddrs: make(map[string]struct{}), + privateIDs: 
make(map[p2p.ID]struct{}), addrLookup: make(map[p2p.ID]*knownAddress), filePath: filePath, routabilityStrict: routabilityStrict, @@ -168,6 +172,14 @@ func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool { return ok } +func (a *addrBook) AddPrivateIDs(IDs []string) { + a.mtx.Lock() + defer a.mtx.Unlock() + for _, id := range IDs { + a.privateIDs[p2p.ID(id)] = struct{}{} + } +} + // AddAddress implements AddrBook // Add address to a "new" bucket. If it's already in one, only add it probabilistically. // Returns error if the addr is non-routable. Does not add self. @@ -631,6 +643,10 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookSelf{addr} } + if _, ok := a.privateIDs[addr.ID]; ok { + return ErrAddrBookPrivate{addr} + } + ka := a.addrLookup[addr.ID] if ka != nil { // If its already old and the addr is the same, ignore it. diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index 2e2604286..8b64c380f 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -4,14 +4,15 @@ import ( "encoding/hex" "fmt" "io/ioutil" - "math/rand" "os" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" ) func createTempFileName(prefix string) string { @@ -202,12 +203,12 @@ func randNetAddressPairs(t *testing.T, n int) []netAddressPair { func randIPv4Address(t *testing.T) *p2p.NetAddress { for { ip := fmt.Sprintf("%v.%v.%v.%v", - rand.Intn(254)+1, - rand.Intn(255), - rand.Intn(255), - rand.Intn(255), + cmn.RandIntn(254)+1, + cmn.RandIntn(255), + cmn.RandIntn(255), + cmn.RandIntn(255), ) - port := rand.Intn(65535-1) + 1 + port := cmn.RandIntn(65535-1) + 1 id := p2p.ID(hex.EncodeToString(cmn.RandBytes(p2p.IDByteLength))) idAddr := p2p.IDAddressString(id, fmt.Sprintf("%v:%v", ip, port)) addr, err := p2p.NewNetAddressString(idAddr) @@ -354,3 +355,29 @@ func TestAddrBookHasAddress(t *testing.T) { assert.False(t, book.HasAddress(addr)) } + +func TestPrivatePeers(t *testing.T) { + fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + + addrs := make([]*p2p.NetAddress, 10) + for i := 0; i < 10; i++ { + addrs[i] = randIPv4Address(t) + } + + private := make([]string, 10) + for i, addr := range addrs { + private[i] = string(addr.ID) + } + book.AddPrivateIDs(private) + + for _, addr := range addrs { + err := book.AddAddress(addr, addr) + require.Error(t, err, "AddAddress should have failed with private peer %s", addr) + _, ok := err.(ErrAddrBookPrivate) + require.True(t, ok, "Wrong error type, wanted ErrAddrBookPrivate, got error: %s", err) + } +} diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 0b8bf4715..34bfb5ab1 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -22,6 +22,14 @@ func (err ErrAddrBookSelf) Error() string { return fmt.Sprintf("Cannot add ourselves with address %v", err.Addr) } +type ErrAddrBookPrivate struct { + Addr *p2p.NetAddress +} + +func (err ErrAddrBookPrivate) Error() string { + return fmt.Sprintf("Cannot add private peer with address %v", err.Addr) +} + type ErrAddrBookNilAddr struct { Addr *p2p.NetAddress Src *p2p.NetAddress diff --git a/p2p/pex/file.go b/p2p/pex/file.go index 38142dd9d..3237e1253 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -4,7 +4,7 @@ 
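// [Editor's sketch, not part of the diff: with this change the address book
// owns private-ID filtering end to end. Hedged usage, with an illustrative
// file path and ID, and addr/src assumed to be *p2p.NetAddress values:
//
//	book := NewAddrBook("addrbook.json", true)
//	book.AddPrivateIDs([]string{"ffffffffffffffffffffffffffffffffffffffff"})
//	if err := book.AddAddress(addr, src); err != nil {
//		if _, ok := err.(ErrAddrBookPrivate); ok {
//			// expected for private peers; safe to ignore
//		}
//	}
// ]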
import ( "encoding/json" "os" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) /* Loading & Saving */ diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 27ed422c5..5c4894ce4 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -8,7 +8,7 @@ import ( "time" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/conn" @@ -77,10 +77,10 @@ type PEXReactor struct { attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} } -func (pexR *PEXReactor) minReceiveRequestInterval() time.Duration { +func (r *PEXReactor) minReceiveRequestInterval() time.Duration { // NOTE: must be less than ensurePeersPeriod, otherwise we'll request // peers too quickly from others and they'll think we're bad! - return pexR.ensurePeersPeriod / 3 + return r.ensurePeersPeriod / 3 } // PEXReactorConfig holds reactor specific configuration data. @@ -91,10 +91,6 @@ type PEXReactorConfig struct { // Seeds is a list of addresses reactor may use // if it can't connect to peers in the addrbook. Seeds []string - - // PrivatePeerIDs is a list of peer IDs, which must not be gossiped to other - // peers. - PrivatePeerIDs []string } type _attemptsToDial struct { @@ -173,11 +169,6 @@ func (r *PEXReactor) AddPeer(p Peer) { addr := p.NodeInfo().NetAddress() src := addr - // ignore private addrs - if isAddrPrivate(addr, r.config.PrivatePeerIDs) { - return - } - // add to book. dont RequestAddrs right away because // we don't trust inbound as much - let ensurePeersRoutine handle it. err := r.book.AddAddress(addr, src) @@ -191,7 +182,7 @@ func (r *PEXReactor) logErrAddrBook(err error) { case ErrAddrBookNilAddr: r.Logger.Error("Failed to add new address", "err", err) default: - // non-routable, self, full book, etc. + // non-routable, self, full book, private, etc. r.Logger.Debug("Failed to add new address", "err", err) } } @@ -206,7 +197,7 @@ func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) { // Receive implements Reactor by handling incoming PEX messages. func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := DecodeMessage(msgBytes) + msg, err := decodeMsg(msgBytes) if err != nil { r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) r.Switch.StopPeerForError(src, err) @@ -287,7 +278,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) { return } r.requestsSent.Set(id, struct{}{}) - p.Send(PexChannel, cdc.MustMarshalBinary(&pexRequestMessage{})) + p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{})) } // ReceiveAddrs adds the given addrs to the addrbook if theres an open @@ -308,14 +299,6 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { return cmn.NewError("received nil addr") } - // ignore private peers - // TODO: give private peers to AddrBook so it can enforce this on AddAddress. - // We'd then have to check for ErrPrivatePeer on AddAddress here, which is - // an error we just ignore (maybe peer is probing us for our private peers :P) - if isAddrPrivate(netAddr, r.config.PrivatePeerIDs) { - continue - } - err := r.book.AddAddress(netAddr, srcAddr) r.logErrAddrBook(err) } @@ -324,7 +307,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { // SendAddrs sends addrs to the peer. 
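// [Editor's note, not part of the diff, hedged on go-amino internals of this
// era: MarshalBinary output is MarshalBinaryBare output preceded by a uvarint
// length. PEX messages already travel inside length-framed MConnection
// packets, so the bare form used below drops a redundant prefix. Send here
// and decodeMsg above were switched together so both ends of the channel
// agree on the encoding.]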
func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: netAddrs})) + p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: netAddrs})) } // SetEnsurePeersPeriod sets period to ensure peers connected. @@ -628,7 +611,9 @@ func (r *PEXReactor) crawlPeers() { } // Ask for more addresses peer := r.Switch.Peers().Get(pi.Addr.ID) - r.RequestAddrs(peer) + if peer != nil { + r.RequestAddrs(peer) + } } } @@ -645,16 +630,6 @@ func (r *PEXReactor) attemptDisconnects() { } } -// isAddrPrivate returns true if addr.ID is a private ID. -func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool { - for _, id := range privatePeerIDs { - if string(addr.ID) == id { - return true - } - } - return false -} - //----------------------------------------------------------------------------- // Messages @@ -668,13 +643,11 @@ func RegisterPexMessage(cdc *amino.Codec) { cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil) } -// DecodeMessage implements interface registered above. -func DecodeMessage(bz []byte) (msg PexMessage, err error) { +func decodeMsg(bz []byte) (msg PexMessage, err error) { if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", - len(bz), maxMsgSize) + return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) } - err = cdc.UnmarshalBinary(bz, &msg) + err = cdc.UnmarshalBinaryBare(bz, &msg) return } diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index f4251e869..8d54693fe 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -13,8 +13,9 @@ import ( "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" @@ -109,7 +110,7 @@ func TestPEXReactorRunning(t *testing.T) { addOtherNodeAddrToAddrBook(2, 1) for i, sw := range switches { - sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, true, logger.With("pex", i))) + sw.AddListener(p2p.NewDefaultListener("tcp://"+sw.NodeInfo().ListenAddr, "", false, logger.With("pex", i))) err := sw.Start() // start switch and reactors require.Nil(t, err) @@ -134,11 +135,11 @@ func TestPEXReactorReceive(t *testing.T) { size := book.Size() addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} - msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs}) + msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) r.Receive(PexChannel, peer, msg) assert.Equal(t, size+1, book.Size()) - msg = cdc.MustMarshalBinary(&pexRequestMessage{}) + msg = cdc.MustMarshalBinaryBare(&pexRequestMessage{}) r.Receive(PexChannel, peer, msg) // should not panic. 
} @@ -154,7 +155,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { assert.True(t, sw.Peers().Has(peer.ID())) id := string(peer.ID()) - msg := cdc.MustMarshalBinary(&pexRequestMessage{}) + msg := cdc.MustMarshalBinaryBare(&pexRequestMessage{}) // first time creates the entry r.Receive(PexChannel, peer, msg) @@ -191,7 +192,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, sw.Peers().Has(peer.ID())) addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} - msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs}) + msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) // receive some addrs. should clear the request r.Receive(PexChannel, peer, msg) @@ -229,12 +230,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { }, ) seed.AddListener( - p2p.NewDefaultListener( - "tcp", - seed.NodeInfo().ListenAddr, - true, - log.TestingLogger(), - ), + p2p.NewDefaultListener("tcp://"+seed.NodeInfo().ListenAddr, "", false, log.TestingLogger()), ) require.Nil(t, seed.Start()) defer seed.Stop() @@ -300,7 +296,8 @@ func TestPEXReactorCrawlStatus(t *testing.T) { func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { peer := p2p.CreateRandomPeer(false) - pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID)}}) + pexR, book := createReactor(&PEXReactorConfig{}) + book.AddPrivateIDs([]string{string(peer.NodeInfo().ID)}) defer teardownReactor(book) // we have to send a request to receive responses @@ -308,7 +305,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { size := book.Size() addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} - msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs}) + msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) pexR.Receive(PexChannel, peer, msg) assert.Equal(t, size, book.Size()) @@ -360,7 +357,7 @@ func newMockPeer() mockPeer { _, netAddr := p2p.CreateRoutableAddr() mp := mockPeer{ addr: netAddr, - pubKey: crypto.GenPrivKeyEd25519().PubKey(), + pubKey: ed25519.GenPrivKey().PubKey(), } mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp) mp.Start() @@ -376,12 +373,13 @@ func (mp mockPeer) NodeInfo() p2p.NodeInfo { ListenAddr: mp.addr.DialString(), } } -func (mp mockPeer) RemoteIP() net.IP { return net.ParseIP("127.0.0.1") } -func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp mockPeer) Send(byte, []byte) bool { return false } -func (mp mockPeer) TrySend(byte, []byte) bool { return false } -func (mp mockPeer) Set(string, interface{}) {} -func (mp mockPeer) Get(string) interface{} { return nil } +func (mockPeer) RemoteIP() net.IP { return net.ParseIP("127.0.0.1") } +func (mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } +func (mockPeer) Send(byte, []byte) bool { return false } +func (mockPeer) TrySend(byte, []byte) bool { return false } +func (mockPeer) Set(string, interface{}) {} +func (mockPeer) Get(string) interface{} { return nil } +func (mockPeer) OriginalAddr() *p2p.NetAddress { return nil } func assertPeersWithTimeout( t *testing.T, diff --git a/p2p/switch.go b/p2p/switch.go index ae322b54f..636ca6d8e 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -8,8 +8,8 @@ import ( "time" "github.com/tendermint/tendermint/config" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" ) const ( @@ -100,7 +100,7 @@ func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) 
*Switch { mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond mConfig.SendRate = cfg.SendRate mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgSize = cfg.MaxPacketMsgSize + mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize sw.mConfig = mConfig @@ -281,8 +281,13 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - // NOTE: this is the self-reported addr, not the original we dialed - go sw.reconnectToPeer(peer.NodeInfo().NetAddress()) + addr := peer.OriginalAddr() + if addr == nil { + // FIXME: persistent peers can't be inbound right now. + // self-reported address for inbound persistent peers + addr = peer.NodeInfo().NetAddress() + } + go sw.reconnectToPeer(addr) } } diff --git a/p2p/switch_test.go b/p2p/switch_test.go index afccfd585..2ce297761 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - crypto "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p/conn" @@ -259,7 +259,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { defer sw.Stop() // simulate remote peer - rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg} + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() defer rp.Stop() @@ -289,7 +289,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) { defer sw.Stop() // simulate remote peer - rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg} + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() defer rp.Stop() @@ -319,7 +319,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) { // simulate another remote peer rp = &remotePeer{ - PrivKey: crypto.GenPrivKeyEd25519(), + PrivKey: ed25519.GenPrivKey(), Config: cfg, // Use different interface to prevent duplicate IP filter, this will break // beyond two peers. diff --git a/p2p/test_util.go b/p2p/test_util.go index b0b801487..fdf9ae764 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -4,9 +4,9 @@ import ( "fmt" "net" - crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p/conn" @@ -135,7 +135,7 @@ func MakeSwitch(cfg *config.P2PConfig, i int, network, version string, initSwitc // new switch, add reactors // TODO: let the config be passed in? 
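// [Editor's note, not part of the diff: ed25519.GenPrivKey() below is the
// replacement for the removed crypto.GenPrivKeyEd25519(), following this
// change's split of the crypto package into subpackages.]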
nodeKey := &NodeKey{ - PrivKey: crypto.GenPrivKeyEd25519(), + PrivKey: ed25519.GenPrivKey(), } sw := NewSwitch(cfg) sw.SetLogger(log.TestingLogger()) diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go index 5770b4208..c0175a93f 100644 --- a/p2p/trust/metric.go +++ b/p2p/trust/metric.go @@ -8,7 +8,7 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //--------------------------------------------------------------------------------------- @@ -256,12 +256,13 @@ func (tm *TrustMetric) SetTicker(ticker MetricTicker) { // Copy returns a new trust metric with members containing the same values func (tm *TrustMetric) Copy() *TrustMetric { - tm.mtx.Lock() - defer tm.mtx.Unlock() if tm == nil { return nil } + tm.mtx.Lock() + defer tm.mtx.Unlock() + return &TrustMetric{ proportionalWeight: tm.proportionalWeight, integralWeight: tm.integralWeight, diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go index 98ea99ab4..f690ce557 100644 --- a/p2p/trust/metric_test.go +++ b/p2p/trust/metric_test.go @@ -56,6 +56,14 @@ func TestTrustMetricConfig(t *testing.T) { tm.Wait() } +func TestTrustMetricCopyNilPointer(t *testing.T) { + var tm *TrustMetric + + ctm := tm.Copy() + + assert.Nil(t, ctm) +} + // XXX: This test fails non-deterministically func _TestTrustMetricStopPause(t *testing.T) { // The TestTicker will provide manual control over diff --git a/p2p/trust/store.go b/p2p/trust/store.go index bbb4592a4..31f659a43 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -8,8 +8,8 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" ) const defaultStorePeriodicSaveInterval = 1 * time.Minute diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index 4e5553961..e1bea8636 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) func TestTrustMetricStoreSaveLoad(t *testing.T) { diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go index 55479415f..2de5e7905 100644 --- a/p2p/upnp/probe.go +++ b/p2p/upnp/probe.go @@ -5,8 +5,8 @@ import ( "net" "time" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) type UPNPCapabilities struct { diff --git a/p2p/wire.go b/p2p/wire.go index b7ae41253..40176e3af 100644 --- a/p2p/wire.go +++ b/p2p/wire.go @@ -2,11 +2,11 @@ package p2p import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc = amino.NewCodec() func init() { - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) } diff --git a/privval/priv_validator.go b/privval/priv_validator.go index 9f02482ab..5b056d8a0 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -9,8 +9,9 @@ import ( "time" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" ) // TODO: type ? 
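// [Editor's sketch, not part of the diff. A standalone demonstration of the
// Go behavior behind the TrustMetric.Copy fix above: pointer-receiver methods
// may be invoked on a nil receiver, so the nil check must precede any field
// access. The old order locked tm.mtx first and panicked on a nil receiver.]
package main

import "fmt"

type metric struct{ score int }

func (m *metric) Copy() *metric {
	if m == nil { // check the receiver before touching fields
		return nil
	}
	return &metric{score: m.score}
}

func main() {
	var m *metric
	fmt.Println(m.Copy()) // prints <nil>, no panic
}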
@@ -67,7 +68,7 @@ func (pv *FilePV) GetPubKey() crypto.PubKey { // GenFilePV generates a new validator with randomly generated private key // and sets the filePath, but does not call Save(). func GenFilePV(filePath string) *FilePV { - privKey := crypto.GenPrivKeyEd25519() + privKey := ed25519.GenPrivKey() return &FilePV{ Address: privKey.PubKey().Address(), PubKey: privKey.PubKey(), @@ -91,6 +92,10 @@ func LoadFilePV(filePath string) *FilePV { cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err)) } + // overwrite pubkey and address for convenience + pv.PubKey = pv.PrivKey.PubKey() + pv.Address = pv.PubKey.Address() + pv.filePath = filePath return pv } @@ -287,7 +292,7 @@ func (pv *FilePV) saveSigned(height int64, round int, step int8, func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error { pv.mtx.Lock() defer pv.mtx.Unlock() - sig, err:= pv.PrivKey.Sign(heartbeat.SignBytes(chainID)) + sig, err := pv.PrivKey.Sign(heartbeat.SignBytes(chainID)) if err != nil { return err } diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index 314101632..7c9c93fcf 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -9,9 +9,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" ) func TestGenLoadValidator(t *testing.T) { @@ -47,10 +48,10 @@ func TestUnmarshalValidator(t *testing.T) { assert, require := assert.New(t), require.New(t) // create some fixed values - privKey := crypto.GenPrivKeyEd25519() + privKey := ed25519.GenPrivKey() pubKey := privKey.PubKey() addr := pubKey.Address() - pubArray := [32]byte(pubKey.(crypto.PubKeyEd25519)) + pubArray := [32]byte(pubKey.(ed25519.PubKeyEd25519)) pubBytes := pubArray[:] privArray := [64]byte(privKey) privBytes := privArray[:] diff --git a/privval/socket.go b/privval/socket.go index d0be3ba4e..c33443eda 100644 --- a/privval/socket.go +++ b/privval/socket.go @@ -9,8 +9,9 @@ import ( "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" p2pconn "github.com/tendermint/tendermint/p2p/conn" "github.com/tendermint/tendermint/types" @@ -74,7 +75,7 @@ type SocketPV struct { connDeadline time.Duration connHeartbeat time.Duration connWaitTimeout time.Duration - privKey crypto.PrivKeyEd25519 + privKey ed25519.PrivKeyEd25519 conn net.Conn listener net.Listener @@ -87,7 +88,7 @@ var _ types.PrivValidator = (*SocketPV)(nil) func NewSocketPV( logger log.Logger, socketAddr string, - privKey crypto.PrivKeyEd25519, + privKey ed25519.PrivKeyEd25519, ) *SocketPV { sc := &SocketPV{ addr: socketAddr, @@ -343,7 +344,7 @@ type RemoteSigner struct { chainID string connDeadline time.Duration connRetries int - privKey crypto.PrivKeyEd25519 + privKey ed25519.PrivKeyEd25519 privVal types.PrivValidator conn net.Conn @@ -354,7 +355,7 @@ func NewRemoteSigner( logger log.Logger, chainID, socketAddr string, privVal types.PrivValidator, - privKey crypto.PrivKeyEd25519, + privKey ed25519.PrivKeyEd25519, ) *RemoteSigner { rs := 
&RemoteSigner{ addr: socketAddr, diff --git a/privval/socket_test.go b/privval/socket_test.go index fcf21e0c6..461ce3f85 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" p2pconn "github.com/tendermint/tendermint/p2p/conn" "github.com/tendermint/tendermint/types" @@ -112,14 +112,14 @@ func TestSocketPVAcceptDeadline(t *testing.T) { sc = NewSocketPV( log.TestingLogger(), "127.0.0.1:0", - crypto.GenPrivKeyEd25519(), + ed25519.GenPrivKey(), ) ) defer sc.Stop() SocketPVAcceptDeadline(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestSocketPVDeadline(t *testing.T) { @@ -129,7 +129,7 @@ func TestSocketPVDeadline(t *testing.T) { sc = NewSocketPV( log.TestingLogger(), addr, - crypto.GenPrivKeyEd25519(), + ed25519.GenPrivKey(), ) ) @@ -152,7 +152,7 @@ func TestSocketPVDeadline(t *testing.T) { _, err = p2pconn.MakeSecretConnection( conn, - crypto.GenPrivKeyEd25519(), + ed25519.GenPrivKey(), ) if err == nil { break @@ -165,20 +165,20 @@ func TestSocketPVDeadline(t *testing.T) { time.Sleep(20 * time.Microsecond) _, err := sc.getPubKey() - assert.Equal(t, err.(cmn.Error).Cause(), ErrConnTimeout) + assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) } func TestSocketPVWait(t *testing.T) { sc := NewSocketPV( log.TestingLogger(), "127.0.0.1:0", - crypto.GenPrivKeyEd25519(), + ed25519.GenPrivKey(), ) defer sc.Stop() SocketPVConnWait(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestRemoteSignerRetry(t *testing.T) { @@ -214,14 +214,14 @@ func TestRemoteSignerRetry(t *testing.T) { cmn.RandStr(12), ln.Addr().String(), types.NewMockPV(), - crypto.GenPrivKeyEd25519(), + ed25519.GenPrivKey(), ) defer rs.Stop() RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnRetries(retries)(rs) - assert.Equal(t, rs.Start().(cmn.Error).Cause(), ErrDialRetryMax) + assert.Equal(t, rs.Start().(cmn.Error).Data(), ErrDialRetryMax) select { case attempts := <-attemptc: @@ -245,12 +245,12 @@ func testSetupSocketPair( chainID, addr, privVal, - crypto.GenPrivKeyEd25519(), + ed25519.GenPrivKey(), ) sc = NewSocketPV( logger, addr, - crypto.GenPrivKeyEd25519(), + ed25519.GenPrivKey(), ) ) diff --git a/privval/wire.go b/privval/wire.go index c42ba40d6..50660ff34 100644 --- a/privval/wire.go +++ b/privval/wire.go @@ -2,12 +2,12 @@ package privval import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc = amino.NewCodec() func init() { - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) RegisterSocketPVMsg(cdc) } diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index a50071fea..3c556d4f0 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -8,8 +8,8 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - 
"github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) //---------------------------------------- diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go index 5d89ef195..279fa42ee 100644 --- a/proxy/multi_app_conn.go +++ b/proxy/multi_app_conn.go @@ -3,7 +3,7 @@ package proxy import ( "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------- diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 844d2b88a..79c452fc9 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -10,7 +10,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var waitForEventTimeout = 5 * time.Second diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 1414edce3..4b85bf01d 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -11,7 +11,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) /* @@ -29,12 +29,13 @@ type HTTP struct { *WSEvents } -// New takes a remote endpoint in the form tcp://: +// NewHTTP takes a remote endpoint in the form tcp://: // and the websocket path (which always seems to be "/websocket") func NewHTTP(remote, wsEndpoint string) *HTTP { rc := rpcclient.NewJSONRPCClient(remote) cdc := rc.Codec() ctypes.RegisterAmino(cdc) + rc.SetCodec(cdc) return &HTTP{ rpc: rc, diff --git a/rpc/client/interface.go b/rpc/client/interface.go index afe2d8fa0..f939c855b 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -23,7 +23,7 @@ implementation. 
import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIClient groups together the functionality that principally diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index d89ec3b22..b3c5e3090 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -3,12 +3,12 @@ package client import ( "context" + cmn "github.com/tendermint/tendermint/libs/common" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" nm "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" ) /* diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 244855c6b..c8ca060c6 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -6,7 +6,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIApp will send all abci related request to the named app, diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 323a42a47..bcf443cf0 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestABCIMock(t *testing.T) { diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 6af9abb27..955df6277 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -20,7 +20,7 @@ import ( "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Client wraps arbitrary implementations of the various interfaces. diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index dafd35080..8e3c15061 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestStatus(t *testing.T) { diff --git a/rpc/core/abci.go b/rpc/core/abci.go index c07724d58..a5eede3fc 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -4,7 +4,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Query the application for some information. 
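// [Editor's sketch, not part of the diff: typical use of the NewHTTP
// constructor fixed above, where the missing rc.SetCodec(cdc) meant the
// amino types registered on the codec were never wired into the client.
// Assumes a node is listening on the default RPC address; endpoint values
// and the printed field are illustrative.]
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://localhost:26657", "/websocket")
	status, err := c.Status()
	if err != nil {
		panic(err) // node not reachable, or codec not set up
	}
	fmt.Println(status.NodeInfo.ID)
}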
diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index a5ad5b4cb..5815b60e3 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -3,10 +3,10 @@ package core import ( "fmt" + cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" ) // Get block headers for minHeight <= height <= maxHeight. @@ -288,12 +288,12 @@ func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { // use a non-canonical commit if height == storeHeight { commit := blockStore.LoadSeenCommit(height) - return ctypes.NewResultCommit(header, commit, false), nil + return ctypes.NewResultCommit(&header, commit, false), nil } // Return the canonical commit (comes from the block at height+1) commit := blockStore.LoadBlockCommit(height) - return ctypes.NewResultCommit(header, commit, true), nil + return ctypes.NewResultCommit(&header, commit, true), nil } // BlockResults gets ABCIResults at a given height. diff --git a/rpc/core/doc.go b/rpc/core/doc.go index d076b3ecd..75f6ac82e 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -39,8 +39,6 @@ curl 'localhost:26657/broadcast_tx_sync?tx="abc"' } ``` -The first entry in the result-array (`96`) is the method this response correlates with. `96` refers to "ResultTypeBroadcastTx", see [responses.go](https://github.com/tendermint/tendermint/blob/master/rpc/core/types/responses.go) for a complete overview. - ## JSONRPC/HTTP JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g. `http://localhost:26657/`). diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 437f5965a..ecc41ce12 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -10,7 +10,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 9fcb75e19..128b3e9a7 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -3,15 +3,15 @@ package core import ( "time" - crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/consensus" + crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/rpc/core/status.go b/rpc/core/status.go index 2c54d0a94..739e67b86 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -4,10 +4,10 @@ import ( "bytes" "time" + cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" ) // Get Tendermint status including node info, pubkey, latest block @@ -104,8 +104,17 @@ func Status() (*ctypes.ResultStatus, error) { return result, nil } +const consensusTimeout = time.Second + func validatorAtHeight(h int64) *types.Validator { - lastBlockHeight, vals := 
consensusState.GetValidators() + lastBlockHeight, vals := getValidatorsWithTimeout( + consensusState, + consensusTimeout, + ) + + if lastBlockHeight == -1 { + return nil + } privValAddress := pubKey.Address() @@ -131,3 +140,32 @@ func validatorAtHeight(h int64) *types.Validator { return nil } + +type validatorRetriever interface { + GetValidators() (int64, []*types.Validator) +} + +// NOTE: Consensus might halt, but we still need to process RPC requests (at +// least for endpoints whose output does not depend on consensus state). +func getValidatorsWithTimeout( + vr validatorRetriever, + t time.Duration, +) (int64, []*types.Validator) { + resultCh := make(chan struct { + lastBlockHeight int64 + vals []*types.Validator + }) + go func() { + h, v := vr.GetValidators() + resultCh <- struct { + lastBlockHeight int64 + vals []*types.Validator + }{h, v} + }() + select { + case res := <-resultCh: + return res.lastBlockHeight, res.vals + case <-time.After(t): + return -1, []*types.Validator{} + } +} diff --git a/rpc/core/status_test.go b/rpc/core/status_test.go new file mode 100644 index 000000000..e44ffed0f --- /dev/null +++ b/rpc/core/status_test.go @@ -0,0 +1,39 @@ +package core + +import ( + "testing" + "time" + + "github.com/tendermint/tendermint/types" +) + +func TestGetValidatorsWithTimeout(t *testing.T) { + height, vs := getValidatorsWithTimeout( + testValidatorReceiver{}, + time.Millisecond, + ) + + if height != -1 { + t.Errorf("expected negative height") + } + + if len(vs) != 0 { + t.Errorf("expected no validators") + } +} + +type testValidatorReceiver struct{} + +func (tr testValidatorReceiver) GetValidators() (int64, []*types.Validator) { + vs := []*types.Validator{} + + for i := 0; i < 3; i++ { + v, _ := types.RandValidator(true, 10) + + vs = append(vs, v) + } + + time.Sleep(time.Millisecond) + + return 10, vs +} diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 2fa7825fd..f53d82f14 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -3,7 +3,7 @@ package core import ( "fmt" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ctypes "github.com/tendermint/tendermint/rpc/core/types" diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 27302be13..4fec416ed 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -7,7 +7,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/state" diff --git a/rpc/core/types/wire.go b/rpc/core/types/wire.go index d3a31dc35..d49b977eb 100644 --- a/rpc/core/types/wire.go +++ b/rpc/core/types/wire.go @@ -2,12 +2,12 @@ package core_types import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" "github.com/tendermint/tendermint/types" ) func RegisterAmino(cdc *amino.Codec) { types.RegisterEventDatas(cdc) types.RegisterEvidences(cdc) - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) } diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index c06f1cf46..c88989685 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -9,7 +9,7 @@ import ( "golang.org/x/net/netutil" "google.golang.org/grpc" - cmn "github.com/tendermint/tmlibs/common" + cmn
"github.com/tendermint/tendermint/libs/common" ) // Config is an gRPC server configuration. diff --git a/rpc/grpc/compile.sh b/rpc/grpc/compile.sh deleted file mode 100644 index 2c4629c8e..000000000 --- a/rpc/grpc/compile.sh +++ /dev/null @@ -1,3 +0,0 @@ -#! /bin/bash - -protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . types.proto diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index be16b711a..8bc9761a4 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -1,34 +1,38 @@ -// Code generated by protoc-gen-go. -// source: types.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: rpc/grpc/types.proto /* -Package core_grpc is a generated protocol buffer package. + Package core_grpc is a generated protocol buffer package. -It is generated from these files: - types.proto + It is generated from these files: + rpc/grpc/types.proto -It has these top-level messages: - RequestPing - RequestBroadcastTx - ResponsePing - ResponseBroadcastTx + It has these top-level messages: + RequestPing + RequestBroadcastTx + ResponsePing + ResponseBroadcastTx */ +//nolint package core_grpc -import proto "github.com/golang/protobuf/proto" +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" +import _ "github.com/gogo/protobuf/gogoproto" import types "github.com/tendermint/tendermint/abci/types" -import ( - "context" +import bytes "bytes" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = golang_proto.Marshal var _ = fmt.Errorf var _ = math.Inf @@ -36,7 +40,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type RequestPing struct { } @@ -44,7 +48,7 @@ type RequestPing struct { func (m *RequestPing) Reset() { *m = RequestPing{} } func (m *RequestPing) String() string { return proto.CompactTextString(m) } func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*RequestPing) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } type RequestBroadcastTx struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` @@ -53,7 +57,7 @@ type RequestBroadcastTx struct { func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } func (m *RequestBroadcastTx) GetTx() []byte { if m != nil { @@ -68,7 +72,7 @@ type ResponsePing struct { func (m *ResponsePing) Reset() { *m = ResponsePing{} } func (m *ResponsePing) String() string { return proto.CompactTextString(m) } func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ResponsePing) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } type ResponseBroadcastTx struct { CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` @@ -78,7 +82,7 @@ type ResponseBroadcastTx struct { func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { if m != nil { @@ -96,9 +100,106 @@ func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { func init() { proto.RegisterType((*RequestPing)(nil), "core_grpc.RequestPing") + golang_proto.RegisterType((*RequestPing)(nil), "core_grpc.RequestPing") proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx") + golang_proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx") proto.RegisterType((*ResponsePing)(nil), "core_grpc.ResponsePing") + golang_proto.RegisterType((*ResponsePing)(nil), "core_grpc.ResponsePing") proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx") + golang_proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx") +} +func (this *RequestPing) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestPing) + if !ok { + that2, ok := that.(RequestPing) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *RequestBroadcastTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestBroadcastTx) + if !ok { + that2, ok := that.(RequestBroadcastTx) + if ok { + that1 = &that2 + } 
else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Tx, that1.Tx) { + return false + } + return true +} +func (this *ResponsePing) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponsePing) + if !ok { + that2, ok := that.(ResponsePing) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *ResponseBroadcastTx) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseBroadcastTx) + if !ok { + that2, ok := that.(ResponseBroadcastTx) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.CheckTx.Equal(that1.CheckTx) { + return false + } + if !this.DeliverTx.Equal(that1.DeliverTx) { + return false + } + return true } // Reference imports to suppress errors if they are not otherwise used. @@ -203,28 +304,702 @@ var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "types.proto", -} - -func init() { proto.RegisterFile("types.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 264 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, - 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4c, 0xce, 0x2f, 0x4a, 0x8d, 0x4f, 0x2f, - 0x2a, 0x48, 0x96, 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f, - 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x4f, 0x4c, 0x4a, 0xce, 0xd4, 0x07, - 0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xc4, 0xcb, 0xc5, 0x1d, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x12, - 0x90, 0x99, 0x97, 0xae, 0xa4, 0xc2, 0x25, 0x04, 0xe5, 0x3a, 0x15, 0xe5, 0x27, 0xa6, 0x24, 0x27, - 0x16, 0x97, 0x84, 0x54, 0x08, 0xf1, 0x71, 0x31, 0x95, 0x54, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, - 0x04, 0x31, 0x95, 0x54, 0x28, 0xf1, 0x71, 0xf1, 0x04, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, - 0x82, 0x75, 0x35, 0x32, 0x72, 0x09, 0xc3, 0x04, 0x90, 0xf5, 0x19, 0x72, 0x71, 0x24, 0x67, 0xa4, - 0x26, 0x67, 0xc7, 0x43, 0x75, 0x73, 0x1b, 0x89, 0xe9, 0x41, 0x2c, 0x87, 0xa9, 0x76, 0x06, 0x49, - 0x87, 0x54, 0x04, 0xb1, 0x27, 0x43, 0x18, 0x42, 0xe6, 0x5c, 0x5c, 0x29, 0xa9, 0x39, 0x99, 0x65, - 0xa9, 0x45, 0x20, 0x4d, 0x4c, 0x60, 0x4d, 0x12, 0x68, 0x9a, 0x5c, 0x20, 0x0a, 0x42, 0x2a, 0x82, - 0x38, 0x53, 0x60, 0x4c, 0xa3, 0xa9, 0x8c, 0x5c, 0x3c, 0x70, 0xbb, 0x1d, 0x03, 0x3c, 0x85, 0xcc, - 0xb9, 0x58, 0x40, 0x8e, 0x13, 0x12, 0xd3, 0x83, 0x87, 0x8d, 0x1e, 0x92, 0x57, 0xa5, 0xc4, 0x51, - 0xc4, 0x11, 0xbe, 0x11, 0xf2, 0xe1, 0xe2, 0x46, 0xf6, 0x84, 0x2c, 0xa6, 0x7e, 0x24, 0x69, 0x29, - 0x39, 0x2c, 0xc6, 0x20, 0xc9, 0x27, 0xb1, 0x81, 0xc3, 0xd9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, - 0x92, 0x29, 0xd9, 0x42, 0xaf, 0x01, 0x00, 0x00, + Metadata: "rpc/grpc/types.proto", +} + +func (m *RequestPing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i += copy(dAtA[i:], m.Tx) + } + return i, nil +} + +func (m *ResponsePing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CheckTx != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CheckTx.Size())) + n1, err := m.CheckTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.DeliverTx != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DeliverTx.Size())) + n2, err := m.DeliverTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedRequestPing(r randyTypes, easy bool) *RequestPing { + this := &RequestPing{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequestBroadcastTx(r randyTypes, easy bool) *RequestBroadcastTx { + this := &RequestBroadcastTx{} + v1 := r.Intn(100) + this.Tx = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Tx[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponsePing(r randyTypes, easy bool) *ResponsePing { + this := &ResponsePing{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponseBroadcastTx(r randyTypes, easy bool) *ResponseBroadcastTx { + this := &ResponseBroadcastTx{} + if r.Intn(10) != 0 { + this.CheckTx = types.NewPopulatedResponseCheckTx(r, easy) + } + if r.Intn(10) != 0 { + this.DeliverTx = types.NewPopulatedResponseDeliverTx(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyTypes interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneTypes(r randyTypes) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringTypes(r randyTypes) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneTypes(r) + } + return string(tmps) +} +func randUnrecognizedTypes(r randyTypes, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldTypes(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + v3 := r.Int63() + if 
r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *RequestPing) Size() (n int) { + var l int + _ = l + return n +} + +func (m *RequestBroadcastTx) Size() (n int) { + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponsePing) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResponseBroadcastTx) Size() (n int) { + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestPing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponsePing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckTx == nil { + m.CheckTx = &types.ResponseCheckTx{} + } + if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeliverTx == nil { + m.DeliverTx = &types.ResponseDeliverTx{} + } + if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptorTypes) } +func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 321 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x29, 0x2a, 0x48, 0xd6, + 0x4f, 0x07, 0x11, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, + 0xc9, 0xf9, 0x45, 0xa9, 0xf1, 0x20, 0x61, 0x29, 0xdd, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, + 0xe4, 0xfc, 0x5c, 0xfd, 0xf4, 0xfc, 0xf4, 0x7c, 0x7d, 0xb0, 0x8a, 0xa4, 0xd2, 0x34, 0x30, 0x0f, + 0xcc, 0x01, 0xb3, 0x20, 0x3a, 0xa5, 0xcc, 0x91, 0x94, 0x97, 0xa4, 0xe6, 0xa5, 0xa4, 0x16, 0xe5, + 0x66, 0xe6, 0x95, 0x20, 0x33, 0x13, 0x93, 0x92, 0x33, 0x21, 0x96, 0x21, 0x5b, 0xa9, 0xc4, 0xcb, + 0xc5, 0x1d, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x12, 0x90, 0x99, 0x97, 0xae, 0xa4, 0xc2, 0x25, + 0x04, 0xe5, 0x3a, 0x15, 0xe5, 0x27, 0xa6, 0x24, 0x27, 0x16, 0x97, 0x84, 0x54, 0x08, 0xf1, 0x71, + 0x31, 0x95, 0x54, 
0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x31, 0x95, 0x54, 0x28, 0xf1, 0x71, + 0xf1, 0x04, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x82, 0x75, 0x35, 0x32, 0x72, 0x09, 0xc3, + 0x04, 0x90, 0xf5, 0x19, 0x72, 0x71, 0x24, 0x67, 0xa4, 0x26, 0x67, 0xc7, 0x43, 0x75, 0x73, 0x1b, + 0x89, 0xe9, 0x41, 0x2c, 0x87, 0xa9, 0x76, 0x06, 0x49, 0x87, 0x54, 0x04, 0xb1, 0x27, 0x43, 0x18, + 0x42, 0xe6, 0x5c, 0x5c, 0x29, 0xa9, 0x39, 0x99, 0x65, 0xa9, 0x45, 0x20, 0x4d, 0x4c, 0x60, 0x4d, + 0x12, 0x68, 0x9a, 0x5c, 0x20, 0x0a, 0x42, 0x2a, 0x82, 0x38, 0x53, 0x60, 0x4c, 0xa3, 0xa9, 0x8c, + 0x5c, 0x3c, 0x70, 0xbb, 0x1d, 0x03, 0x3c, 0x85, 0xcc, 0xb9, 0x58, 0x40, 0x8e, 0x13, 0x12, 0xd3, + 0x83, 0x87, 0xaa, 0x1e, 0x92, 0x57, 0xa5, 0xc4, 0x51, 0xc4, 0x11, 0xbe, 0x11, 0xf2, 0xe1, 0xe2, + 0x46, 0xf6, 0x84, 0x2c, 0xa6, 0x7e, 0x24, 0x69, 0x29, 0x39, 0x2c, 0xc6, 0x20, 0xc9, 0x3b, 0xc9, + 0xfc, 0x78, 0x28, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x8e, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, + 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x81, 0xc7, 0x72, 0x8c, 0x49, 0x6c, + 0xe0, 0x58, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xd5, 0xa8, 0xe4, 0xd9, 0x10, 0x02, 0x00, + 0x00, } diff --git a/rpc/grpc/types.proto b/rpc/grpc/types.proto index d7980d5e0..beb6442a2 100644 --- a/rpc/grpc/types.proto +++ b/rpc/grpc/types.proto @@ -1,8 +1,17 @@ syntax = "proto3"; package core_grpc; +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "github.com/tendermint/tendermint/abci/types/types.proto"; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.goproto_registration) = true; +// Generate tests +option (gogoproto.populate_all) = true; +option (gogoproto.equal_all) = true; +option (gogoproto.testgen_all) = true; //---------------------------------------- // Message types diff --git a/rpc/grpc/typespb_test.go b/rpc/grpc/typespb_test.go new file mode 100644 index 000000000..3d28002b1 --- /dev/null +++ b/rpc/grpc/typespb_test.go @@ -0,0 +1,531 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: rpc/grpc/types.proto + +/* +Package core_grpc is a generated protocol buffer package. + +It is generated from these files: + rpc/grpc/types.proto + +It has these top-level messages: + RequestPing + RequestBroadcastTx + ResponsePing + ResponseBroadcastTx +*/ +package core_grpc + +import testing "testing" +import rand "math/rand" +import time "time" +import proto "github.com/gogo/protobuf/proto" +import jsonpb "github.com/gogo/protobuf/jsonpb" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/tendermint/tendermint/abci/types" + +// Reference imports to suppress errors if they are not otherwise used. 
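Aside: the generated test file that follows applies one pattern to every message type — populate a message with random data, round-trip it through an encoding, assert equality, then fuzz the bytes and require only that Unmarshal not panic. Condensed into a single illustrative helper (the helper itself is not generated and assumes the imports of the surrounding file; the populate and Equal methods it calls are generated, per the code above):

```go
// roundTripPattern condenses the shape of each generated test below.
func roundTripPattern(t *testing.T) {
	// The seed is logged on failure so randomized runs stay reproducible.
	seed := time.Now().UnixNano()
	popr := rand.New(rand.NewSource(seed))
	p := NewPopulatedRequestBroadcastTx(popr, false)

	dAtA, err := proto.Marshal(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &RequestBroadcastTx{}
	if err := proto.Unmarshal(dAtA, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, round-trip changed the message", seed)
	}

	// Fuzz step: corrupted input may make Unmarshal error, but never panic.
	for i := range dAtA {
		dAtA[i] = byte(popr.Intn(256))
	}
	_ = proto.Unmarshal(dAtA, msg)
}
```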
+var _ = proto.Marshal +var _ = golang_proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func TestRequestPingProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestPing(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestPing{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestPingMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestPing(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestPing{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestBroadcastTxProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBroadcastTx(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestBroadcastTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestBroadcastTxMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBroadcastTx(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestBroadcastTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponsePingProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponsePing(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponsePing{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := 
make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponsePingMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponsePing(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponsePing{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseBroadcastTxProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBroadcastTx(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBroadcastTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseBroadcastTxMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBroadcastTx(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBroadcastTx{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestPingJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestPing(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestPing{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestBroadcastTxJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBroadcastTx(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestBroadcastTx{} + err = 
jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponsePingJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponsePing(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponsePing{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseBroadcastTxJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBroadcastTx(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBroadcastTx{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestPingProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestPing(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestPing{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestPingProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestPing(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestPing{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestBroadcastTxProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBroadcastTx(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RequestBroadcastTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestBroadcastTxProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBroadcastTx(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RequestBroadcastTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponsePingProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponsePing(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponsePing{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponsePingProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := 
NewPopulatedResponsePing(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponsePing{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseBroadcastTxProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBroadcastTx(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ResponseBroadcastTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseBroadcastTxProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBroadcastTx(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ResponseBroadcastTx{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestPingSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestPing(popr, true) + size2 := proto.Size(p) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestRequestBroadcastTxSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRequestBroadcastTx(popr, true) + size2 := proto.Size(p) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestResponsePingSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponsePing(popr, true) + size2 := proto.Size(p) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestResponseBroadcastTxSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedResponseBroadcastTx(popr, true) + size2 := proto.Size(p) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", 
seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index e26d8f274..bd440289b 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -17,6 +17,14 @@ import ( types "github.com/tendermint/tendermint/rpc/lib/types" ) +const ( + protoHTTP = "http" + protoHTTPS = "https" + protoWSS = "wss" + protoWS = "ws" + protoTCP = "tcp" +) + // HTTPClient is a common interface for JSONRPCClient and URIClient. type HTTPClient interface { Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) @@ -25,29 +33,37 @@ type HTTPClient interface { } // TODO: Deprecate support for IP:PORT or /path/to/socket -func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn, error)) { +func makeHTTPDialer(remoteAddr string) (string, string, func(string, string) (net.Conn, error)) { + // protocol to use for http operations, to support both http and https + clientProtocol := protoHTTP + parts := strings.SplitN(remoteAddr, "://", 2) var protocol, address string if len(parts) == 1 { // default to tcp if nothing specified - protocol, address = "tcp", remoteAddr + protocol, address = protoTCP, remoteAddr } else if len(parts) == 2 { protocol, address = parts[0], parts[1] } else { // return a invalid message msg := fmt.Sprintf("Invalid addr: %s", remoteAddr) - return msg, func(_ string, _ string) (net.Conn, error) { + return clientProtocol, msg, func(_ string, _ string) (net.Conn, error) { return nil, errors.New(msg) } } - // accept http as an alias for tcp - if protocol == "http" { - protocol = "tcp" + + // accept http as an alias for tcp and set the client protocol + switch protocol { + case protoHTTP, protoHTTPS: + clientProtocol = protocol + protocol = protoTCP + case protoWS, protoWSS: + clientProtocol = protocol } // replace / with . for http requests (kvstore domain) trimmedAddress := strings.Replace(address, "/", ".", -1) - return trimmedAddress, func(proto, addr string) (net.Conn, error) { + return clientProtocol, trimmedAddress, func(proto, addr string) (net.Conn, error) { return net.Dial(protocol, address) } } @@ -55,8 +71,8 @@ func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn, // We overwrite the http.Client.Dial so we can do http over tcp or unix. // remoteAddr should be fully featured (eg. 
with tcp:// or unix://) func makeHTTPClient(remoteAddr string) (string, *http.Client) { - address, dialer := makeHTTPDialer(remoteAddr) - return "http://" + address, &http.Client{ + protocol, address, dialer := makeHTTPDialer(remoteAddr) + return protocol + "://" + address, &http.Client{ Transport: &http.Transport{ Dial: dialer, }, diff --git a/rpc/lib/client/integration_test.go b/rpc/lib/client/integration_test.go index d3d993374..93a32388f 100644 --- a/rpc/lib/client/integration_test.go +++ b/rpc/lib/client/integration_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestWSClientReconnectWithJitter(t *testing.T) { diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index a95ce17d2..9a07c8676 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/go-amino" types "github.com/tendermint/tendermint/rpc/lib/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( @@ -70,13 +70,21 @@ type WSClient struct { // Send pings to server with this period. Must be less than readWait. If 0, no pings will be sent. pingPeriod time.Duration + + // Support both ws and wss protocols + protocol string } // NewWSClient returns a new client. See the commentary on the func(*WSClient) // functions for a detailed description of how to configure ping period and // pong wait time. The endpoint argument must begin with a `/`. func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSClient { - addr, dialer := makeHTTPDialer(remoteAddr) + protocol, addr, dialer := makeHTTPDialer(remoteAddr) + // default to ws protocol, unless wss is explicitly specified + if protocol != "wss" { + protocol = "ws" + } + c := &WSClient{ cdc: amino.NewCodec(), Address: addr, @@ -88,6 +96,7 @@ func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSCli readWait: defaultReadWait, writeWait: defaultWriteWait, pingPeriod: defaultPingPeriod, + protocol: protocol, } c.BaseService = *cmn.NewBaseService(nil, "WSClient", c) for _, option := range options { @@ -242,7 +251,7 @@ func (c *WSClient) dial() error { Proxy: http.ProxyFromEnvironment, } rHeader := http.Header{} - conn, _, err := dialer.Dial("ws://"+c.Address+c.Endpoint, rHeader) + conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) if err != nil { return err } diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index 73f671609..e902fe21a 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -12,7 +12,7 @@ import ( "github.com/gorilla/websocket" "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" types "github.com/tendermint/tendermint/rpc/lib/types" ) diff --git a/rpc/lib/doc.go b/rpc/lib/doc.go index 2bc438593..b96b9123c 100644 --- a/rpc/lib/doc.go +++ b/rpc/lib/doc.go @@ -98,6 +98,6 @@ Each route is available as a GET request, as a JSONRPCv2 POST request, and via J # Examples * [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) -* [tm-monitor](https://github.com/tendermint/tools/blob/master/tm-monitor/rpc.go) +* [tm-monitor](https://github.com/tendermint/tendermint/blob/master/tools/tm-monitor/rpc.go) */ package rpc diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index fe765473d..3d76db323 
100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -6,7 +6,6 @@ import ( crand "crypto/rand" "encoding/json" "fmt" - "math/rand" "net/http" "os" "os/exec" @@ -18,8 +17,8 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" client "github.com/tendermint/tendermint/rpc/lib/client" server "github.com/tendermint/tendermint/rpc/lib/server" @@ -206,7 +205,7 @@ func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { require.Nil(t, err) assert.Equal(t, got3, val3) - val4 := rand.Intn(10000) + val4 := cmn.RandIntn(10000) got4, err := echoIntViaHTTP(cl, val4) require.Nil(t, err) assert.Equal(t, got4, val4) @@ -370,7 +369,7 @@ func TestWSClientPingPong(t *testing.T) { } func randBytes(t *testing.T) []byte { - n := rand.Intn(10) + 2 + n := cmn.RandIntn(10) + 2 buf := make([]byte, n) _, err := crand.Read(buf) require.Nil(t, err) diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 6cc03012a..3ec5f81e3 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -18,9 +18,9 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" types "github.com/tendermint/tendermint/rpc/lib/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" ) // RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions. @@ -294,7 +294,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re continue } - v, err, ok := nonJSONToArg(cdc, argType, arg) + v, err, ok := nonJSONStringToArg(cdc, argType, arg) if err != nil { return nil, err } @@ -303,7 +303,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re continue } - values[i], err = _jsonStringToArg(cdc, argType, arg) + values[i], err = jsonStringToArg(cdc, argType, arg) if err != nil { return nil, err } @@ -312,26 +312,64 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re return values, nil } -func _jsonStringToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error) { - v := reflect.New(ty) - err := cdc.UnmarshalJSON([]byte(arg), v.Interface()) +func jsonStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error) { + rv := reflect.New(rt) + err := cdc.UnmarshalJSON([]byte(arg), rv.Interface()) if err != nil { - return v, err + return rv, err } - v = v.Elem() - return v, nil + rv = rv.Elem() + return rv, nil } -func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error, bool) { +func nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) { + if rt.Kind() == reflect.Ptr { + rv_, err, ok := nonJSONStringToArg(cdc, rt.Elem(), arg) + if err != nil { + return reflect.Value{}, err, false + } else if ok { + rv := reflect.New(rt.Elem()) + rv.Elem().Set(rv_) + return rv, nil, true + } else { + return reflect.Value{}, nil, false + } + } else { + return _nonJSONStringToArg(cdc, rt, arg) + } +} + +// NOTE: rt.Kind() isn't a pointer. 
+func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) { + isIntString := RE_INT.Match([]byte(arg)) isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") - expectingString := ty.Kind() == reflect.String - expectingByteSlice := ty.Kind() == reflect.Slice && ty.Elem().Kind() == reflect.Uint8 + + var expectingString, expectingByteSlice, expectingInt bool + switch rt.Kind() { + case reflect.Int, reflect.Uint, reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64: + expectingInt = true + case reflect.String: + expectingString = true + case reflect.Slice: + expectingByteSlice = rt.Elem().Kind() == reflect.Uint8 + } + + if isIntString && expectingInt { + qarg := `"` + arg + `"` + // jsonStringToArg + rv, err := jsonStringToArg(cdc, rt, qarg) + if err != nil { + return rv, err, false + } else { + return rv, nil, true + } + } if isHexString { if !expectingString && !expectingByteSlice { err := errors.Errorf("Got a hex string arg, but expected '%s'", - ty.Kind().String()) + rt.Kind().String()) return reflect.ValueOf(nil), err, false } @@ -340,7 +378,7 @@ func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, if err != nil { return reflect.ValueOf(nil), err, false } - if ty.Kind() == reflect.String { + if rt.Kind() == reflect.String { return reflect.ValueOf(string(value)), nil, true } return reflect.ValueOf([]byte(value)), nil, true @@ -406,7 +444,13 @@ type wsConnection struct { // description of how to configure ping period and pong wait time. NOTE: if the // write buffer is full, pongs may be dropped, which may cause clients to // disconnect. 
see https://github.com/gorilla/websocket/issues/97 -func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, cdc *amino.Codec, options ...func(*wsConnection)) *wsConnection { +func NewWSConnection( + baseConn *websocket.Conn, + funcMap map[string]*RPCFunc, + cdc *amino.Codec, + options ...func(*wsConnection), +) *wsConnection { + baseConn.SetReadLimit(maxBodyBytes) wsc := &wsConnection{ remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go index af5665138..3471eb791 100644 --- a/rpc/lib/server/handlers_test.go +++ b/rpc/lib/server/handlers_test.go @@ -16,7 +16,7 @@ import ( amino "github.com/tendermint/go-amino" rs "github.com/tendermint/tendermint/rpc/lib/server" types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) ////////////////////////////////////////////////////////////////////////////// diff --git a/rpc/lib/server/http_params.go b/rpc/lib/server/http_params.go index 565060678..3c948c0ba 100644 --- a/rpc/lib/server/http_params.go +++ b/rpc/lib/server/http_params.go @@ -15,6 +15,7 @@ var ( dotAtom = atom + `(?:\.` + atom + `)*` domain = `[A-Z0-9.-]+\.[A-Z]{2,4}` + RE_INT = regexp.MustCompile(`^-?[0-9]+$`) RE_HEX = regexp.MustCompile(`^(?i)[a-f0-9]+$`) RE_EMAIL = regexp.MustCompile(`^(?i)(` + dotAtom + `)@(` + dotAtom + `)$`) RE_ADDRESS = regexp.MustCompile(`^(?i)[a-z0-9]{25,34}$`) diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 9bdb4dffa..5d816ef22 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -15,7 +15,7 @@ import ( "golang.org/x/net/netutil" types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) // Config is an RPC server configuration. @@ -23,6 +23,12 @@ type Config struct { MaxOpenConnections int } +const ( + // maxBodyBytes controls the maximum number of bytes the + // server will read while parsing the request body. + maxBodyBytes = int64(1000000) // 1MB +) + // StartHTTPServer starts an HTTP server on listenAddr with the given handler. // It wraps handler with RecoverAndLogHandler.
 // StartHTTPServer starts an HTTP server on listenAddr with the given handler.
 // It wraps handler with RecoverAndLogHandler.
 func StartHTTPServer(
@@ -53,7 +59,7 @@ func StartHTTPServer(
 	go func() {
 		err := http.Serve(
 			listener,
-			RecoverAndLogHandler(handler, logger),
+			RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger),
 		)
 		logger.Error("RPC HTTP server stopped", "err", err)
 	}()
@@ -99,7 +105,7 @@ func StartHTTPAndTLSServer(
 	go func() {
 		err := http.ServeTLS(
 			listener,
-			RecoverAndLogHandler(handler, logger),
+			RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger),
 			certFile,
 			keyFile,
 		)
@@ -202,3 +208,13 @@ func (w *ResponseWriterWrapper) WriteHeader(status int) {
 func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	return w.ResponseWriter.(http.Hijacker).Hijack()
 }
+
+type maxBytesHandler struct {
+	h http.Handler
+	n int64
+}
+
+func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	r.Body = http.MaxBytesReader(w, r.Body, h.n)
+	h.h.ServeHTTP(w, r)
+}
diff --git a/rpc/lib/server/http_server_test.go b/rpc/lib/server/http_server_test.go
index 22fd8a23b..3cbe0d906 100644
--- a/rpc/lib/server/http_server_test.go
+++ b/rpc/lib/server/http_server_test.go
@@ -10,7 +10,7 @@ import (
 	"testing"
 	"time"

-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )

 func TestMaxOpenConnections(t *testing.T) {
diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go
index d4a59c622..7b0aacdbe 100644
--- a/rpc/lib/server/parse_test.go
+++ b/rpc/lib/server/parse_test.go
@@ -2,12 +2,14 @@ package rpcserver

 import (
 	"encoding/json"
+	"fmt"
+	"net/http"
 	"strconv"
 	"testing"

 	"github.com/stretchr/testify/assert"
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )

 func TestParseJSONMap(t *testing.T) {
@@ -134,7 +136,7 @@ func TestParseJSONArray(t *testing.T) {
 	}
 }

-func TestParseRPC(t *testing.T) {
+func TestParseJSONRPC(t *testing.T) {
 	assert := assert.New(t)

 	demo := func(height int, name string) {}
@@ -172,5 +174,48 @@ func TestParseRPC(t *testing.T) {
 		}
 	}

+}
+
+func TestParseURI(t *testing.T) {
+
+	demo := func(height int, name string) {}
+	call := NewRPCFunc(demo, "height,name")
+	cdc := amino.NewCodec()
+
+	cases := []struct {
+		raw    []string
+		height int64
+		name   string
+		fail   bool
+	}{
+		// can parse numbers unquoted and strings quoted
+		{[]string{"7", `"flew"`}, 7, "flew", false},
+		{[]string{"22", `"john"`}, 22, "john", false},
+		{[]string{"-10", `"bob"`}, -10, "bob", false},
+		// can parse numbers quoted, too
+		{[]string{`"7"`, `"flew"`}, 7, "flew", false},
+		{[]string{`"-10"`, `"bob"`}, -10, "bob", false},
+		// can't parse strings unquoted
+		{[]string{`"-10"`, `bob`}, -10, "bob", true},
+	}
+	for idx, tc := range cases {
+		i := strconv.Itoa(idx)
+		url := fmt.Sprintf(
+			"test.com/method?height=%v&name=%v",
+			tc.raw[0], tc.raw[1])
+		req, err := http.NewRequest("GET", url, nil)
+		assert.NoError(t, err)
+		vals, err := httpParamsToArgs(call, cdc, req)
+		if tc.fail {
+			assert.NotNil(t, err, i)
+		} else {
+			assert.Nil(t, err, "%s: %+v", i, err)
+			if assert.Equal(t, 2, len(vals), i) {
+				assert.Equal(t, tc.height, vals[0].Int(), i)
+				assert.Equal(t, tc.name, vals[1].String(), i)
+			}
+		}
+	}
 }
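TestParseURI above pins down which shapes of URI arguments are accepted end to end. The gatekeeper for the unquoted forms is just RE_INT; a tiny self-contained sketch of which raw arguments take the re-quoting path (the reInt name and the sample values are illustrative):

package main

import (
	"fmt"
	"regexp"
)

// reInt mirrors RE_INT from http_params.go: an optional minus sign
// followed by one or more digits, anchored at both ends.
var reInt = regexp.MustCompile(`^-?[0-9]+$`)

func main() {
	for _, arg := range []string{"7", "-10", `"7"`, "0x1A", "bob"} {
		// Only bare integers match; they get wrapped in quotes and sent
		// through the same JSON decoding path as quoted arguments.
		fmt.Printf("%-6s matches: %v\n", arg, reInt.MatchString(arg))
	}
}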
"github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) var routes = map[string]*rpcserver.RPCFunc{ diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 1d6f865c2..915911818 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -8,10 +8,10 @@ import ( "strings" "time" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" cfg "github.com/tendermint/tendermint/config" nm "github.com/tendermint/tendermint/node" diff --git a/scripts/dep_utils/parse.sh b/scripts/dep_utils/parse.sh deleted file mode 100644 index e6519efa0..000000000 --- a/scripts/dep_utils/parse.sh +++ /dev/null @@ -1,14 +0,0 @@ -#! /bin/bash - -set +u -if [[ "$DEP" == "" ]]; then - DEP=$GOPATH/src/github.com/tendermint/tendermint/Gopkg.lock -fi -set -u - - -set -euo pipefail - -LIB=$1 - -grep -A100 "$LIB" "$DEP" | grep revision | head -n1 | grep -o '"[^"]\+"' | cut -d '"' -f 2 diff --git a/scripts/install/install_tendermint_osx.sh b/scripts/install/install_tendermint_osx.sh new file mode 100644 index 000000000..b4107ab01 --- /dev/null +++ b/scripts/install/install_tendermint_osx.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# XXX: this script is intended to be run from +# an MacOS machine + +# as written, this script will install +# tendermint core from master branch +REPO=github.com/tendermint/tendermint + +# change this to a specific release or branch +BRANCH=master + +if ! [ -x "$(command -v brew)" ]; then + echo 'Error: brew is not installed, to install brew' >&2 + echo 'follow the instructions here: https://docs.brew.sh/Installation' >&2 + exit 1 +fi + +if ! [ -x "$(command -v go)" ]; then + echo 'Error: go is not installed, to install go follow' >&2 + echo 'the instructions here: https://golang.org/doc/install#tarball' >&2 + echo 'ALSO MAKE SURE TO SETUP YOUR $GOPATH and $GOBIN in your ~/.profile: https://github.com/golang/go/wiki/SettingGOPATH' >&2 + exit 1 +fi + +if ! [ -x "$(command -v make)" ]; then + echo 'Make not installed, installing using brew...' + brew install make +fi + +# get the code and move into repo +go get $REPO +cd $GOPATH/src/$REPO + +# build & install +git checkout $BRANCH +# XXX: uncomment if branch isn't master +# git fetch origin $BRANCH +make get_tools +make get_vendor_deps +make install diff --git a/scripts/install_abci_apps.sh b/scripts/install_abci_apps.sh deleted file mode 100644 index eb70070df..000000000 --- a/scripts/install_abci_apps.sh +++ /dev/null @@ -1,12 +0,0 @@ -#! 
/bin/bash - -# get the abci commit used by tendermint -COMMIT=$(bash scripts/dep_utils/parse.sh abci) -echo "Checking out vendored commit for abci: $COMMIT" - -go get -d github.com/tendermint/abci -cd "$GOPATH/src/github.com/tendermint/abci" || exit -git checkout "$COMMIT" -make get_tools -make get_vendor_deps -make install diff --git a/scripts/slate.sh b/scripts/slate.sh deleted file mode 100644 index e18babea7..000000000 --- a/scripts/slate.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -if [ "$CIRCLE_BRANCH" == "" ]; then - echo "this script is meant to be run on CircleCI, exiting" - echo 1 -fi - -# check for changes in the `rpc/core` directory -did_rpc_change=$(git diff --name-status $CIRCLE_BRANCH origin/master | grep rpc/core) - -if [ "$did_rpc_change" == "" ]; then - echo "no changes detected in rpc/core, exiting" - exit 0 -else - echo "changes detected in rpc/core, continuing" -fi - -# only run this script on changes to rpc/core committed to develop -if [ "$CIRCLE_BRANCH" != "master" ]; then - echo "the branch being built isn't master, exiting" - exit 0 -else - echo "on master, building the RPC docs" -fi - -# godoc2md used to convert the go documentation from -# `rpc/core` into a markdown file consumed by Slate -go get github.com/davecheney/godoc2md - -# slate works via forks, and we'll be committing to -# master branch, which will trigger our fork to run -# the `./deploy.sh` and publish via the `gh-pages` branch -slate_repo=github.com/tendermint/slate -slate_path="$GOPATH"/src/"$slate_repo" - -if [ ! -d "$slate_path" ]; then - git clone https://"$slate_repo".git $slate_path -fi - -# the main file we need to update if rpc/core changed -destination="$slate_path"/source/index.html.md - -# we remove it then re-create it with the latest changes -rm $destination - -header="--- -title: RPC Reference - -language_tabs: - - shell - - go - -toc_footers: - - Tendermint - - Documentation Powered by Slate - -search: true ----" - -# write header to the main slate file -echo "$header" > "$destination" - -# generate a markdown from the godoc comments, using a template -rpc_docs=$(godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$') - -# append core RPC docs -echo "$rpc_docs" >> "$destination" - -# commit the changes -cd $slate_path - -git config --global user.email "github@tendermint.com" -git config --global user.name "tenderbot" - -git commit -a -m "Update tendermint RPC docs via CircleCI" -git push -q https://${GITHUB_ACCESS_TOKEN}@github.com/tendermint/slate.git master diff --git a/scripts/wire2amino.go b/scripts/wire2amino.go index 72f472dac..26069b505 100644 --- a/scripts/wire2amino.go +++ b/scripts/wire2amino.go @@ -9,8 +9,10 @@ import ( "time" "github.com/tendermint/go-amino" - crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tendermint/crypto/ed25519" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" + + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" @@ -29,9 +31,8 @@ type Genesis struct { ConsensusParams *types.ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators"` AppHash cmn.HexBytes `json:"app_hash"` - AppStateJSON json.RawMessage `json:"app_state,omitempty"` + AppState 
json.RawMessage `json:"app_state,omitempty"` AppOptions json.RawMessage `json:"app_options,omitempty"` // DEPRECATED - } type NodeKey struct { @@ -59,7 +60,7 @@ func convertNodeKey(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { return nil, err } - var privKey crypto.PrivKeyEd25519 + var privKey ed25519.PrivKeyEd25519 copy(privKey[:], nodeKey.PrivKey.Data) nodeKeyNew := p2p.NodeKey{privKey} @@ -78,10 +79,10 @@ func convertPrivVal(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { return nil, err } - var privKey crypto.PrivKeyEd25519 + var privKey ed25519.PrivKeyEd25519 copy(privKey[:], privVal.PrivKey.Data) - var pubKey crypto.PubKeyEd25519 + var pubKey ed25519.PubKeyEd25519 copy(pubKey[:], privVal.PubKey.Data) privValNew := privval.FilePV{ @@ -112,16 +113,16 @@ func convertGenesis(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { ChainID: genesis.ChainID, ConsensusParams: genesis.ConsensusParams, // Validators - AppHash: genesis.AppHash, - AppStateJSON: genesis.AppStateJSON, + AppHash: genesis.AppHash, + AppState: genesis.AppState, } if genesis.AppOptions != nil { - genesisNew.AppStateJSON = genesis.AppOptions + genesisNew.AppState = genesis.AppOptions } for _, v := range genesis.Validators { - var pubKey crypto.PubKeyEd25519 + var pubKey ed25519.PubKeyEd25519 copy(pubKey[:], v.PubKey.Data) genesisNew.Validators = append( genesisNew.Validators, @@ -143,7 +144,7 @@ func convertGenesis(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { func main() { cdc := amino.NewCodec() - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) args := os.Args[1:] if len(args) != 1 { diff --git a/state/errors.go b/state/errors.go index afb5737d7..d40c7e141 100644 --- a/state/errors.go +++ b/state/errors.go @@ -1,7 +1,7 @@ package state import ( - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type ( diff --git a/state/execution.go b/state/execution.go index 0d6ee81bf..f38f5e0bb 100644 --- a/state/execution.go +++ b/state/execution.go @@ -5,10 +5,10 @@ import ( fail "github.com/ebuchman/fail-test" abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" ) //----------------------------------------------------------------------------- @@ -86,7 +86,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX // update the state with the block and responses - state, err = updateState(state, blockID, block.Header, abciResponses) + state, err = updateState(state, blockID, &block.Header, abciResponses) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } @@ -189,7 +189,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, // Begin block _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ Hash: block.Hash(), - Header: types.TM2PB.Header(block.Header), + Header: types.TM2PB.Header(&block.Header), Validators: signVals, ByzantineValidators: byzVals, }) @@ -278,20 +278,24 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat // these are tendermint types now for _, valUpdate := range updates { + if valUpdate.VotingPower < 0 { + return fmt.Errorf("Voting power can't be negative %v", valUpdate) + } + address := valUpdate.Address _, val := 
currentSet.GetByAddress(address)
-		if val == nil {
-			// add val
-			added := currentSet.Add(valUpdate)
-			if !added {
-				return fmt.Errorf("Failed to add new validator %v", valUpdate)
-			}
-		} else if valUpdate.VotingPower == 0 {
+		if valUpdate.VotingPower == 0 {
 			// remove val
 			_, removed := currentSet.Remove(address)
 			if !removed {
 				return fmt.Errorf("Failed to remove validator %X", address)
 			}
+		} else if val == nil {
+			// add val
+			added := currentSet.Add(valUpdate)
+			if !added {
+				return fmt.Errorf("Failed to add new validator %v", valUpdate)
+			}
 		} else {
 			// update val
 			updated := currentSet.Update(valUpdate)
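The reordering above makes the zero-power (removal) branch run before the membership check, so a zero-power update for an unknown validator now errors instead of being silently added. A compact sketch of the resulting decision order, with hypothetical valExists/power inputs standing in for the real validator-set lookup:

package main

import "fmt"

// applyUpdate sketches the branch order used by updateValidators after
// this change: reject negative power, treat zero power as removal,
// otherwise add or update depending on whether the validator exists.
func applyUpdate(valExists bool, power int64) (string, error) {
	switch {
	case power < 0:
		return "", fmt.Errorf("voting power can't be negative: %d", power)
	case power == 0:
		if !valExists {
			return "", fmt.Errorf("can't remove a validator that isn't in the set")
		}
		return "remove", nil
	case !valExists:
		return "add", nil
	default:
		return "update", nil
	}
}

func main() {
	fmt.Println(applyUpdate(false, 20)) // add
	fmt.Println(applyUpdate(true, 0))   // remove
	fmt.Println(applyUpdate(false, 0))  // error: matches the new test case below
}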
diff --git a/state/execution_test.go b/state/execution_test.go
index 71fbe3a4d..81510fb9f 100644
--- a/state/execution_test.go
+++ b/state/execution_test.go
@@ -10,10 +10,10 @@ import (
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
-	crypto "github.com/tendermint/tendermint/crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"

 	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
@@ -149,6 +149,89 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
 	}
 }

+func TestUpdateValidators(t *testing.T) {
+	pubkey1 := ed25519.GenPrivKey().PubKey()
+	val1 := types.NewValidator(pubkey1, 10)
+	pubkey2 := ed25519.GenPrivKey().PubKey()
+	val2 := types.NewValidator(pubkey2, 20)
+
+	testCases := []struct {
+		name string
+
+		currentSet  *types.ValidatorSet
+		abciUpdates []abci.Validator
+
+		resultingSet *types.ValidatorSet
+		shouldErr    bool
+	}{
+		{
+			"adding a validator is OK",
+
+			types.NewValidatorSet([]*types.Validator{val1}),
+			[]abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), 20}},
+
+			types.NewValidatorSet([]*types.Validator{val1, val2}),
+			false,
+		},
+		{
+			"updating a validator is OK",
+
+			types.NewValidatorSet([]*types.Validator{val1}),
+			[]abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey1), 20}},
+
+			types.NewValidatorSet([]*types.Validator{types.NewValidator(pubkey1, 20)}),
+			false,
+		},
+		{
+			"removing a validator is OK",
+
+			types.NewValidatorSet([]*types.Validator{val1, val2}),
+			[]abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), 0}},
+
+			types.NewValidatorSet([]*types.Validator{val1}),
+			false,
+		},
+
+		{
+			"removing a non-existing validator results in error",
+
+			types.NewValidatorSet([]*types.Validator{val1}),
+			[]abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), 0}},
+
+			types.NewValidatorSet([]*types.Validator{val1}),
+			true,
+		},
+
+		{
+			"adding a validator with negative power results in error",
+
+			types.NewValidatorSet([]*types.Validator{val1}),
+			[]abci.Validator{{[]byte{}, types.TM2PB.PubKey(pubkey2), -100}},
+
+			types.NewValidatorSet([]*types.Validator{val1}),
+			true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			err := updateValidators(tc.currentSet, tc.abciUpdates)
+			if tc.shouldErr {
+				assert.Error(t, err)
+			} else {
+				require.Equal(t, tc.resultingSet.Size(), tc.currentSet.Size())
+
+				assert.Equal(t, tc.resultingSet.TotalVotingPower(), tc.currentSet.TotalVotingPower())
+
+				assert.Equal(t, tc.resultingSet.Validators[0].Address, tc.currentSet.Validators[0].Address)
+				if tc.resultingSet.Size() > 1 {
+					assert.Equal(t, tc.resultingSet.Validators[1].Address, tc.currentSet.Validators[1].Address)
+				}
+			}
+		})
+	}
+}
+
 //----------------------------------------------------------------------------

 // make some bogus txs
@@ -163,7 +246,7 @@ func state(nVals, height int) (State, dbm.DB) {
 	vals := make([]types.GenesisValidator, nVals)
 	for i := 0; i < nVals; i++ {
 		secret := []byte(fmt.Sprintf("test%d", i))
-		pk := crypto.GenPrivKeyEd25519FromSecret(secret)
+		pk := ed25519.GenPrivKeyFromSecret(secret)
 		vals[i] = types.GenesisValidator{
 			pk.PubKey(), 1000, fmt.Sprintf("test%d", i),
 		}
diff --git a/state/services.go b/state/services.go
index bf0b1a6f4..c51fa9752 100644
--- a/state/services.go
+++ b/state/services.go
@@ -27,7 +27,7 @@ type Mempool interface {
 	Flush()
 	FlushAppConn() error

-	TxsAvailable() <-chan int64
+	TxsAvailable() <-chan struct{}
 	EnableTxsAvailable()
 }

@@ -43,7 +43,7 @@ func (m MockMempool) Reap(n int) types.Txs { return types.Txs{} }
 func (m MockMempool) Update(height int64, txs types.Txs) error { return nil }
 func (m MockMempool) Flush()                                   {}
 func (m MockMempool) FlushAppConn() error                      { return nil }
-func (m MockMempool) TxsAvailable() <-chan int64               { return make(chan int64) }
+func (m MockMempool) TxsAvailable() <-chan struct{}            { return make(chan struct{}) }
 func (m MockMempool) EnableTxsAvailable()                      {}

 //------------------------------------------------------
diff --git a/state/state_test.go b/state/state_test.go
index 30a87fb05..05c1859ef 100644
--- a/state/state_test.go
+++ b/state/state_test.go
@@ -9,8 +9,9 @@ import (
 	"github.com/stretchr/testify/require"
 	abci "github.com/tendermint/tendermint/abci/types"
 	crypto "github.com/tendermint/tendermint/crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"

 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/types"
@@ -78,7 +79,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
 	abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil}
 	abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil}
 	abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{
-		types.TM2PB.ValidatorFromPubKeyAndPower(crypto.GenPrivKeyEd25519().PubKey(), 10),
+		types.TM2PB.ValidatorFromPubKeyAndPower(ed25519.GenPrivKey().PubKey(), 10),
 	}}

 	saveABCIResponses(stateDB, block.Height, abciResponses)
@@ -219,7 +220,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
 			power++
 		}
 		header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power)
-		state, err = updateState(state, blockID, header, responses)
+		state, err = updateState(state, blockID, &header, responses)
 		assert.Nil(t, err)
 		nextHeight := state.LastBlockHeight + 1
 		saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators)
@@ -260,11 +261,11 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
 	defer tearDown(t)
 	const height = 1

-	pubkey := crypto.GenPrivKeyEd25519().PubKey()
+	pubkey := ed25519.GenPrivKey().PubKey()
 	// swap the first validator with a new one ^^^ (validator set size stays the same)
 	header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey)
 	var err error
-	state, err = updateState(state, blockID, header, responses)
+	state, err = updateState(state, blockID, &header, responses)
 	require.Nil(t, err)
 	nextHeight := state.LastBlockHeight + 1
 	saveValidatorsInfo(stateDB,
nextHeight, state.LastHeightValidatorsChanged, state.Validators) @@ -283,7 +284,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { func genValSet(size int) *types.ValidatorSet { vals := make([]*types.Validator, size) for i := 0; i < size; i++ { - vals[i] = types.NewValidator(crypto.GenPrivKeyEd25519().PubKey(), 10) + vals[i] = types.NewValidator(ed25519.GenPrivKey().PubKey(), 10) } return types.NewValidatorSet(vals) } @@ -321,7 +322,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { cp = params[changeIndex] } header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp) - state, err = updateState(state, blockID, header, responses) + state, err = updateState(state, blockID, &header, responses) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 @@ -370,7 +371,7 @@ func makeParams(blockBytes, blockTx, blockGas, txBytes, } func pk() []byte { - return crypto.GenPrivKeyEd25519().PubKey().Bytes() + return ed25519.GenPrivKey().PubKey().Bytes() } func TestApplyUpdates(t *testing.T) { @@ -420,7 +421,7 @@ func TestApplyUpdates(t *testing.T) { } func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, - pubkey crypto.PubKey) (*types.Header, types.BlockID, *ABCIResponses) { + pubkey crypto.PubKey) (types.Header, types.BlockID, *ABCIResponses) { block := makeBlock(state, height) abciResponses := &ABCIResponses{ @@ -442,7 +443,7 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, } func makeHeaderPartsResponsesValPowerChange(state State, height int64, - power int64) (*types.Header, types.BlockID, *ABCIResponses) { + power int64) (types.Header, types.BlockID, *ABCIResponses) { block := makeBlock(state, height) abciResponses := &ABCIResponses{ @@ -463,7 +464,7 @@ func makeHeaderPartsResponsesValPowerChange(state State, height int64, } func makeHeaderPartsResponsesParams(state State, height int64, - params types.ConsensusParams) (*types.Header, types.BlockID, *ABCIResponses) { + params types.ConsensusParams) (types.Header, types.BlockID, *ABCIResponses) { block := makeBlock(state, height) abciResponses := &ABCIResponses{ @@ -476,14 +477,3 @@ type paramsChangeTestCase struct { height int64 params types.ConsensusParams } - -func makeHeaderPartsResults(state State, height int64, - results []*abci.ResponseDeliverTx) (*types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, height) - abciResponses := &ABCIResponses{ - DeliverTx: results, - EndBlock: &abci.ResponseEndBlock{}, - } - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} diff --git a/state/store.go b/state/store.go index 13e25a958..3a1a6231b 100644 --- a/state/store.go +++ b/state/store.go @@ -4,9 +4,9 @@ import ( "fmt" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" ) //------------------------------------------------------------------------ @@ -176,8 +176,13 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { if valInfo.ValidatorSet == nil { valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged) if valInfo2 == nil { - cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as - last changed from height %d`, valInfo.LastHeightChanged, height)) + panic( + fmt.Sprintf( + "Couldn't find validators at height %d as last changed from height %d", + 
valInfo.LastHeightChanged, + height, + ), + ) } valInfo = valInfo2 } @@ -240,11 +245,17 @@ func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error) } if paramsInfo.ConsensusParams == empty { - paramsInfo = loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) - if paramsInfo == nil { - cmn.PanicSanity(fmt.Sprintf(`Couldn't find consensus params at height %d as - last changed from height %d`, paramsInfo.LastHeightChanged, height)) + paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) + if paramsInfo2 == nil { + panic( + fmt.Sprintf( + "Couldn't find consensus params at height %d as last changed from height %d", + paramsInfo.LastHeightChanged, + height, + ), + ) } + paramsInfo = paramsInfo2 } return paramsInfo.ConsensusParams, nil diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index 264be1fd8..088252f5e 100644 --- a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -3,7 +3,7 @@ package txindex import ( "context" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" ) diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 718a55d15..707325929 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -10,8 +10,8 @@ import ( "time" "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/state/txindex" diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index cb718a5fa..1272f4a73 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - db "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + db "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/state/txindex" diff --git a/state/validation.go b/state/validation.go index 84a4cc824..c36339203 100644 --- a/state/validation.go +++ b/state/validation.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) //----------------------------------------------------- diff --git a/state/validation_test.go b/state/validation_test.go index b4695b077..362a40737 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) func TestValidateBlock(t *testing.T) { diff --git a/state/wire.go b/state/wire.go index af743c7b8..eeb156d67 100644 --- a/state/wire.go +++ b/state/wire.go @@ -2,11 +2,11 @@ package state import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" ) var cdc = amino.NewCodec() func init() { - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) } diff --git a/test/docker/Dockerfile 
b/test/docker/Dockerfile
index bc211ea43..8b69d27fe 100644
--- a/test/docker/Dockerfile
+++ b/test/docker/Dockerfile
@@ -21,15 +21,12 @@ ADD Makefile Makefile
 RUN make get_tools
 RUN make get_vendor_deps

-# Install the apps
-ADD scripts scripts
-RUN bash scripts/install_abci_apps.sh
-
 # Now copy in the code
 # NOTE: this will overwrite whatever is in vendor/
 COPY . $REPO

 RUN go install ./cmd/tendermint
+RUN go install ./abci/cmd/abci-cli

 # expose the volume for debugging
 VOLUME $REPO
diff --git a/test/test_cover.sh b/test/test_cover.sh
index 5f2dea3ee..17df139e6 100644
--- a/test/test_cover.sh
+++ b/test/test_cover.sh
@@ -1,6 +1,6 @@
 #! /bin/bash

-PKGS=$(go list github.com/tendermint/tendermint/... | grep -v /vendor/)
+PKGS=$(go list github.com/tendermint/tendermint/...)

 set -e

diff --git a/tools/README.md b/tools/README.md
new file mode 100644
index 000000000..aeb411410
--- /dev/null
+++ b/tools/README.md
@@ -0,0 +1,3 @@
+# tools
+
+Tools for working with tendermint and associated technologies. Documentation can be found in the `README.md` of each of the `tm-bench/` and `tm-monitor/` directories.
diff --git a/tools/build/.gitignore b/tools/build/.gitignore
new file mode 100644
index 000000000..9974388f1
--- /dev/null
+++ b/tools/build/.gitignore
@@ -0,0 +1,4 @@
+BUILD
+RPMS
+SPECS
+tmp
diff --git a/tools/build/LICENSE b/tools/build/LICENSE
new file mode 100644
index 000000000..bb66bb350
--- /dev/null
+++ b/tools/build/LICENSE
@@ -0,0 +1,204 @@
+Tendermint Core
+License: Apache2.0
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship.
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 All in Bits, Inc
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/tools/build/Makefile b/tools/build/Makefile
new file mode 100644
index 000000000..a47644b63
--- /dev/null
+++ b/tools/build/Makefile
@@ -0,0 +1,289 @@
+##
+# Extra checks, because we do not use autoconf.
+##
+
+requirements_check = true
+gpg_check = false
+go_min_version = 1.9.4
+gpg_key = 2122CBE9
+
+ifeq ($(requirements_check),true)
+ifndef GOPATH
+$(error GOPATH not set)
+else
+go_version := $(shell go version | sed "s/^.* go\([0-9\.]*\) .*$$/\1/" )
+$(info Found go version $(go_version))
+go_version_check := $(shell echo -e "$(go_min_version)\n$(go_version)" | sort -V | head -1)
+ifneq ($(go_min_version),$(go_version_check))
+$(error go version $(go_min_version) or above is required)
+endif
+endif
+ifeq ($(gpg_check),true)
+gpg_check := $(shell gpg -K | grep '/$(gpg_key) ' | sed 's,^.*/\($(gpg_key)\) .*$$,\1,')
+ifneq ($(gpg_check),$(gpg_key))
+$(error GPG key $(gpg_key) not found.)
+else +$(info GPG key $(gpg_key) found) +endif +ifndef GPG_PASSPHRASE +$(error GPG_PASSPHRASE not set) +endif +endif +endif + +### +# Here comes the real deal +### + +binaries = tendermint basecoind ethermint gaia +build-binaries = build-tendermint build-basecoind build-ethermint build-gaia +package-rpm = package-rpm-tendermint package-rpm-basecoind package-rpm-ethermint package-rpm-gaia +install-rpm = install-rpm-tendermint install-rpm-basecoind install-rpm-ethermint install-rpm-gaia +package-deb = package-deb-tendermint package-deb-basecoind package-deb-ethermint package-deb-gaia +install-deb = install-deb-tendermint install-deb-basecoind install-deb-ethermint install-deb-gaia + +all: $(binaries) +build: $(build-binaries) +package: $(package-rpm) $(package-deb) +install: $(install-rpm) $(install-deb) +$(binaries): %: build-% package-rpm-% package-deb-% + +### +# Build the binaries +### + +git-branch: + $(eval GIT_BRANCH=$(shell echo $${GIT_BRANCH:-master})) + +gopath-setup: + test -d $(GOPATH) || mkdir -p $(GOPATH) + test -d $(GOPATH)/bin || mkdir -p $(GOPATH)/bin + test -d $(GOPATH)/src || mkdir -p $(GOPATH)/src + +build-tendermint: git-branch gopath-setup + @echo "*** Building tendermint" + go get -d -u github.com/tendermint/tendermint/cmd/tendermint + cd $(GOPATH)/src/github.com/tendermint/tendermint && git checkout "$(GIT_BRANCH)" && git pull + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint get_tools get_vendor_deps build + cp $(GOPATH)/src/github.com/tendermint/tendermint/build/tendermint $(GOPATH)/bin + @echo "*** Built tendermint" + +build-ethermint: git-branch gopath-setup + @echo "*** Building ethermint" + go get -d -u github.com/tendermint/ethermint/cmd/ethermint + cd $(GOPATH)/src/github.com/tendermint/ethermint && git checkout "$(GIT_BRANCH)" && git pull + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint get_vendor_deps build + cp $(GOPATH)/src/github.com/tendermint/ethermint/build/ethermint $(GOPATH)/bin + @echo "*** Built ethermint" + +build-gaia: git-branch gopath-setup + @echo "*** Building gaia" + go get -d -u go github.com/cosmos/gaia || echo "Workaround for go downloads." 
+	cd $(GOPATH)/src/github.com/cosmos/gaia && git checkout "$(GIT_BRANCH)" && git pull
+	export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia get_vendor_deps install
+	@echo "*** Built gaia"
+
+build-basecoind: git-branch gopath-setup
+	@echo "*** Building basecoind from cosmos-sdk"
+	go get -d -u github.com/cosmos/cosmos-sdk/examples/basecoin/cmd/basecoind
+	cd $(GOPATH)/src/github.com/cosmos/cosmos-sdk && git checkout "$(GIT_BRANCH)" && git pull
+	export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk get_tools get_vendor_deps build
+	cp $(GOPATH)/src/github.com/cosmos/cosmos-sdk/build/basecoind $(GOPATH)/bin/basecoind
+	@echo "*** Built basecoind from cosmos-sdk"
+
+###
+# Prepare package files
+###
+
+# set app_version
+version-%:
+	@echo "Checking if binary exists"
+	test -f $(GOPATH)/bin/$*
+	@echo "BUILD_NUMBER is $(BUILD_NUMBER)"
+	test -n "$(BUILD_NUMBER)"
+	$(eval $*_version=$(shell $(GOPATH)/bin/$* version | head -1 | cut -d- -f1 | sed 's/^\(ethermint:\s*\|\)\(v\|\)//' | tr -d '\t '))
+
+# set build_folder
+folder-%: version-%
+	$(eval build_folder=BUILD/$*-$($*_version)-$(BUILD_NUMBER))
+
+# clean up folder structure for package files
+prepare-files = rm -rf $(build_folder) && mkdir -p $(build_folder) && cp -r ./$(1)/* $(build_folder) && mkdir -p $(build_folder)/usr/bin && cp $(GOPATH)/bin/$(1) $(build_folder)/usr/bin
+
+##
+## Package customizations for the different applications
+##
+
+prepare-tendermint =
+prepare-ethermint = mkdir -p $(build_folder)/etc/ethermint && \
+	cp $(GOPATH)/src/github.com/tendermint/ethermint/setup/genesis.json $(build_folder)/etc/ethermint/genesis.json && \
+	cp -r $(GOPATH)/src/github.com/tendermint/ethermint/setup/keystore $(build_folder)/etc/ethermint
+prepare-gaia =
+prepare-basecoind = cp $(GOPATH)/bin/basecoind $(build_folder)/usr/bin
+
+###
+# Package the binary for CentOS/RedHat (RPM) and Debian/Ubuntu (DEB)
+###
+
+# Depends on rpmbuild, sorry, this can only be built on CentOS/RedHat machines.
+package-rpm-%: folder-%
+	@echo "*** Packaging RPM $* version $($*_version)"
+
+	$(call prepare-files,$*)
+	$(call prepare-$*)
+
+	rm -rf $(build_folder)/DEBIAN
+	mkdir -p $(build_folder)/usr/share/licenses/$*
+	cp ./LICENSE $(build_folder)/usr/share/licenses/$*/LICENSE
+	chmod -Rf a+rX,u+w,g-w,o-w $(build_folder)
+
+	mkdir -p {SPECS,tmp}
+
+	./generate-spec $* spectemplates SPECS
+	sed -i "s/@VERSION@/$($*_version)/" SPECS/$*.spec
+	sed -i "s/@BUILD_NUMBER@/$(BUILD_NUMBER)/" SPECS/$*.spec
+	sed -i "s/@PACKAGE_NAME@/$*/" SPECS/$*.spec
+
+	rpmbuild -bb SPECS/$*.spec --define "_topdir `pwd`" --define "_tmppath `pwd`/tmp"
+	./sign RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm "$(gpg_key)" "`which gpg`"
+	rpm -Kv RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm || echo "rpm returns non-zero exit for some reason.
($?)" + @echo "*** Packaged RPM $* version $($*_version)" + +package-deb-%: folder-% + @echo "*** Packaging DEB $* version $($*_version)-$(BUILD_NUMBER)" + + $(call prepare-files,$*) + $(call prepare-$*) + + mkdir -p $(build_folder)/usr/share/doc/$* + cp $(build_folder)/DEBIAN/copyright $(build_folder)/usr/share/doc/$* + chmod -Rf a+rX,u+w,g-w,o-w $(build_folder) + + sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/changelog + sed -i "s/@STABILITY@/stable/" $(build_folder)/DEBIAN/changelog + sed -i "s/@DATETIMESTAMP@/`date +%a,\ %d\ %b\ %Y\ %T\ %z`/" $(build_folder)/DEBIAN/changelog + sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/control + + gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.gz + gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.amd64.gz + sed -i "s/@INSTALLEDSIZE@/`du -ks $(build_folder) | cut -f 1`/" $(build_folder)/DEBIAN/control + + cd $(build_folder) && tar --owner=root --group=root -cvJf ../../tmp/data.tar.xz --exclude DEBIAN * + cd $(build_folder)/DEBIAN && tar --owner=root --group=root -cvzf ../../../tmp/control.tar.gz * + echo "2.0" > tmp/debian-binary + + cp ./_gpg tmp/ + cd tmp && sed -i "s/@DATETIMESTAMP@/`date +%a\ %b\ %d\ %T\ %Y`/" _gpg + cd tmp && sed -i "s/@BINMD5@/`md5sum debian-binary | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@BINSHA1@/`sha1sum debian-binary | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@BINSIZE@/`stat -c %s debian-binary | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@CONMD5@/`md5sum control.tar.gz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@CONSHA1@/`sha1sum control.tar.gz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@CONSIZE@/`stat -c %s control.tar.gz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@DATMD5@/`md5sum data.tar.xz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@DATSHA1@/`sha1sum data.tar.xz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@DATSIZE@/`stat -c %s data.tar.xz | cut -d\ -f1`/" _gpg + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --clearsign tmp/_gpg + mv tmp/_gpg.asc tmp/_gpgbuilder + ar r tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder + mv tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb RPMS/ + rm tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder tmp/_gpg + @echo "*** Packaged DEB $* version $($*_version)-$(BUILD_NUMBER)" + +install-rpm-%: version-% +#Make sure your host has the IAM role to read/write the S3 bucket OR that you set up ~/.boto + @echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm to AWS $(DEVOPS_PATH)CentOS repository" + aws s3 sync s3://tendermint-packages/$(DEVOPS_PATH)centos/ tmp/s3/ --delete + mkdir -p tmp/s3/7/os/x86_64/Packages + cp RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm tmp/s3/7/os/x86_64/Packages + cp ./RPM-GPG-KEY-Tendermint tmp/s3/7/os/x86_64/ + cp ./tendermint.repo tmp/s3/7/os/x86_64/ + rm -f tmp/s3/7/os/x86_64/repodata/*.bz2 tmp/s3/7/os/x86_64/repodata/*.gz tmp/s3/7/os/x86_64/repodata/repomd.xml.asc + createrepo tmp/s3/7/os/x86_64/Packages -u https://tendermint-packages.interblock.io/$(DEVOPS_PATH)centos/7/os/x86_64/Packages -o tmp/s3/7/os/x86_64 --update -S --repo Tendermint --content tendermint --content basecoind --content ethermint + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --detach-sign -a tmp/s3/7/os/x86_64/repodata/repomd.xml + aws s3 sync tmp/s3/ s3://tendermint-packages/$(DEVOPS_PATH)centos/ --delete --acl public-read + @echo "*** 
Uploaded $* to AWS $(DEVOPS_PATH)CentOS repository" + +install-deb-%: version-% + @echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS $(DEVOPS_PATH)Debian repository" + @echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded" + test ! -f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb + aws s3 sync s3://tendermint-packages/$(DEVOPS_PATH)debian/ tmp/debian-s3/ --delete + @echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded" + test ! -f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb + cp ./tendermint.list tmp/debian-s3/ + mkdir -p tmp/debian-s3/pool tmp/debian-s3/dists/stable/main/binary-amd64 + cp RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-s3/pool + cp ./Release_amd64 tmp/debian-s3/dists/stable/main/binary-amd64/Release + + #Packages / Packages.gz + + echo > tmp/Package + echo "Filename: pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb" >> tmp/Package + echo "MD5sum: `md5sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + echo "SHA1: `sha1sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + echo "SHA256: `sha256sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + echo "Size: `stat -c %s RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + cat BUILD/$*-$($*_version)-$(BUILD_NUMBER)/DEBIAN/control >> tmp/Package + + cat tmp/Package >> tmp/debian-s3/dists/stable/main/binary-amd64/Packages + rm -f tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz + gzip -c tmp/debian-s3/dists/stable/main/binary-amd64/Packages > tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz + rm -f tmp/Package + + #main / Release / InRelease / Release.gpg + + cp ./Release tmp/debian-s3/dists/stable/main/Release + rm -f tmp/debian-s3/dists/stable/main/InRelease + rm -f tmp/debian-s3/dists/stable/main/Release.gpg + + echo "MD5Sum:" >> tmp/debian-s3/dists/stable/main/Release + cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA1:" >> tmp/debian-s3/dists/stable/main/Release + cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA256:" >> tmp/debian-s3/dists/stable/main/Release + cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/main/Release + mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/Release.gpg + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/main/Release + mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/InRelease + + #stable / Release / InRelease / Release.gpg + + cp ./Release tmp/debian-s3/dists/stable/Release + rm -f tmp/debian-s3/dists/stable/InRelease + rm -f tmp/debian-s3/dists/stable/Release.gpg + + echo "MD5Sum:" >> tmp/debian-s3/dists/stable/Release + cd tmp/debian-s3/dists/stable && for f in `find . 
-type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA1:" >> tmp/debian-s3/dists/stable/Release + cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA256:" >> tmp/debian-s3/dists/stable/Release + cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/Release + mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/Release.gpg + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/Release + mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/InRelease + + aws s3 sync tmp/debian-s3/ s3://tendermint-packages/$(DEVOPS_PATH)debian/ --delete --acl public-read + @echo "*** Uploaded $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS $(DEVOPS_PATH)Debian repository" + +mostlyclean: + rm -rf {BUILDROOT,SOURCES,SPECS,SRPMS,tmp} + +clean: mostlyclean + rm -rf {BUILD,RPMS} + +distclean: clean + rm -rf $(GOPATH)/src/github.com/tendermint/tendermint + rm -rf $(GOPATH)/src/github.com/cosmos/cosmos-sdk + rm -rf $(GOPATH)/src/github.com/tendermint/ethermint + rm -rf $(GOPATH)/bin/tendermint + rm -rf $(GOPATH)/bin/basecoind + rm -rf $(GOPATH)/bin/ethermint + rm -rf $(GOPATH)/bin/gaia + +.PHONY : clean + diff --git a/tools/build/RPM-GPG-KEY-Tendermint b/tools/build/RPM-GPG-KEY-Tendermint new file mode 100644 index 000000000..e6f200d87 --- /dev/null +++ b/tools/build/RPM-GPG-KEY-Tendermint @@ -0,0 +1,19 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.22 (GNU/Linux) + +mQENBFk97ngBCADaiPQFKJI7zWYdUKqC490DzY9g9LatsWoJErK5LuMXwEnF5i+a +UkygueukA4C5U7L71l5EeOB9rtb6AbkF4IEZsmmp93APec/3Vfbac9xvK4dBdiht +F8SrazPdHeR6AKcZH8ZpG/+mdONvGb/gEgtxVjaeIJFpCbjKLlKEXazh2zamhhth +q+Nn/17QmI3KBiaGqQK5w4kGZ4mZPy6fXMQhW5dDMq9f4anlGIAYi9O53dVxsx2S +5d+NHuGer5Ps0u6WMJi/e+UT2EGwzP6ygOxkIjyhMFuVftabOtSSrRHHetw8UAaI +N/RPn2gSbQtOQ7unzHDXp3/o6/r2nDEErPyJABEBAAG0LkdyZWcgU3phYm8gKFRl +bmRlcm1pbnQpIDxncmVnQHBoaWxvc29iZWFyLmNvbT6JATkEEwECACMFAlk97ngC +GwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAKCRDIkIHIISLL6bX/CACXTKmO +u5XgvJICH0pHNeVS5/4Om1Rsg1xNmEkGFBP8N2fqn576exbOLgWLSyNHTEyrJNoc +iTeUtod2qqbVGwRgWm1zeiP8NBYiQ9SUbqskIqcPavJNGWIxsCB0p/odoZah8xSj +tGrkoyoxrc+7z2JgKYK8SVSkJXQkzuc5/ZlY85ci5gPKQhlo5YDqGo+4U9n/Ieo5 +nkF8LBalFC2j7A7sQNroEicpulpGhIq3jyUHtadX01z3pNzuX+wfHX9futoet0YS +tG2007WoPGV0whGnoKxmk0JhwzhscC2XNtJl1GZcwqOOlPU9eGtZuPKj/HBAlRtz +4xTOAcklpg8soqRA +=jNDW +-----END PGP PUBLIC KEY BLOCK----- diff --git a/tools/build/Release b/tools/build/Release new file mode 100644 index 000000000..9003d1320 --- /dev/null +++ b/tools/build/Release @@ -0,0 +1,7 @@ +Origin: Tendermint +Label: Tendermint +Suite: stable +Date: Fri, 16 Jun 2017 19:44:00 UTC +Architectures: amd64 +Components: main +Description: Tendermint repository diff --git a/tools/build/Release_amd64 b/tools/build/Release_amd64 new file mode 100644 index 000000000..1f2ecbfe2 --- /dev/null +++ b/tools/build/Release_amd64 @@ -0,0 +1,5 @@ +Archive: stable +Component: main +Origin: Tendermint +Label: Tendermint +Architecture: amd64 diff --git a/tools/build/_gpg b/tools/build/_gpg new file mode 100644 
index 000000000..73742b5d8 --- /dev/null +++ b/tools/build/_gpg @@ -0,0 +1,8 @@ +Version: 4 +Signer: +Date: @DATETIMESTAMP@ +Role: builder +Files: + @BINMD5@ @BINSHA1@ @BINSIZE@ debian-binary + @CONMD5@ @CONSHA1@ @CONSIZE@ control.tar.gz + @DATMD5@ @DATSHA1@ @DATSIZE@ data.tar.xz diff --git a/tools/build/basecoind/DEBIAN/changelog b/tools/build/basecoind/DEBIAN/changelog new file mode 100644 index 000000000..260718eaf --- /dev/null +++ b/tools/build/basecoind/DEBIAN/changelog @@ -0,0 +1,6 @@ +basecoind (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/cosmos/cosmos-sdk for more information. + + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/tools/build/basecoind/DEBIAN/compat b/tools/build/basecoind/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/tools/build/basecoind/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/tools/build/basecoind/DEBIAN/control b/tools/build/basecoind/DEBIAN/control new file mode 100644 index 000000000..c15d49110 --- /dev/null +++ b/tools/build/basecoind/DEBIAN/control @@ -0,0 +1,14 @@ +Source: basecoind +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Standards-Version: 3.9.6 +Homepage: https://tendermint.com +Package: basecoind +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: basecoind is a Proof-of-Stake cryptocurrency and framework + Basecoind is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins. + diff --git a/tools/build/basecoind/DEBIAN/copyright b/tools/build/basecoind/DEBIAN/copyright new file mode 100644 index 000000000..fe449650c --- /dev/null +++ b/tools/build/basecoind/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: basecoind +Source: https://github.com/cosmos/cosmos-sdk + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. 
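The _gpg manifest template above is filled in by the sed commands in the package-deb-% target, one "MD5 SHA1 SIZE name" line per archive member. A rough Go sketch of the same computation, assuming the three members sit in the working directory (file names taken from the Makefile; everything else is illustrative):

package main

import (
	"crypto/md5"
	"crypto/sha1"
	"fmt"
	"io/ioutil"
)

// manifestLine computes the checksum/size triple that the Makefile
// splices into the @BINMD5@/@BINSHA1@/@BINSIZE@ (etc.) placeholders.
func manifestLine(path string) (string, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf(" %x %x %d %s", md5.Sum(data), sha1.Sum(data), len(data), path), nil
}

func main() {
	for _, f := range []string{"debian-binary", "control.tar.gz", "data.tar.xz"} {
		line, err := manifestLine(f)
		if err != nil {
			fmt.Println("skipping:", err)
			continue
		}
		fmt.Println(line)
	}
}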
diff --git a/tools/build/basecoind/DEBIAN/postinst b/tools/build/basecoind/DEBIAN/postinst new file mode 100644 index 000000000..d7d8f4413 --- /dev/null +++ b/tools/build/basecoind/DEBIAN/postinst @@ -0,0 +1,41 @@ +#!/bin/sh +# postinst script for basecoind +# + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + chown basecoind.basecoind /etc/basecoind + sudo -Hu basecoind basecoind node init --home /etc/basecoind 2B24DEE2364762300168DF19B6C18BCE2D399EA2 + systemctl daemon-reload + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/basecoind/DEBIAN/postrm b/tools/build/basecoind/DEBIAN/postrm new file mode 100644 index 000000000..b84c9f2a4 --- /dev/null +++ b/tools/build/basecoind/DEBIAN/postrm @@ -0,0 +1,41 @@ +#!/bin/sh +# postrm script for basecoin +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + upgrade|failed-upgrade|abort-upgrade) + systemctl daemon-reload + ;; + + purge|remove|abort-install|disappear) + systemctl daemon-reload + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/basecoind/DEBIAN/preinst b/tools/build/basecoind/DEBIAN/preinst new file mode 100644 index 000000000..53124c0ce --- /dev/null +++ b/tools/build/basecoind/DEBIAN/preinst @@ -0,0 +1,38 @@ +#!/bin/sh +# preinst script for basecoind +# + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + if ! grep -q '^basecoind:' /etc/passwd ; then + useradd -k /dev/null -r -m -b /etc basecoind + chmod 755 /etc/basecoind + fi + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/tools/build/basecoind/DEBIAN/prerm b/tools/build/basecoind/DEBIAN/prerm new file mode 100644 index 000000000..18ef42079 --- /dev/null +++ b/tools/build/basecoind/DEBIAN/prerm @@ -0,0 +1,38 @@ +#!/bin/sh +# prerm script for basecoin +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + systemctl stop basecoind 2> /dev/null || : + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset b/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset new file mode 100644 index 000000000..358334fc3 --- /dev/null +++ b/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset @@ -0,0 +1,2 @@ +disable basecoind.service + diff --git a/tools/build/basecoind/etc/systemd/system/basecoind.service b/tools/build/basecoind/etc/systemd/system/basecoind.service new file mode 100644 index 000000000..68b46d84f --- /dev/null +++ b/tools/build/basecoind/etc/systemd/system/basecoind.service @@ -0,0 +1,18 @@ +[Unit] +Description=Basecoind +Requires=network-online.target +After=network-online.target + +[Service] +Environment="BCHOME=/etc/basecoind" +Restart=on-failure +User=basecoind +Group=basecoind +PermissionsStartOnly=true +ExecStart=/usr/bin/basecoind start +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target + diff --git a/tools/build/basecoind/usr/share/basecoind/key.json b/tools/build/basecoind/usr/share/basecoind/key.json new file mode 100644 index 000000000..bdefe8fd4 --- /dev/null +++ b/tools/build/basecoind/usr/share/basecoind/key.json @@ -0,0 +1,12 @@ +{ + "address": "1B1BE55F969F54064628A63B9559E7C21C925165", + "priv_key": { + "type": "ed25519", + "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + } +} + diff --git a/tools/build/basecoind/usr/share/basecoind/key2.json b/tools/build/basecoind/usr/share/basecoind/key2.json new file mode 100644 index 000000000..ddfc6809b --- /dev/null +++ b/tools/build/basecoind/usr/share/basecoind/key2.json @@ -0,0 +1,12 @@ +{ + "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", + "priv_key": { + "type": "ed25519", + "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + }, + "pub_key": { + "type": "ed25519", + "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + } +} + diff --git a/tools/build/ethermint/DEBIAN/changelog b/tools/build/ethermint/DEBIAN/changelog new file mode 100644 index 000000000..76a1fb154 --- /dev/null +++ b/tools/build/ethermint/DEBIAN/changelog @@ -0,0 +1,6 @@ +ethermint (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/tendermint for more information. 
+ + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/tools/build/ethermint/DEBIAN/compat b/tools/build/ethermint/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/tools/build/ethermint/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/tools/build/ethermint/DEBIAN/control b/tools/build/ethermint/DEBIAN/control new file mode 100644 index 000000000..2d8b3b002 --- /dev/null +++ b/tools/build/ethermint/DEBIAN/control @@ -0,0 +1,15 @@ +Source: ethermint +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Depends: tendermint (>=0.11.0) +Standards-Version: 3.9.6 +Homepage: https://tendermint.com +Package: ethermint +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub + Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. This application allows you to get all the benefits of ethereum without having to run your own miners. + diff --git a/tools/build/ethermint/DEBIAN/copyright b/tools/build/ethermint/DEBIAN/copyright new file mode 100644 index 000000000..6d1bab01b --- /dev/null +++ b/tools/build/ethermint/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: ethermint +Source: https://github.com/tendermint/ethermint + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/build/ethermint/DEBIAN/postinst b/tools/build/ethermint/DEBIAN/postinst new file mode 100644 index 000000000..439fdc395 --- /dev/null +++ b/tools/build/ethermint/DEBIAN/postinst @@ -0,0 +1,46 @@ +#!/bin/sh +# postinst script for ethermint +# + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + chown ethermint.ethermint /etc/ethermint + chown ethermint.ethermint /etc/ethermint/genesis.json + chown ethermint.ethermint /etc/ethermint/keystore + chown ethermint.ethermint /etc/ethermint/keystore/UTC--2016-10-21T22-30-03.071787745Z--7eff122b94897ea5b0e2a9abf47b86337fafebdc + + sudo -Hu ethermint /usr/bin/ethermint --datadir /etc/ethermint init /etc/ethermint/genesis.json + sudo -Hu ethermint tendermint init --home /etc/ethermint + systemctl daemon-reload + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/tools/build/ethermint/DEBIAN/postrm b/tools/build/ethermint/DEBIAN/postrm new file mode 100644 index 000000000..f1d9d6afc --- /dev/null +++ b/tools/build/ethermint/DEBIAN/postrm @@ -0,0 +1,41 @@ +#!/bin/sh +# postrm script for ethermint +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + upgrade|failed-upgrade|abort-upgrade) + systemctl daemon-reload + ;; + + purge|remove|abort-install|disappear) + systemctl daemon-reload + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/ethermint/DEBIAN/preinst b/tools/build/ethermint/DEBIAN/preinst new file mode 100644 index 000000000..829112e6b --- /dev/null +++ b/tools/build/ethermint/DEBIAN/preinst @@ -0,0 +1,38 @@ +#!/bin/sh +# preinst script for ethermint +# + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + if ! grep -q '^ethermint:' /etc/passwd ; then + useradd -k /dev/null -r -m -b /etc ethermint + chmod 755 /etc/ethermint + fi + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/ethermint/DEBIAN/prerm b/tools/build/ethermint/DEBIAN/prerm new file mode 100644 index 000000000..00a775cef --- /dev/null +++ b/tools/build/ethermint/DEBIAN/prerm @@ -0,0 +1,38 @@ +#!/bin/sh +# prerm script for ethermint +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + systemctl stop ethermint 2> /dev/null || : + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset b/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset new file mode 100644 index 000000000..836a28c30 --- /dev/null +++ b/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset @@ -0,0 +1,2 @@ +disable ethermint.service + diff --git a/tools/build/ethermint/etc/systemd/system/ethermint.service b/tools/build/ethermint/etc/systemd/system/ethermint.service new file mode 100644 index 000000000..f71a074ea --- /dev/null +++ b/tools/build/ethermint/etc/systemd/system/ethermint.service @@ -0,0 +1,17 @@ +[Unit] +Description=Ethermint +Requires=network-online.target +After=network-online.target + +[Service] +Restart=on-failure +User=ethermint +Group=ethermint +PermissionsStartOnly=true +ExecStart=/usr/bin/ethermint --datadir /etc/ethermint +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target + diff --git a/tools/build/gaia/DEBIAN/changelog b/tools/build/gaia/DEBIAN/changelog new file mode 100644 index 000000000..eca5fbc3d --- /dev/null +++ b/tools/build/gaia/DEBIAN/changelog @@ -0,0 +1,6 @@ +gaia (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/basecoin for more information. + + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/tools/build/gaia/DEBIAN/compat b/tools/build/gaia/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/tools/build/gaia/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/tools/build/gaia/DEBIAN/control b/tools/build/gaia/DEBIAN/control new file mode 100644 index 000000000..55d1cd5dd --- /dev/null +++ b/tools/build/gaia/DEBIAN/control @@ -0,0 +1,14 @@ +Source: gaia +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Standards-Version: 3.9.6 +Homepage: https://cosmos.network +Package: gaia +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: gaia - Tendermint Cosmos delegation game chain + Gaia description comes later. + diff --git a/tools/build/gaia/DEBIAN/copyright b/tools/build/gaia/DEBIAN/copyright new file mode 100644 index 000000000..ffc230134 --- /dev/null +++ b/tools/build/gaia/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: gaia +Source: https://github.com/cosmos/gaia + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. 
diff --git a/tools/build/gaia/DEBIAN/postinst b/tools/build/gaia/DEBIAN/postinst new file mode 100644 index 000000000..427b7c493 --- /dev/null +++ b/tools/build/gaia/DEBIAN/postinst @@ -0,0 +1,41 @@ +#!/bin/sh +# postinst script for gaia +# + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + chown gaia.gaia /etc/gaia + sudo -Hu gaia gaia node init --home /etc/gaia 2B24DEE2364762300168DF19B6C18BCE2D399EA2 + systemctl daemon-reload + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/gaia/DEBIAN/postrm b/tools/build/gaia/DEBIAN/postrm new file mode 100644 index 000000000..da526ec30 --- /dev/null +++ b/tools/build/gaia/DEBIAN/postrm @@ -0,0 +1,41 @@ +#!/bin/sh +# postrm script for gaia +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + upgrade|failed-upgrade|abort-upgrade) + systemctl daemon-reload + ;; + + purge|remove|abort-install|disappear) + systemctl daemon-reload + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/gaia/DEBIAN/preinst b/tools/build/gaia/DEBIAN/preinst new file mode 100644 index 000000000..382fa419f --- /dev/null +++ b/tools/build/gaia/DEBIAN/preinst @@ -0,0 +1,38 @@ +#!/bin/sh +# preinst script for gaia +# + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + if ! grep -q '^gaia:' /etc/passwd ; then + useradd -k /dev/null -r -m -b /etc gaia + chmod 755 /etc/gaia + fi + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/tools/build/gaia/DEBIAN/prerm b/tools/build/gaia/DEBIAN/prerm new file mode 100644 index 000000000..165c1ab6a --- /dev/null +++ b/tools/build/gaia/DEBIAN/prerm @@ -0,0 +1,38 @@ +#!/bin/sh +# prerm script for gaia +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + systemctl stop gaia 2> /dev/null || : + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset b/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset new file mode 100644 index 000000000..dfbf0bc06 --- /dev/null +++ b/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset @@ -0,0 +1,2 @@ +disable gaia.service + diff --git a/tools/build/gaia/etc/systemd/system/gaia.service b/tools/build/gaia/etc/systemd/system/gaia.service new file mode 100644 index 000000000..372fe9343 --- /dev/null +++ b/tools/build/gaia/etc/systemd/system/gaia.service @@ -0,0 +1,17 @@ +[Unit] +Description=Gaia +Requires=network-online.target +After=network-online.target + +[Service] +Restart=on-failure +User=gaia +Group=gaia +PermissionsStartOnly=true +ExecStart=/usr/bin/gaia node start --home=/etc/gaia +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target + diff --git a/tools/build/gaia/usr/share/gaia/key.json b/tools/build/gaia/usr/share/gaia/key.json new file mode 100644 index 000000000..bdefe8fd4 --- /dev/null +++ b/tools/build/gaia/usr/share/gaia/key.json @@ -0,0 +1,12 @@ +{ + "address": "1B1BE55F969F54064628A63B9559E7C21C925165", + "priv_key": { + "type": "ed25519", + "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + } +} + diff --git a/tools/build/gaia/usr/share/gaia/key2.json b/tools/build/gaia/usr/share/gaia/key2.json new file mode 100644 index 000000000..ddfc6809b --- /dev/null +++ b/tools/build/gaia/usr/share/gaia/key2.json @@ -0,0 +1,12 @@ +{ + "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", + "priv_key": { + "type": "ed25519", + "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + }, + "pub_key": { + "type": "ed25519", + "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + } +} + diff --git a/tools/build/generate-spec b/tools/build/generate-spec new file mode 100755 index 000000000..4ca60a1d4 --- /dev/null +++ b/tools/build/generate-spec @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ $# -ne 3 ]; then + echo "Usage: $0 <app> <src> <dst>" + exit 1 +fi + +app=$1 +src=$2 +dst=$3 + +# Find spectemplate +if [ ! -f "$src/$app.spec" ]; then + if [ ! -f "$src/app-template.spec" ]; then + echo "Source template not found."
+ exit 1 + else + srcfile="$src/app-template.spec" + fi +else + srcfile="$src/$app.spec" +fi + +# Copy spectemplate to SPECS +cp "$srcfile" "$dst/$app.spec" + +# Apply any variables defined in .data +if [ -f "$src/$app.data" ]; then + srcdata="$src/$app.data" + source "$srcdata" + for var in `grep -v -e ^# -e ^\s*$ "$srcdata" | grep = | sed 's/\s*=.*$//'` + do + sed -i "s\\@${var}@\\${!var}\\g" "$dst/$app.spec" + done +fi + diff --git a/tools/build/sign b/tools/build/sign new file mode 100755 index 000000000..0371b5d4b --- /dev/null +++ b/tools/build/sign @@ -0,0 +1,26 @@ +#!/usr/bin/expect -f +set timeout 3 +set PACKAGE [lindex $argv 0] +set GPG_NAME [lindex $argv 1] +set GPG_PATH [lindex $argv 2] +set GPG_PASSPHRASE $env(GPG_PASSPHRASE) + +if {[llength $argv] == 0} { + send_user "Usage: ./sign <package> <gpg_name> <gpg_path>\n" + exit 1 +} + +send_user "\nSigning $PACKAGE\n" +spawn rpmsign --resign $PACKAGE --define "_signature gpg" --define "_gpg_name $GPG_NAME" --define "_gpgbin $GPG_PATH" +expect { + timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } + "Enter pass phrase:" +} +send "$GPG_PASSPHRASE\r" +expect { + timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } + "Pass phrase is good." +} +interact +sleep 3 + diff --git a/tools/build/spectemplates/app-template.spec b/tools/build/spectemplates/app-template.spec new file mode 100644 index 000000000..6cb8145bb --- /dev/null +++ b/tools/build/spectemplates/app-template.spec @@ -0,0 +1,55 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: @PACKAGE_NAME@ +Summary: @PACKAGE_SUMMARY@ +License: Apache 2.0 +URL: @PACKAGE_URL@ +Packager: Greg Szabo +@PACKAGE_ADDITIONAL_HEADER@ + +%description +@PACKAGE_DESCRIPTION@ + +%pre +if ! %{__grep} -q '^%{name}:' /etc/passwd ; then + useradd -r -b %{_sysconfdir} %{name} + mkdir -p %{_sysconfdir}/%{name} + chmod 755 %{_sysconfdir}/%{name} + chown %{name}.%{name} %{_sysconfdir}/%{name} +fi + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%post +sudo -Hu %{name} %{name} node init --home %{_sysconfdir}/%{name} 2B24DEE2364762300168DF19B6C18BCE2D399EA2 +systemctl daemon-reload + +%preun
systemctl stop %{name} 2> /dev/null || : + +%postun +systemctl daemon-reload + +%files +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} +%{_bindir}/* +%{_sysconfdir}/systemd/system/* +%{_sysconfdir}/systemd/system-preset/* +%dir %{_datadir}/%{name} +%{_datadir}/%{name}/* +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/tools/build/spectemplates/basecoind.data b/tools/build/spectemplates/basecoind.data new file mode 100644 index 000000000..36b172ecf --- /dev/null +++ b/tools/build/spectemplates/basecoind.data @@ -0,0 +1,5 @@ +PACKAGE_SUMMARY="basecoind is a Proof-of-Stake cryptocurrency and framework" +PACKAGE_URL="https://cosmos.network/" +PACKAGE_ADDITIONAL_HEADER="Provides: basecoind" +PACKAGE_DESCRIPTION="Basecoind is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins."
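For reference, a hypothetical invocation of `generate-spec` tying the pieces above together; paths are illustrative, and any placeholders not defined in the `.data` file (e.g. `@PACKAGE_NAME@`, `@VERSION@`) are presumably filled in elsewhere in the build:

    # Render the basecoind spec: no spectemplates/basecoind.spec exists, so the
    # script falls back to app-template.spec, then substitutes @PACKAGE_SUMMARY@,
    # @PACKAGE_URL@, etc. from spectemplates/basecoind.data.
    ./generate-spec basecoind spectemplates SPECS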
+ diff --git a/tools/build/spectemplates/ethermint.data b/tools/build/spectemplates/ethermint.data new file mode 100644 index 000000000..e9d403db7 --- /dev/null +++ b/tools/build/spectemplates/ethermint.data @@ -0,0 +1,5 @@ +PACKAGE_SUMMARY="ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub" +PACKAGE_URL="https://tendermint.com/" +PACKAGE_ADDITIONAL_HEADER="Provides: ethermint" +PACKAGE_DESCRIPTION="Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. This application allows you to get all the benefits of ethereum without having to run your own miners." + diff --git a/tools/build/spectemplates/ethermint.spec b/tools/build/spectemplates/ethermint.spec new file mode 100644 index 000000000..fc443e35b --- /dev/null +++ b/tools/build/spectemplates/ethermint.spec @@ -0,0 +1,60 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: @PACKAGE_NAME@ +Summary: @PACKAGE_SUMMARY@ +License: Apache 2.0 +URL: @PACKAGE_URL@ +Packager: Greg Szabo +Requires: tendermint >= 0.11.0 +@PACKAGE_ADDITIONAL_HEADER@ + +%description +@PACKAGE_DESCRIPTION@ + +%pre +if ! %{__grep} -q '^%{name}:' /etc/passwd ; then + useradd -r -b %{_sysconfdir} %{name} + mkdir -p %{_sysconfdir}/%{name} + chmod 755 %{_sysconfdir}/%{name} + chown %{name}.%{name} %{_sysconfdir}/%{name} +fi + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%post +sudo -Hu %{name} tendermint init --home %{_sysconfdir}/%{name} +sudo -Hu %{name} %{name} --datadir %{_sysconfdir}/%{name} init %{_sysconfdir}/%{name}/genesis.json + +systemctl daemon-reload + +%preun +systemctl stop %{name} 2> /dev/null || : +systemctl stop %{name}-service 2> /dev/null || : + +%postun +systemctl daemon-reload + +%files +%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} +%config(noreplace) %attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/genesis.json +%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/keystore +%attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/keystore/* +%{_bindir}/* +%{_sysconfdir}/systemd/system/* +%{_sysconfdir}/systemd/system-preset/* +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/tools/build/spectemplates/gaia.data b/tools/build/spectemplates/gaia.data new file mode 100644 index 000000000..7152b1b51 --- /dev/null +++ b/tools/build/spectemplates/gaia.data @@ -0,0 +1,5 @@ +PACKAGE_SUMMARY="gaia - Tendermint Cosmos delegation game chain" +PACKAGE_URL="https://cosmos.network/" +PACKAGE_ADDITIONAL_HEADER="" +PACKAGE_DESCRIPTION="Gaia description comes later." + diff --git a/tools/build/spectemplates/tendermint.spec b/tools/build/spectemplates/tendermint.spec new file mode 100644 index 000000000..68902a170 --- /dev/null +++ b/tools/build/spectemplates/tendermint.spec @@ -0,0 +1,31 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: tendermint +Summary: securely and consistently replicate an application on many machines +License: Apache 2.0 +URL: https://tendermint.com/ +Packager: Greg Szabo + +%description +Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. 
By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%files +%{_bindir}/tendermint +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/tools/build/tendermint.list b/tools/build/tendermint.list new file mode 100644 index 000000000..bba521af5 --- /dev/null +++ b/tools/build/tendermint.list @@ -0,0 +1 @@ +deb http://tendermint-packages.s3-website-us-west-1.amazonaws.com/debian stable main diff --git a/tools/build/tendermint.repo b/tools/build/tendermint.repo new file mode 100644 index 000000000..439f98ecb --- /dev/null +++ b/tools/build/tendermint.repo @@ -0,0 +1,12 @@ +#This is the .repo file for the Tendermint CentOS repositories. +#Although it has only been tested under CentOS 7, it should work under Fedora and RedHat 7 too. +#Currently only 64-bit packages are built. + +[tendermint] +name=Tendermint stable releases repository +baseurl=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64 +gpgcheck=1 +gpgkey=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint +enabled=1 +#sslverify = 1 + diff --git a/tools/build/tendermint/DEBIAN/changelog b/tools/build/tendermint/DEBIAN/changelog new file mode 100644 index 000000000..4b016f845 --- /dev/null +++ b/tools/build/tendermint/DEBIAN/changelog @@ -0,0 +1,6 @@ +tendermint (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/tendermint for more information. + + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/tools/build/tendermint/DEBIAN/compat b/tools/build/tendermint/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/tools/build/tendermint/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/tools/build/tendermint/DEBIAN/control b/tools/build/tendermint/DEBIAN/control new file mode 100644 index 000000000..d9da17dd1 --- /dev/null +++ b/tools/build/tendermint/DEBIAN/control @@ -0,0 +1,14 @@ +Source: tendermint +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Standards-Version: 3.9.6 +Homepage: https://tendermint.com +Package: tendermint +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: securely and consistently replicate an application on many machines + Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. + diff --git a/tools/build/tendermint/DEBIAN/copyright b/tools/build/tendermint/DEBIAN/copyright new file mode 100644 index 000000000..15ee960dd --- /dev/null +++ b/tools/build/tendermint/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: tendermint +Source: https://github.com/tendermint/tendermint + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/mintnet-kubernetes/LICENSE b/tools/mintnet-kubernetes/LICENSE new file mode 100644 index 000000000..64a33ddf1 --- /dev/null +++ b/tools/mintnet-kubernetes/LICENSE @@ -0,0 +1,192 @@ +Copyright (C) 2017 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tools/mintnet-kubernetes/README.rst b/tools/mintnet-kubernetes/README.rst new file mode 100644 index 000000000..9cfdbb8eb --- /dev/null +++ b/tools/mintnet-kubernetes/README.rst @@ -0,0 +1,290 @@ +Using Kubernetes +================ + +.. figure:: assets/t_plus_k.png + :alt: Tendermint plus Kubernetes + + Tendermint plus Kubernetes + +This should primarily be used for testing purposes or for +tightly-defined chains operated by a single stakeholder (see `the +security precautions <#security>`__). If you want to launch an +application with many stakeholders, consider using our set of Ansible +scripts. + +Quick Start +----------- + +For either platform, see the `requirements `__ + +MacOS +^^^^^ + +:: + + curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl + curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.18.0/minikube-darwin-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ + minikube start + + git clone https://github.com/tendermint/tools.git && cd tools/mintnet-kubernetes/examples/basecoin && make create + +Linux +^^^^^ + +:: + + curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl + curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.18.0/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ + minikube start + + git clone https://github.com/tendermint/tools.git && cd tools/mintnet-kubernetes/examples/basecoin && make create + +Verify it worked +~~~~~~~~~~~~~~~~ + +**Using a shell:** + +First wait until all the pods are ``Running``: + +``kubectl get pods -w -o wide -L tm`` + +then query the Tendermint app logs from the first pod: + +``kubectl logs -c tm -f tm-0`` + +finally, use our `REST API <../specification/rpc.html>`__ to fetch the status of the second pod's Tendermint app. + +Note that we are using ``kubectl exec`` because pods are not exposed (and should not be) to the +outer network: + +``kubectl exec -c tm tm-0 -- curl -s http://tm-1.basecoin:26657/status | json_pp`` + +**Using the dashboard:** + +:: + + minikube dashboard + +Clean up +~~~~~~~~ + +:: + + make destroy + +Usage +----- + +Set up a Kubernetes cluster +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- locally using `Minikube `__ +- on GCE with a single click in the web UI +- on AWS using `Kubernetes + Operations `__ +- on Linux machines (Digital Ocean) using + `kubeadm `__ +- on AWS, Azure, GCE or bare metal using `Kargo + (Ansible) `__ + +Please refer to `the official +documentation `__ +for an overview and comparison of the different options.
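+Whichever option you pick, it is worth a quick sanity check that
+``kubectl`` can actually reach the new cluster before going further
+(generic Kubernetes commands, nothing Tendermint-specific):
+
+::
+
+    kubectl cluster-info
+    kubectl get nodes
+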
+ +Kubernetes on Digital Ocean +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Available options: + +- `kubeadm (alpha) `__ +- `kargo `__ +- `rancher `__ +- `terraform `__ + +As you can see, there is no single tool for creating a cluster on DO. +Therefore, choose the one you know and are comfortable working with. If you +have used `terraform `__ before, then choose it. If you +know Ansible, then pick kargo. If none of these seem familiar to you, go with +``kubeadm``. Rancher is a beautiful UI for deploying and managing containers in +production. + +Kubernetes on Google Compute Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Review the `Official Documentation `__ for Kubernetes on Google Compute +Engine. + +**Create a cluster** + +The recommended way is to use `Google Container +Engine `__. You should be able +to create a fully fledged cluster with just a few clicks. + +**Connect to it** + +Install ``gcloud`` as part of the `Google Cloud SDK `__. + +Make sure you have credentials for GCloud by running ``gcloud auth login``. + +In order to make API calls against GCE, you must also run ``gcloud auth +application-default login``. + +Press ``Connect``: + +.. figure:: assets/gce1.png + +and execute the first command in your shell. Then start a proxy by +executing ``kubectl proxy``. + +.. figure:: assets/gce2.png + +Now you should be able to run ``kubectl`` commands to create resources, get +resource info, logs, etc. + +**Make sure you have Kubernetes >= 1.5, because you will be using +StatefulSets, which are a beta feature in 1.5.** + +Create a configuration file +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Download a template: + +:: + + curl -Lo app.yaml https://github.com/tendermint/tools/raw/master/mintnet-kubernetes/app.template.yaml + +Open ``app.yaml`` in your favorite editor and configure your app +container (navigate to ``- name: app``). The Kubernetes DSL (Domain Specific +Language) is very simple, so it should be easy. You will need to set the +Docker image, command, and/or run arguments. Replace the variables prefixed +with ``YOUR_APP`` with the corresponding values. Set the genesis time to now +and a preferred chain ID in the ConfigMap. + +Please note that if you change the ``replicas`` number, do not forget to +update the ``validators`` set in the ConfigMap. You will be able to scale the +cluster up or down later, but new pods (nodes) won't become validators +automatically. + +Deploy your application +^^^^^^^^^^^^^^^^^^^^^^^ + +:: + + kubectl create -f ./app.yaml + +Observe your cluster +^^^^^^^^^^^^^^^^^^^^ + +`web UI `__ + +The easiest way to access the Dashboard is to use ``kubectl``. Run the following +command in your desktop environment: + +:: + + kubectl proxy + +``kubectl`` will handle authentication with the apiserver and make the Dashboard +available at http://localhost:8001/ui + +**shell** + +List all the pods: + +:: + + kubectl get pods -o wide -L tm + +StatefulSet details: + +:: + + kubectl describe statefulsets tm + +First pod details: + +:: + + kubectl describe pod tm-0 + +Tendermint app logs from the first pod: + +:: + + kubectl logs tm-0 -c tm -f + +App logs from the first pod: + +:: + + kubectl logs tm-0 -c app -f + +Status of the second pod's Tendermint app: + +:: + + kubectl exec -c tm tm-0 -- curl -s http://tm-1.<YOUR_APP_NAME>:26657/status | json_pp + +Security +-------- + +Due to the nature of Kubernetes, where you typically have a single +master, the master is a potential SPOF (Single Point Of Failure). Therefore, +you need to make sure that only authorized people can access it, and that +these people have themselves taken basic measures in order not to get hacked.
+ +These are the best practices: + +- all access to the master is over TLS +- access to the API Server is X.509 certificate or token based +- etcd is not exposed directly to the cluster +- ensure that images are free of vulnerabilities + (`1 `__) +- ensure that only authorized images are used in your environment +- disable direct access to Kubernetes nodes (no SSH) +- define resource quota + +Resources: + +- https://kubernetes.io/docs/admin/accessing-the-api/ +- http://blog.kubernetes.io/2016/08/security-best-practices-kubernetes-deployment.html +- https://blog.openshift.com/securing-kubernetes/ + +Fault tolerance +--------------- + +Having a single master (API server) is also a bad thing because if +something happens to it, you risk being left without access to the +application. + +To avoid that, you can `run Kubernetes in multiple +zones `__, with each zone +running an `API +server `__, and load-balance +requests between them. Do not forget to make sure that only one +instance each of the scheduler and controller-manager is running at once. + +Running in multiple zones is a lightweight version of a broader `Cluster +Federation feature `__. +Federated deployments can span multiple regions (not just zones). We +haven't tried this feature yet, so any feedback is highly appreciated, +especially regarding the additional latency and the cost of exchanging data +between the regions. + +Resources: + +- https://kubernetes.io/docs/admin/high-availability/ + +Starting process +---------------- + +.. figure:: assets/statefulset.png + :alt: StatefulSet + + StatefulSet + +Init containers (``tm-gen-validator``) are run before all other +containers, creating a public/private key pair for each pod. Every ``tm`` +container then asks the other pods for their public keys, which are served +by nginx (the ``pub-key`` container). When a ``tm`` container has all the +keys, it forms the genesis file and starts the Tendermint process. diff --git a/tools/mintnet-kubernetes/app.template.yaml b/tools/mintnet-kubernetes/app.template.yaml new file mode 100644 index 000000000..826b2e97f --- /dev/null +++ b/tools/mintnet-kubernetes/app.template.yaml @@ -0,0 +1,265 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: YOUR_APP_NAME + labels: + app: YOUR_APP_NAME +spec: + ports: + - port: 26656 + name: p2p + - port: 26657 + name: rpc + clusterIP: None + selector: + app: tm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tm-config +data: + seeds: "tm-0,tm-1,tm-2,tm-3" + validators: "tm-0,tm-1,tm-2,tm-3" + validator.power: "10" + genesis.json: |- + { + "genesis_time": "2017-01-02T10:10:10.164Z", + "chain_id": "chain-B5XXm5", + "validators": [], + "app_hash": "" + } + pub_key_nginx.conf: |- + server { + listen 80 default_server; + listen [::]:80 default_server ipv6only=on; + location /pub_key.json { root /usr/share/nginx/; } + } +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: tm-budget +spec: + selector: + matchLabels: + app: tm + minAvailable: 2 +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: tm +spec: + serviceName: YOUR_APP_NAME + replicas: 4 + template: + metadata: + labels: + app: tm + version: v1 + annotations: + pod.beta.kubernetes.io/init-containers: '[{ + "name": "tm-gen-validator", + "image": "tendermint/tendermint:0.10.0", + "imagePullPolicy": "IfNotPresent", + "command": ["bash", "-c", " + set -ex\n + if [ !
-f /tendermint/priv_validator.json ]; then\n + tendermint gen_validator > /tendermint/priv_validator.json\n + # pub_key.json will be served by pub-key container\n + cat /tendermint/priv_validator.json | jq \".pub_key\" > /tendermint/pub_key.json\n + fi\n + "], + "volumeMounts": [ + {"name": "tmdir", "mountPath": "/tendermint"} + ] + }]' + spec: + containers: + - name: tm + imagePullPolicy: IfNotPresent + image: tendermint/tendermint:0.10.0 + resources: + requests: + cpu: 50m + memory: 128Mi + limits: + cpu: 100m + memory: 256Mi + ports: + - containerPort: 26656 + name: p2p + - containerPort: 26657 + name: rpc + env: + - name: SEEDS + valueFrom: + configMapKeyRef: + name: tm-config + key: seeds + - name: VALIDATOR_POWER + valueFrom: + configMapKeyRef: + name: tm-config + key: validator.power + - name: VALIDATORS + valueFrom: + configMapKeyRef: + name: tm-config + key: validators + - name: TMHOME + value: /tendermint + command: + - bash + - "-c" + - | + set -ex + + # copy template + cp /etc/tendermint/genesis.json /tendermint/genesis.json + + # fill genesis file with validators + IFS=',' read -ra VALS_ARR <<< "$VALIDATORS" + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + for v in "${VALS_ARR[@]}"; do + # wait until validator generates priv/pub key pair + set +e + + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 5 + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + done + set -e + + # add validator to genesis file along with its pub_key + curl -s "http://$v.$fqdn_suffix/pub_key.json" | jq ". as \$k | {pub_key: \$k, amount: $VALIDATOR_POWER, name: \"$v\"}" > pub_validator.json + cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json + rm pub_validator.json + done + + # construct seeds + IFS=',' read -ra SEEDS_ARR <<< "$SEEDS" + seeds=() + for s in "${SEEDS_ARR[@]}"; do + seeds+=("$s.$fqdn_suffix:26656") + done + seeds=$(IFS=','; echo "${seeds[*]}") + + tendermint node --p2p.seeds="$seeds" --moniker="`hostname`" --proxy_app="unix:///socks/app.sock" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/tendermint/genesis.json + name: configdir + subPath: genesis.json + - name: socksdir + mountPath: /socks + + - name: app + imagePullPolicy: IfNotPresent + image: YOUR_APP_IMAGE + args: ["--addr=\"unix:///socks/app.sock\""] + volumeMounts: + - name: socksdir + mountPath: /socks + + ######## OR ######## + # + # - name: app + # imagePullPolicy: IfNotPresent + # image: golang:1.7.5 + # resources: + # requests: + # cpu: YOUR_APP_CPU_REQ + # memory: YOUR_APP_MEM_REQ + # limits: + # cpu: YOUR_APP_CPU_LIMIT + # memory: YOUR_APP_MEM_LIMIT + # command: + # - bash + # - "-c" + # - | + # set -ex + + # go get -d YOUR_APP_PACKAGE + # cd $GOPATH/YOUR_APP_PACKAGE + # make install + # + # rm -f /socks/app.sock # remove old socket + + # YOUR_APP_EXEC --addr="unix:///socks/app.sock" + # volumeMounts: + # - name: socksdir + # mountPath: /socks + + ######## OPTIONALLY ######## + # + # - name: data + # imagePullPolicy: IfNotPresent + # image: golang:1.7.5 + # command: + # - bash + # - "-c" + # - | + # set -ex + # go get github.com/tendermint/merkleeyes/cmd/merkleeyes + # rm -f /socks/data.sock # remove old socket + # merkleeyes server --address="unix:///socks/data.sock" + # volumeMounts: + # - name: socksdir + # mountPath: /socks + + - name: pub-key + imagePullPolicy: IfNotPresent + image: nginx:1.11.9 + resources: + 
requests: + cpu: 10m + memory: 12Mi + limits: + cpu: 20m + memory: 24Mi + ports: + - containerPort: 80 + name: pub-key + command: + - bash + - "-c" + - | + set -ex + # fixes 403 Permission Denied (open() "/tendermint/pub_key.json" failed (13: Permission denied)) + # => we cannot serve from /tendermint, so we copy the file + mkdir -p /usr/share/nginx + cp /tendermint/pub_key.json /usr/share/nginx/pub_key.json + nginx -g "daemon off;" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/nginx/conf.d/pub_key.conf + name: configdir + subPath: pub_key_nginx.conf + + volumes: + - name: configdir + configMap: + name: tm-config + - name: socksdir + emptyDir: {} + + volumeClaimTemplates: + - metadata: + name: tmdir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 2Gi diff --git a/tools/mintnet-kubernetes/assets/gce1.png b/tools/mintnet-kubernetes/assets/gce1.png new file mode 100644 index 000000000..3bf3ad005 Binary files /dev/null and b/tools/mintnet-kubernetes/assets/gce1.png differ diff --git a/tools/mintnet-kubernetes/assets/gce2.png b/tools/mintnet-kubernetes/assets/gce2.png new file mode 100644 index 000000000..358dcc04b Binary files /dev/null and b/tools/mintnet-kubernetes/assets/gce2.png differ diff --git a/tools/mintnet-kubernetes/assets/statefulset.png b/tools/mintnet-kubernetes/assets/statefulset.png new file mode 100644 index 000000000..ac68d22b7 Binary files /dev/null and b/tools/mintnet-kubernetes/assets/statefulset.png differ diff --git a/tools/mintnet-kubernetes/assets/t_plus_k.png b/tools/mintnet-kubernetes/assets/t_plus_k.png new file mode 100644 index 000000000..bee9fe56e Binary files /dev/null and b/tools/mintnet-kubernetes/assets/t_plus_k.png differ diff --git a/tools/mintnet-kubernetes/examples/basecoin/Makefile b/tools/mintnet-kubernetes/examples/basecoin/Makefile new file mode 100644 index 000000000..6d54d57d6 --- /dev/null +++ b/tools/mintnet-kubernetes/examples/basecoin/Makefile @@ -0,0 +1,10 @@ +create: + @echo "==> Creating deployment" + @kubectl create -f app.yaml + +destroy: + @echo "==> Destroying deployment" + @kubectl delete -f app.yaml + @kubectl delete pvc -l app=tm + +.PHONY: create destroy diff --git a/tools/mintnet-kubernetes/examples/basecoin/README.md b/tools/mintnet-kubernetes/examples/basecoin/README.md new file mode 100644 index 000000000..46911a096 --- /dev/null +++ b/tools/mintnet-kubernetes/examples/basecoin/README.md @@ -0,0 +1,42 @@ +# Basecoin example + +This is an example of using [basecoin](https://github.com/tendermint/basecoin). + +## Usage + +``` +make create +``` + +### Check account balance and send a transaction + +1. wait until all the pods are `Running`. + + ``` + kubectl get pods -w -o wide -L tm + ``` + +2. wait until the app starts. + + ``` + kubectl logs -c app -f tm-0 + ``` + +3. get the account address of the second pod + + ``` + ADDR=`kubectl exec -c app tm-1 -- cat /app/key.json | jq ".address" | tr -d "\""` + ``` + +4.
send 5 coins to it from the first pod + + ``` + kubectl exec -c app tm-0 -- basecoin tx send --to "0x$ADDR" --amount 5mycoin --from /app/key.json --chain_id chain-tTH4mi + ``` + + +## Clean up + +``` +make destroy +``` diff --git a/tools/mintnet-kubernetes/examples/basecoin/app.yaml b/tools/mintnet-kubernetes/examples/basecoin/app.yaml new file mode 100644 index 000000000..6206b1cdb --- /dev/null +++ b/tools/mintnet-kubernetes/examples/basecoin/app.yaml @@ -0,0 +1,334 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: basecoin + labels: + app: basecoin +spec: + ports: + - port: 26656 + name: p2p + - port: 26657 + name: rpc + clusterIP: None + selector: + app: tm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tm-config +data: + seeds: "tm-0,tm-1,tm-2,tm-3" + validators: "tm-0,tm-1,tm-2,tm-3" + validator.power: "10" + genesis.json: |- + { + "genesis_time": "2016-02-05T06:02:31.526Z", + "chain_id": "chain-tTH4mi", + "validators": [], + "app_hash": "" + } + pub_key_nginx.conf: |- + server { + listen 80 default_server; + listen [::]:80 default_server ipv6only=on; + location /pub_key.json { root /usr/share/nginx/; } + location /app_pub_key.json { root /usr/share/nginx/; } + } +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-config +data: + genesis.json: |- + { + "chain_id": "chain-tTH4mi", + "app_options": { + "accounts": [ + { + "pub_key": "tm-0", + "coins": [ + { + "denom": "mycoin", + "amount": 1000000000 + } + ] + }, + { + "pub_key": "tm-1", + "coins": [ + { + "denom": "mycoin", + "amount": 1000000000 + } + ] + }, + { + "pub_key": "tm-2", + "coins": [ + { + "denom": "mycoin", + "amount": 1000000000 + } + ] + }, + { + "pub_key": "tm-3", + "coins": [ + { + "denom": "mycoin", + "amount": 1000000000 + } + ] + } + ] + } + } +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: tm-budget +spec: + selector: + matchLabels: + app: tm + minAvailable: 2 +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: tm +spec: + serviceName: basecoin + replicas: 4 + template: + metadata: + labels: + app: tm + annotations: + pod.beta.kubernetes.io/init-containers: '[{ + "name": "tm-gen-validator", + "image": "tendermint/tendermint:0.10.0", + "imagePullPolicy": "IfNotPresent", + "command": ["bash", "-c", " + set -ex\n + if [ ! -f /tendermint/priv_validator.json ]; then\n + tendermint gen_validator > /tendermint/priv_validator.json\n + # pub_key.json will be served by pub-key container\n + cat /tendermint/priv_validator.json | jq \".pub_key\" > /tendermint/pub_key.json\n + fi\n + "], + "volumeMounts": [ + {"name": "tmdir", "mountPath": "/tendermint"} + ] + }, + { + "name": "app-gen-key", + "image": "tendermint/basecoin:0.5.1", + "imagePullPolicy": "IfNotPresent", + "command": ["bash", "-c", " + set -ex\n + if [ ! 
-f /app/key.json ]; then\n + basecoin key new > /app/key.json\n + # pub_key.json will be served by app-pub-key container\n + cat /app/key.json | jq \".pub_key\" > /app/pub_key.json\n + fi\n + "], + "volumeMounts": [ + {"name": "appdir", "mountPath": "/app"} + ] + }]' + spec: + containers: + - name: tm + imagePullPolicy: IfNotPresent + image: tendermint/tendermint:0.10.0 + ports: + - containerPort: 26656 + name: p2p + - containerPort: 26657 + name: rpc + env: + - name: SEEDS + valueFrom: + configMapKeyRef: + name: tm-config + key: seeds + - name: VALIDATOR_POWER + valueFrom: + configMapKeyRef: + name: tm-config + key: validator.power + - name: VALIDATORS + valueFrom: + configMapKeyRef: + name: tm-config + key: validators + - name: TMHOME + value: /tendermint + command: + - bash + - "-c" + - | + set -ex + + # copy template + cp /etc/tendermint/genesis.json /tendermint/genesis.json + + # fill genesis file with validators + IFS=',' read -ra VALS_ARR <<< "$VALIDATORS" + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + for v in "${VALS_ARR[@]}"; do + # wait until validator generates priv/pub key pair + set +e + + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 5 + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + done + set -e + + # add validator to genesis file along with its pub_key + curl -s "http://$v.$fqdn_suffix/pub_key.json" | jq ". as \$k | {pub_key: \$k, amount: $VALIDATOR_POWER, name: \"$v\"}" > pub_validator.json + cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json + rm pub_validator.json + done + + # construct seeds + IFS=',' read -ra SEEDS_ARR <<< "$SEEDS" + seeds=() + for s in "${SEEDS_ARR[@]}"; do + seeds+=("$s.$fqdn_suffix:26656") + done + seeds=$(IFS=','; echo "${seeds[*]}") + + tendermint node --p2p.seeds="$seeds" --moniker="`hostname`" --proxy_app="unix:///socks/app.sock" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/tendermint/genesis.json + name: tmconfigdir + subPath: genesis.json + - name: socksdir + mountPath: /socks + + - name: app + imagePullPolicy: IfNotPresent + image: tendermint/basecoin:0.5.1 + env: + - name: BCHOME + value: /app + workingDir: /app + command: + - bash + - "-c" + - | + set -ex + + # replace "tm-N" with public keys in genesis file + cp /etc/app/genesis.json genesis.json + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + # for every "base/account" + i=0 + length=$(cat genesis.json | jq ".app_options.accounts | length") + while [[ $i -lt $length ]]; do + # extract pod name ("tm-0") + pod=$(cat genesis.json | jq -r ".app_options.accounts[$i].pub_key") + + # wait until pod starts to serve its pub_key + set +e + + curl -s --fail "http://$pod.$fqdn_suffix/app_pub_key.json" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 5 + curl -s --fail "http://$pod.$fqdn_suffix/app_pub_key.json" > /dev/null + ERR=$? + done + set -e + + # get its pub_key + curl -s "http://$pod.$fqdn_suffix/app_pub_key.json" | jq "." 
> k.json
+
+          # replace pod name with it ("tm-0" => "{"type": ..., "data": ...}")
+          cat genesis.json | jq ".app_options.accounts[$i].pub_key = $(cat k.json | jq '.')" > tmpgenesis && mv tmpgenesis genesis.json
+          rm -f k.json
+
+          i=$((i+1))
+        done
+
+        rm -f /socks/app.sock # remove old socket
+
+        basecoin start --address="unix:///socks/app.sock" --without-tendermint
+      volumeMounts:
+        - name: appdir
+          mountPath: /app
+        - mountPath: /etc/app/genesis.json
+          name: appconfigdir
+          subPath: genesis.json
+        - name: socksdir
+          mountPath: /socks
+
+    - name: pub-key
+      imagePullPolicy: IfNotPresent
+      image: nginx:latest
+      ports:
+        - containerPort: 80
+      command:
+        - bash
+        - "-c"
+        - |
+          set -ex
+          # fixes 403 Permission Denied (open() "/tendermint/pub_key.json" failed (13: Permission denied))
+          # => we cannot serve from /tendermint, so we copy the file
+          mkdir -p /usr/share/nginx
+          cp /tendermint/pub_key.json /usr/share/nginx/pub_key.json
+          cp /app/pub_key.json /usr/share/nginx/app_pub_key.json
+          nginx -g "daemon off;"
+      volumeMounts:
+        - name: tmdir
+          mountPath: /tendermint
+        - name: appdir
+          mountPath: /app
+        - mountPath: /etc/nginx/conf.d/pub_key.conf
+          name: tmconfigdir
+          subPath: pub_key_nginx.conf
+
+  volumes:
+    - name: tmconfigdir
+      configMap:
+        name: tm-config
+    - name: appconfigdir
+      configMap:
+        name: app-config
+    - name: socksdir
+      emptyDir: {}
+
+  volumeClaimTemplates:
+    - metadata:
+        name: tmdir
+        annotations:
+          volume.alpha.kubernetes.io/storage-class: anything
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        resources:
+          requests:
+            storage: 2Gi
+    - metadata:
+        name: appdir
+        annotations:
+          volume.alpha.kubernetes.io/storage-class: anything
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        resources:
+          requests:
+            storage: 12Mi
diff --git a/tools/mintnet-kubernetes/examples/basecoin/lightclient.md b/tools/mintnet-kubernetes/examples/basecoin/lightclient.md
new file mode 100644
index 000000000..11d07af1f
--- /dev/null
+++ b/tools/mintnet-kubernetes/examples/basecoin/lightclient.md
@@ -0,0 +1,100 @@
+**OUTDATED**
+
+# Using with lightclient
+
+We have an awesome cluster running. Let's try to test it out without
+relying on executing commands on the cluster. Rather, we can connect to the
+RPC interface with the `light-client` package and execute commands locally,
+or even proxy our webapp to the Kubernetes backend.
+
+## Setup
+
+To get this working, we need to know a few pieces of info:
+the Tendermint chain ID, the basecoin chain ID, and an account
+with a bit of cash....
+
+### Tendermint Chain ID
+
+`kubectl exec -c tm tm-0 -- curl -s http://tm-1.basecoin:26657/status | json_pp | grep network`
+
+Set `TM_CHAIN` to the value shown there.
+
+### Basecoin Chain ID
+
+`kubectl exec -c app tm-1 -- grep -A1 chainID /app/genesis.json`
+
+Set `BC_CHAIN` to the value shown there.
+
+### Expose the Tendermint RPC
+
+We need to be able to reach the Tendermint RPC interface from our shell.
+
+`kubectl port-forward tm-0 26657:26657`
+
+### Start basecoin-proxy
+
+Using this info, let's connect our proxy and get going:
+
+`proxy-basecoin -tmchain=$TM_CHAIN -chain=$BC_CHAIN -rpc=localhost:26657`
+
+## Basecoin accounts
+
+Well, we can connect, but we don't have a registered account yet...
+Let's look around, then use the CLI to send some money from one of
+the validators to our client's address so we can play.
+
+**TODO** we can add some of our known accounts (from `/keys`) into
+the genesis file, so we can skip all the kubectl money fiddling here.
+We will want to start with money on some known non-validators.
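+
+Putting the Setup steps above together, a minimal sketch (not official
+tooling; it assumes `jq` is available and that the JSON paths below,
+`.result.node_info.network` and `.chainID`, match this Tendermint/basecoin
+version):
+
+```bash
+# Tendermint chain ID (assumed jq path; the step above greps for "network")
+TM_CHAIN=$(kubectl exec -c tm tm-0 -- curl -s http://tm-1.basecoin:26657/status | jq -r .result.node_info.network)
+# basecoin chain ID (the step above greps for "chainID" in /app/genesis.json)
+BC_CHAIN=$(kubectl exec -c app tm-1 -- cat /app/genesis.json | jq -r .chainID)
+
+# expose the Tendermint RPC locally, then connect the proxy
+kubectl port-forward tm-0 26657:26657 &
+proxy-basecoin -tmchain=$TM_CHAIN -chain=$BC_CHAIN -rpc=localhost:26657
+```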
+
+### Getting validator info (kubectl)
+
+The basecoin app deployment starts with 1000 "blank" coins in an account for
+each validator. Let's get the address of the first validator:
+
+`kubectl exec -c app tm-1 -- grep address /app/key.json`
+
+Store this value as VAL1_ADDR.
+
+### Querying state (proxy)
+
+The proxy can read any public info via the Tendermint RPC, so let's check
+out this account.
+
+`curl localhost:8108/query/account/$VAL1_ADDR`
+
+Now, let's make our own account....
+
+`curl -XPOST http://localhost:8108/keys/ -d '{"name": "k8demo", "passphrase": "1234567890"}'`
+
+(or pick your own user and password). Remember the address you get here. You can
+always find it out later by calling:
+
+`curl http://localhost:8108/keys/k8demo`
+
+and store it in DEMO_ADDR. The account is empty at first:
+
+`curl localhost:8108/query/account/$DEMO_ADDR`
+
+
+### "Stealing" validator cash (kubectl)
+
+Run one command (it will be signed for us), and now we have money:
+
+`kubectl exec -c app tm-0 -- basecoin tx send --to "0x$DEMO_ADDR" --amount 500`
+
+### Using our money
+
+Returning to our remote shell, we have a remote account with some money.
+Let's see that:
+
+`curl localhost:8108/query/account/$DEMO_ADDR`
+
+Cool. Now we need to send it to a second account.
+
+`curl -XPOST http://localhost:8108/keys/ -d '{"name": "buddy", "passphrase": "1234567890"}'`
+
+and store the resulting address in BUDDY_ADDR.
+
+**TODO** finish this
+
diff --git a/tools/mintnet-kubernetes/examples/counter/Makefile b/tools/mintnet-kubernetes/examples/counter/Makefile
new file mode 100644
index 000000000..6d54d57d6
--- /dev/null
+++ b/tools/mintnet-kubernetes/examples/counter/Makefile
@@ -0,0 +1,10 @@
+create:
+	@echo "==> Creating deployment"
+	@kubectl create -f app.yaml
+
+destroy:
+	@echo "==> Destroying deployment"
+	@kubectl delete -f app.yaml
+	@kubectl delete pvc -l app=tm
+
+.PHONY: create destroy
diff --git a/tools/mintnet-kubernetes/examples/counter/app.yaml b/tools/mintnet-kubernetes/examples/counter/app.yaml
new file mode 100644
index 000000000..fed35f102
--- /dev/null
+++ b/tools/mintnet-kubernetes/examples/counter/app.yaml
@@ -0,0 +1,214 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+  name: counter
+  labels:
+    app: counter
+spec:
+  ports:
+    - port: 26656
+      name: p2p
+    - port: 26657
+      name: rpc
+  clusterIP: None
+  selector:
+    app: tm
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: tm-config
+data:
+  seeds: "tm-0,tm-1,tm-2,tm-3"
+  validators: "tm-0,tm-1,tm-2,tm-3"
+  validator.power: "10"
+  genesis.json: |-
+    {
+      "genesis_time": "2016-02-05T23:17:31.164Z",
+      "chain_id": "chain-B5XXm5",
+      "validators": [],
+      "app_hash": ""
+    }
+  pub_key_nginx.conf: |-
+    server {
+      listen 80 default_server;
+      listen [::]:80 default_server ipv6only=on;
+      location /pub_key.json { root /usr/share/nginx/; }
+    }
+---
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: tm-budget
+spec:
+  selector:
+    matchLabels:
+      app: tm
+  minAvailable: 2
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  name: tm
+spec:
+  serviceName: counter
+  replicas: 4
+  template:
+    metadata:
+      labels:
+        app: tm
+      annotations:
+        pod.beta.kubernetes.io/init-containers: '[{
+          "name": "tm-gen-validator",
+          "image": "tendermint/tendermint:0.10.0",
+          "imagePullPolicy": "IfNotPresent",
+          "command": ["bash", "-c", "
+            set -ex\n
+            if [ !
-f /tendermint/priv_validator.json ]; then\n + tendermint gen_validator > /tendermint/priv_validator.json\n + # pub_key.json will be served by pub-key container\n + cat /tendermint/priv_validator.json | jq \".pub_key\" > /tendermint/pub_key.json\n + fi\n + "], + "volumeMounts": [ + {"name": "tmdir", "mountPath": "/tendermint"} + ] + }]' + spec: + containers: + - name: tm + imagePullPolicy: IfNotPresent + image: tendermint/tendermint:0.10.0 + ports: + - containerPort: 26656 + name: p2p + - containerPort: 26657 + name: rpc + env: + - name: SEEDS + valueFrom: + configMapKeyRef: + name: tm-config + key: seeds + - name: VALIDATOR_POWER + valueFrom: + configMapKeyRef: + name: tm-config + key: validator.power + - name: VALIDATORS + valueFrom: + configMapKeyRef: + name: tm-config + key: validators + - name: TMHOME + value: /tendermint + command: + - bash + - "-c" + - | + set -ex + + # copy template + cp /etc/tendermint/genesis.json /tendermint/genesis.json + + # fill genesis file with validators + IFS=',' read -ra VALS_ARR <<< "$VALIDATORS" + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + for v in "${VALS_ARR[@]}"; do + # wait until validator generates priv/pub key pair + set +e + + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 5 + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + done + set -e + + # add validator to genesis file along with its pub_key + curl -s "http://$v.$fqdn_suffix/pub_key.json" | jq ". as \$k | {pub_key: \$k, amount: $VALIDATOR_POWER, name: \"$v\"}" > pub_validator.json + cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json + rm pub_validator.json + done + + # construct seeds + IFS=',' read -ra SEEDS_ARR <<< "$SEEDS" + seeds=() + for s in "${SEEDS_ARR[@]}"; do + seeds+=("$s.$fqdn_suffix:26656") + done + seeds=$(IFS=','; echo "${seeds[*]}") + + tendermint node --p2p.seeds="$seeds" --moniker="`hostname`" --proxy_app="unix:///socks/app.sock" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/tendermint/genesis.json + name: tmconfigdir + subPath: genesis.json + - name: socksdir + mountPath: /socks + + - name: app + imagePullPolicy: IfNotPresent + image: golang:latest + command: + - bash + - "-c" + - | + set -ex + + go get github.com/tendermint/abci/cmd/counter + + rm -f /socks/app.sock # remove old socket + + counter --serial --addr="unix:///socks/app.sock" + volumeMounts: + - name: socksdir + mountPath: /socks + + - name: pub-key + imagePullPolicy: IfNotPresent + image: nginx:latest + ports: + - containerPort: 80 + name: pub-key + command: + - bash + - "-c" + - | + set -ex + # fixes 403 Permission Denied (open() "/tendermint/pub_key.json" failed (13: Permission denied)) + # => we cannot serve from /tendermint, so we copy the file + mkdir -p /usr/share/nginx + cp /tendermint/pub_key.json /usr/share/nginx/pub_key.json + nginx -g "daemon off;" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/nginx/conf.d/pub_key.conf + name: tmconfigdir + subPath: pub_key_nginx.conf + + volumes: + - name: tmconfigdir + configMap: + name: tm-config + - name: socksdir + emptyDir: {} + + volumeClaimTemplates: + - metadata: + name: tmdir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 2Gi diff --git a/tools/mintnet-kubernetes/examples/dummy/Makefile 
b/tools/mintnet-kubernetes/examples/dummy/Makefile new file mode 100644 index 000000000..825487fcd --- /dev/null +++ b/tools/mintnet-kubernetes/examples/dummy/Makefile @@ -0,0 +1,17 @@ +create: + @echo "==> Creating deployment" + @kubectl create -f app.yaml + @echo "==> Waiting 10s until it is probably ready" + @sleep 10 + @echo "==> Creating monitor and transacter pods" + @kubectl create -f tm-monitor-pod.yaml + @kubectl create -f transacter-pod.yaml + +destroy: + @echo "==> Destroying deployment" + @kubectl delete -f transacter-pod.yaml + @kubectl delete -f tm-monitor-pod.yaml + @kubectl delete -f app.yaml + @kubectl delete pvc -l app=tm + +.PHONY: create destroy diff --git a/tools/mintnet-kubernetes/examples/dummy/app.yaml b/tools/mintnet-kubernetes/examples/dummy/app.yaml new file mode 100644 index 000000000..5413bd501 --- /dev/null +++ b/tools/mintnet-kubernetes/examples/dummy/app.yaml @@ -0,0 +1,196 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: dummy + labels: + app: dummy +spec: + ports: + - port: 26656 + name: p2p + - port: 26657 + name: rpc + clusterIP: None + selector: + app: tm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tm-config +data: + seeds: "tm-0,tm-1,tm-2,tm-3" + validators: "tm-0,tm-1,tm-2,tm-3" + validator.power: "10" + genesis.json: |- + { + "genesis_time": "2016-02-05T23:17:31.164Z", + "chain_id": "chain-B5XXm5", + "validators": [], + "app_hash": "" + } + pub_key_nginx.conf: |- + server { + listen 80 default_server; + listen [::]:80 default_server ipv6only=on; + location /pub_key.json { root /usr/share/nginx/; } + } +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: tm-budget +spec: + selector: + matchLabels: + app: tm + minAvailable: 2 +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: tm +spec: + serviceName: dummy + replicas: 4 + template: + metadata: + labels: + app: tm + annotations: + pod.beta.kubernetes.io/init-containers: '[{ + "name": "tm-gen-validator", + "image": "tendermint/tendermint:0.10.0", + "imagePullPolicy": "IfNotPresent", + "command": ["bash", "-c", " + set -ex\n + if [ ! -f /tendermint/priv_validator.json ]; then\n + tendermint gen_validator > /tendermint/priv_validator.json\n + # pub_key.json will be served by pub-key container\n + cat /tendermint/priv_validator.json | jq \".pub_key\" > /tendermint/pub_key.json\n + fi\n + "], + "volumeMounts": [ + {"name": "tmdir", "mountPath": "/tendermint"} + ] + }]' + spec: + containers: + - name: tm + imagePullPolicy: IfNotPresent + image: tendermint/tendermint:0.10.0 + ports: + - containerPort: 26656 + name: p2p + - containerPort: 26657 + name: rpc + env: + - name: SEEDS + valueFrom: + configMapKeyRef: + name: tm-config + key: seeds + - name: VALIDATOR_POWER + valueFrom: + configMapKeyRef: + name: tm-config + key: validator.power + - name: VALIDATORS + valueFrom: + configMapKeyRef: + name: tm-config + key: validators + - name: TMHOME + value: /tendermint + command: + - bash + - "-c" + - | + set -ex + + # copy template + cp /etc/tendermint/genesis.json /tendermint/genesis.json + + # fill genesis file with validators + IFS=',' read -ra VALS_ARR <<< "$VALIDATORS" + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + for v in "${VALS_ARR[@]}"; do + # wait until validator generates priv/pub key pair + set +e + + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? 
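+            # keep polling every 5s: the pub-key nginx container for $v may not be serving the file yet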
+ while [ "$ERR" != 0 ]; do + sleep 5 + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + done + set -e + + # add validator to genesis file along with its pub_key + curl -s "http://$v.$fqdn_suffix/pub_key.json" | jq ". as \$k | {pub_key: \$k, amount: $VALIDATOR_POWER, name: \"$v\"}" > pub_validator.json + cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json + rm pub_validator.json + done + + # construct seeds + IFS=',' read -ra SEEDS_ARR <<< "$SEEDS" + seeds=() + for s in "${SEEDS_ARR[@]}"; do + seeds+=("$s.$fqdn_suffix:26656") + done + seeds=$(IFS=','; echo "${seeds[*]}") + + tendermint node --p2p.seeds="$seeds" --moniker="`hostname`" --proxy_app="dummy" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/tendermint/genesis.json + name: tmconfigdir + subPath: genesis.json + - name: socksdir + mountPath: /socks + + - name: pub-key + imagePullPolicy: IfNotPresent + image: nginx:latest + ports: + - containerPort: 80 + name: pub-key + command: + - bash + - "-c" + - | + set -ex + # fixes 403 Permission Denied (open() "/tendermint/pub_key.json" failed (13: Permission denied)) + # => we cannot serve from /tendermint, so we copy the file + mkdir -p /usr/share/nginx + cp /tendermint/pub_key.json /usr/share/nginx/pub_key.json + nginx -g "daemon off;" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/nginx/conf.d/pub_key.conf + name: tmconfigdir + subPath: pub_key_nginx.conf + + volumes: + - name: tmconfigdir + configMap: + name: tm-config + - name: socksdir + emptyDir: {} + + volumeClaimTemplates: + - metadata: + name: tmdir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 2Gi diff --git a/tools/mintnet-kubernetes/examples/dummy/tm-monitor-pod.yaml b/tools/mintnet-kubernetes/examples/dummy/tm-monitor-pod.yaml new file mode 100644 index 000000000..fb0bf7236 --- /dev/null +++ b/tools/mintnet-kubernetes/examples/dummy/tm-monitor-pod.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: monitor +spec: + containers: + - name: monitor + image: tendermint/monitor + args: ["-listen-addr=tcp://0.0.0.0:26670", "tm-0.dummy:26657,tm-1.dummy:26657,tm-2.dummy:26657,tm-3.dummy:26657"] + ports: + - containerPort: 26670 + name: rpc diff --git a/tools/mintnet-kubernetes/examples/dummy/transacter-pod.yaml b/tools/mintnet-kubernetes/examples/dummy/transacter-pod.yaml new file mode 100644 index 000000000..6598e2a8b --- /dev/null +++ b/tools/mintnet-kubernetes/examples/dummy/transacter-pod.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: transacter +spec: + containers: + - name: transacter + image: tendermint/transacter + command: + - bash + - "-c" + - | + set -ex + while true + do + ./transact 100 "tm-0.dummy:26657" + sleep 1 + done diff --git a/tools/tm-bench/Dockerfile b/tools/tm-bench/Dockerfile new file mode 100644 index 000000000..9adb2936e --- /dev/null +++ b/tools/tm-bench/Dockerfile @@ -0,0 +1,6 @@ +FROM alpine:3.7 + +WORKDIR /app +COPY tm-bench /app/tm-bench + +ENTRYPOINT ["./tm-bench"] diff --git a/tools/tm-bench/Dockerfile.dev b/tools/tm-bench/Dockerfile.dev new file mode 100644 index 000000000..469bb8150 --- /dev/null +++ b/tools/tm-bench/Dockerfile.dev @@ -0,0 +1,12 @@ +FROM golang:latest + +RUN mkdir -p /go/src/github.com/tendermint/tendermint/tools/tm-bench +WORKDIR 
/go/src/github.com/tendermint/tendermint/tools/tm-bench + +COPY Makefile /go/src/github.com/tendermint/tendermint/tools/tm-bench/ + +RUN make get_tools + +COPY . /go/src/github.com/tendermint/tendermint/tools/tm-bench + +RUN make get_vendor_deps diff --git a/tools/tm-bench/LICENSE b/tools/tm-bench/LICENSE new file mode 100644 index 000000000..f48913967 --- /dev/null +++ b/tools/tm-bench/LICENSE @@ -0,0 +1,204 @@ +Tendermint Bench +Copyright 2017 Tendermint + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/tools/tm-bench/Makefile b/tools/tm-bench/Makefile
new file mode 100644
index 000000000..2d427dbc1
--- /dev/null
+++ b/tools/tm-bench/Makefile
@@ -0,0 +1,50 @@
+DIST_DIRS := find * -type d -exec
+VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go)
+
+all: build test install
+
+########################################
+### Build
+
+build:
+	@go build
+
+install:
+	@go install
+
+test:
+	@go test -race
+
+build-all: check_tools
+	rm -rf ./dist
+	gox -verbose \
+		-ldflags "-s -w" \
+		-arch="amd64 386 arm arm64" \
+		-os="linux darwin windows freebsd" \
+		-osarch="!darwin/arm !darwin/arm64" \
+		-output="dist/{{.OS}}-{{.Arch}}/{{.Dir}}" .
+
+dist: build-all
+	cd dist && \
+	$(DIST_DIRS) cp ../LICENSE {} \; && \
+	$(DIST_DIRS) cp ../README.rst {} \; && \
+	$(DIST_DIRS) tar -zcf tm-bench-${VERSION}-{}.tar.gz {} \; && \
+	shasum -a256 ./*.tar.gz > "./tm-bench_${VERSION}_SHA256SUMS" && \
+	cd ..
+
+########################################
+### Docker
+
+build-docker:
+	rm -f ./tm-bench
+	docker run -it --rm -v "$(PWD):/go/src/app" -w "/go/src/app" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-bench
+	docker build -t "tendermint/bench" .
+
+clean:
+	rm -f ./tm-bench
+	rm -rf ./dist
+
+# To avoid unintended conflicts with file names, always add to .PHONY
+# unless there is a reason not to.
+# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
+.PHONY: check check_tools get_tools update_tools get_vendor_deps build install test build-all dist fmt metalinter metalinter_all build-docker clean
diff --git a/tools/tm-bench/README.md b/tools/tm-bench/README.md
new file mode 100644
index 000000000..000f20f37
--- /dev/null
+++ b/tools/tm-bench/README.md
@@ -0,0 +1,80 @@
+# tm-bench
+
+Tendermint blockchain benchmarking tool:
+
+- https://github.com/tendermint/tools/tree/master/tm-bench
+
+For example, the following:
+
+    tm-bench -T 10 -r 1000 localhost:26657
+
+will output:
+
+    Stats          Avg        StdDev     Max        Total
+    Txs/sec        818        532        1549       9000
+    Blocks/sec     0.818      0.386      1          9
+
+
+## Quick Start
+
+[Install Tendermint](https://github.com/tendermint/tendermint#install).
+This is currently set up to work on Tendermint's develop branch, so please
+make sure you are on it. (If not, update `tendermint` and `tmlibs` in
+Gopkg.toml to use the master branch.)
+
+Then run:
+
+    tendermint init
+    tendermint node --proxy_app=kvstore
+
+    tm-bench localhost:26657
+
+with the last command being in a separate window.
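+
+For scripted runs, `main.go` also defines an `-output-format json` flag, and
+`printStatistics` emits `txs_per_sec_avg` and `blocks_per_sec_avg` fields; a
+minimal sketch of consuming that output with `jq`:
+
+```bash
+# run a 10s benchmark and pull the average txs/sec out of the JSON summary
+tm-bench -T 10 -r 1000 -output-format json localhost:26657 | jq .txs_per_sec_avg
+```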
+
+## Usage
+
+    tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints]
+
+    Examples:
+        tm-bench localhost:26657
+    Flags:
+      -T int
+          Exit after the specified amount of time in seconds (default 10)
+      -c int
+          Connections to keep open per endpoint (default 1)
+      -r int
+          Txs per second to send in a connection (default 1000)
+      -s int
+          Size per tx in bytes
+      -v  Verbose output
+
+## How stats are collected
+
+These stats are derived by having each connection send transactions at the
+specified rate (or as close as it can get) for the specified time.
+After the specified time, tm-bench iterates over all of the blocks created
+in that window.
+The average and stddev per second are computed based on that, by
+grouping the data by second.
+
+To send transactions at the specified rate in each connection, we loop
+over the target number of transactions for each second.
+If sending is too slow, the loop stops at the one-second mark.
+If it is too fast, we wait until the second ends.
+The transactions-per-second stat is computed based on what actually ends up
+in the blocks.
+
+Note that there will be edge effects on the number of transactions in the first
+and last blocks.
+This is because transactions may start sending midway through when Tendermint
+starts building the next block, so it only has half as much time to gather the
+txs that tm-bench sends.
+Similarly, the test duration will likely end midway through Tendermint
+building the next block.
+
+Each of the connections is handled via two separate goroutines.
+
+## Development
+
+    make get_vendor_deps
+    make test
diff --git a/tools/tm-bench/main.go b/tools/tm-bench/main.go
new file mode 100644
index 000000000..a8ede4a0c
--- /dev/null
+++ b/tools/tm-bench/main.go
@@ -0,0 +1,175 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log/term"
+
+	"github.com/tendermint/tendermint/libs/log"
+	tmrpc "github.com/tendermint/tendermint/rpc/client"
+)
+
+var logger = log.NewNopLogger()
+
+func main() {
+	var durationInt, txsRate, connections, txSize int
+	var verbose bool
+	var outputFormat, broadcastTxMethod string
+
+	flagSet := flag.NewFlagSet("tm-bench", flag.ExitOnError)
+	flagSet.IntVar(&connections, "c", 1, "Connections to keep open per endpoint")
+	flagSet.IntVar(&durationInt, "T", 10, "Exit after the specified amount of time in seconds")
+	flagSet.IntVar(&txsRate, "r", 1000, "Txs per second to send in a connection")
+	flagSet.IntVar(&txSize, "s", 250, "The size of a transaction in bytes.")
+	flagSet.StringVar(&outputFormat, "output-format", "plain", "Output format: plain or json")
+	flagSet.StringVar(&broadcastTxMethod, "broadcast-tx-method", "async", "Broadcast method: async (no guarantees; fastest), sync (ensures tx is checked) or commit (ensures tx is checked and committed; slowest)")
+	flagSet.BoolVar(&verbose, "v", false, "Verbose output")
+
+	flagSet.Usage = func() {
+		fmt.Println(`Tendermint blockchain benchmarking tool.
+ +Usage: + tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints] [-output-format [-broadcast-tx-method ]] + +Examples: + tm-bench localhost:26657`) + fmt.Println("Flags:") + flagSet.PrintDefaults() + } + + flagSet.Parse(os.Args[1:]) + + if flagSet.NArg() == 0 { + flagSet.Usage() + os.Exit(1) + } + + if verbose { + if outputFormat == "json" { + fmt.Fprintln(os.Stderr, "Verbose mode not supported with json output.") + os.Exit(1) + } + // Color errors red + colorFn := func(keyvals ...interface{}) term.FgBgColor { + for i := 1; i < len(keyvals); i += 2 { + if _, ok := keyvals[i].(error); ok { + return term.FgBgColor{Fg: term.White, Bg: term.Red} + } + } + return term.FgBgColor{} + } + logger = log.NewTMLoggerWithColorFn(log.NewSyncWriter(os.Stdout), colorFn) + + fmt.Printf("Running %ds test @ %s\n", durationInt, flagSet.Arg(0)) + } + + if broadcastTxMethod != "async" && + broadcastTxMethod != "sync" && + broadcastTxMethod != "commit" { + fmt.Fprintln( + os.Stderr, + "broadcast-tx-method should be either 'sync', 'async' or 'commit'.", + ) + os.Exit(1) + } + + var ( + endpoints = strings.Split(flagSet.Arg(0), ",") + client = tmrpc.NewHTTP(endpoints[0], "/websocket") + initialHeight = latestBlockHeight(client) + ) + logger.Info("Latest block height", "h", initialHeight) + + transacters := startTransacters( + endpoints, + connections, + txsRate, + txSize, + "broadcast_tx_"+broadcastTxMethod, + ) + + // Wait until transacters have begun until we get the start time + timeStart := time.Now() + logger.Info("Time last transacter started", "t", timeStart) + + duration := time.Duration(durationInt) * time.Second + + timeEnd := timeStart.Add(duration) + logger.Info("End time for calculation", "t", timeEnd) + + <-time.After(duration) + for i, t := range transacters { + t.Stop() + numCrashes := countCrashes(t.connsBroken) + if numCrashes != 0 { + fmt.Printf("%d connections crashed on transacter #%d\n", numCrashes, i) + } + } + + logger.Debug("Time all transacters stopped", "t", time.Now()) + + stats, err := calculateStatistics( + client, + initialHeight, + timeStart, + durationInt, + ) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + printStatistics(stats, outputFormat) +} + +func latestBlockHeight(client tmrpc.Client) int64 { + status, err := client.Status() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + return status.SyncInfo.LatestBlockHeight +} + +func countCrashes(crashes []bool) int { + count := 0 + for i := 0; i < len(crashes); i++ { + if crashes[i] { + count++ + } + } + return count +} + +func startTransacters( + endpoints []string, + connections, + txsRate int, + txSize int, + broadcastTxMethod string, +) []*transacter { + transacters := make([]*transacter, len(endpoints)) + + wg := sync.WaitGroup{} + wg.Add(len(endpoints)) + for i, e := range endpoints { + t := newTransacter(e, connections, txsRate, txSize, broadcastTxMethod) + t.SetLogger(logger) + go func(i int) { + defer wg.Done() + if err := t.Start(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + transacters[i] = t + }(i) + } + wg.Wait() + + return transacters +} diff --git a/tools/tm-bench/statistics.go b/tools/tm-bench/statistics.go new file mode 100644 index 000000000..5a8f60578 --- /dev/null +++ b/tools/tm-bench/statistics.go @@ -0,0 +1,150 @@ +package main + +import ( + "encoding/json" + "fmt" + "math" + "os" + "text/tabwriter" + "time" + + metrics "github.com/rcrowley/go-metrics" + tmrpc "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/types" 
+)
+
+type statistics struct {
+	TxsThroughput    metrics.Histogram `json:"txs_per_sec"`
+	BlocksThroughput metrics.Histogram `json:"blocks_per_sec"`
+}
+
+// calculateStatistics calculates the txs/sec and blocks/sec based on the
+// number of transactions and blocks that occurred between the start block
+// and the end time.
+func calculateStatistics(
+	client tmrpc.Client,
+	minHeight int64,
+	timeStart time.Time,
+	duration int,
+) (*statistics, error) {
+	timeEnd := timeStart.Add(time.Duration(duration) * time.Second)
+
+	stats := &statistics{
+		BlocksThroughput: metrics.NewHistogram(metrics.NewUniformSample(1000)),
+		TxsThroughput:    metrics.NewHistogram(metrics.NewUniformSample(1000)),
+	}
+
+	var (
+		numBlocksPerSec = make(map[int64]int64)
+		numTxsPerSec    = make(map[int64]int64)
+	)
+
+	// because during some seconds blocks won't be created...
+	for i := int64(0); i < int64(duration); i++ {
+		numBlocksPerSec[i] = 0
+		numTxsPerSec[i] = 0
+	}
+
+	blockMetas, err := getBlockMetas(client, minHeight, timeStart, timeEnd)
+	if err != nil {
+		return nil, err
+	}
+
+	// iterates from max height to min height
+	for _, blockMeta := range blockMetas {
+		// check if block was created after timeStart
+		if blockMeta.Header.Time.Before(timeStart) {
+			break
+		}
+
+		// check if block was created before timeEnd
+		if blockMeta.Header.Time.After(timeEnd) {
+			continue
+		}
+		sec := secondsSinceTimeStart(timeStart, blockMeta.Header.Time)
+
+		// increase number of blocks for that second
+		numBlocksPerSec[sec]++
+
+		// increase number of txs for that second
+		numTxsPerSec[sec] += blockMeta.Header.NumTxs
+		logger.Debug(fmt.Sprintf("%d txs at block height %d", blockMeta.Header.NumTxs, blockMeta.Header.Height))
+	}
+
+	for i := int64(0); i < int64(duration); i++ {
+		stats.BlocksThroughput.Update(numBlocksPerSec[i])
+		stats.TxsThroughput.Update(numTxsPerSec[i])
+	}
+
+	return stats, nil
+}
+
+func getBlockMetas(client tmrpc.Client, minHeight int64, timeStart, timeEnd time.Time) ([]*types.BlockMeta, error) {
+	// get blocks between minHeight and last height
+	// This returns max(minHeight,(last_height - 20)) to last_height
+	info, err := client.BlockchainInfo(minHeight, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		blockMetas = info.BlockMetas
+		lastHeight = info.LastHeight
+		diff       = lastHeight - minHeight
+		offset     = len(blockMetas)
+	)
+
+	for offset < int(diff) {
+		// get blocks between minHeight and last height
+		info, err := client.BlockchainInfo(minHeight, lastHeight-int64(offset))
+		if err != nil {
+			return nil, err
+		}
+		blockMetas = append(blockMetas, info.BlockMetas...)
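+		// offset grows as block metas accumulate; keep paging backwards
+		// (at most ~20 metas per call, per the note above) until the whole
+		// [minHeight, lastHeight] range has been fetched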
+ offset = len(blockMetas) + } + + return blockMetas, nil +} + +func secondsSinceTimeStart(timeStart, timePassed time.Time) int64 { + return int64(math.Round(timePassed.Sub(timeStart).Seconds())) +} + +func printStatistics(stats *statistics, outputFormat string) { + if outputFormat == "json" { + result, err := json.Marshal(struct { + TxsThroughput float64 `json:"txs_per_sec_avg"` + BlocksThroughput float64 `json:"blocks_per_sec_avg"` + }{stats.TxsThroughput.Mean(), stats.BlocksThroughput.Mean()}) + + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Println(string(result)) + } else { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 5, ' ', 0) + fmt.Fprintln(w, "Stats\tAvg\tStdDev\tMax\tTotal\t") + fmt.Fprintln( + w, + fmt.Sprintf( + "Txs/sec\t%.0f\t%.0f\t%d\t%d\t", + stats.TxsThroughput.Mean(), + stats.TxsThroughput.StdDev(), + stats.TxsThroughput.Max(), + stats.TxsThroughput.Sum(), + ), + ) + fmt.Fprintln( + w, + fmt.Sprintf("Blocks/sec\t%.3f\t%.3f\t%d\t%d\t", + stats.BlocksThroughput.Mean(), + stats.BlocksThroughput.StdDev(), + stats.BlocksThroughput.Max(), + stats.BlocksThroughput.Sum(), + ), + ) + w.Flush() + } +} diff --git a/tools/tm-bench/transacter.go b/tools/tm-bench/transacter.go new file mode 100644 index 000000000..36cc761e5 --- /dev/null +++ b/tools/tm-bench/transacter.go @@ -0,0 +1,284 @@ +package main + +import ( + "crypto/md5" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "sync" + "time" + + "github.com/gorilla/websocket" + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/libs/log" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" +) + +const ( + sendTimeout = 10 * time.Second + // see https://github.com/tendermint/tendermint/blob/master/rpc/lib/server/handlers.go + pingPeriod = (30 * 9 / 10) * time.Second +) + +type transacter struct { + Target string + Rate int + Size int + Connections int + BroadcastTxMethod string + + conns []*websocket.Conn + connsBroken []bool + startingWg sync.WaitGroup + endingWg sync.WaitGroup + stopped bool + + logger log.Logger +} + +func newTransacter(target string, connections, rate int, size int, broadcastTxMethod string) *transacter { + return &transacter{ + Target: target, + Rate: rate, + Size: size, + Connections: connections, + BroadcastTxMethod: broadcastTxMethod, + conns: make([]*websocket.Conn, connections), + connsBroken: make([]bool, connections), + logger: log.NewNopLogger(), + } +} + +// SetLogger lets you set your own logger +func (t *transacter) SetLogger(l log.Logger) { + t.logger = l +} + +// Start opens N = `t.Connections` connections to the target and creates read +// and write goroutines for each connection. +func (t *transacter) Start() error { + t.stopped = false + + rand.Seed(time.Now().Unix()) + + for i := 0; i < t.Connections; i++ { + c, _, err := connect(t.Target) + if err != nil { + return err + } + t.conns[i] = c + } + + t.startingWg.Add(t.Connections) + t.endingWg.Add(2 * t.Connections) + for i := 0; i < t.Connections; i++ { + go t.sendLoop(i) + go t.receiveLoop(i) + } + + t.startingWg.Wait() + + return nil +} + +// Stop closes the connections. +func (t *transacter) Stop() { + t.stopped = true + t.endingWg.Wait() + for _, c := range t.conns { + c.Close() + } +} + +// receiveLoop reads messages from the connection (empty in case of +// `broadcast_tx_async`). 
+func (t *transacter) receiveLoop(connIndex int) { + c := t.conns[connIndex] + defer t.endingWg.Done() + for { + _, _, err := c.ReadMessage() + if err != nil { + if !websocket.IsCloseError(err, websocket.CloseNormalClosure) { + t.logger.Error( + fmt.Sprintf("failed to read response on conn %d", connIndex), + "err", + err, + ) + } + return + } + if t.stopped || t.connsBroken[connIndex] { + return + } + } +} + +// sendLoop generates transactions at a given rate. +func (t *transacter) sendLoop(connIndex int) { + started := false + // Close the starting waitgroup, in the event that this fails to start + defer func() { + if !started { + t.startingWg.Done() + } + }() + c := t.conns[connIndex] + + c.SetPingHandler(func(message string) error { + err := c.WriteControl(websocket.PongMessage, []byte(message), time.Now().Add(sendTimeout)) + if err == websocket.ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + }) + + logger := t.logger.With("addr", c.RemoteAddr()) + + var txNumber = 0 + + pingsTicker := time.NewTicker(pingPeriod) + txsTicker := time.NewTicker(1 * time.Second) + defer func() { + pingsTicker.Stop() + txsTicker.Stop() + t.endingWg.Done() + }() + + // hash of the host name is a part of each tx + var hostnameHash [md5.Size]byte + hostname, err := os.Hostname() + if err != nil { + hostname = "127.0.0.1" + } + hostnameHash = md5.Sum([]byte(hostname)) + // each transaction embeds connection index, tx number and hash of the hostname + // we update the tx number between successive txs + tx := generateTx(connIndex, txNumber, t.Size, hostnameHash) + txHex := make([]byte, len(tx)*2) + hex.Encode(txHex, tx) + + for { + select { + case <-txsTicker.C: + startTime := time.Now() + endTime := startTime.Add(time.Second) + numTxSent := t.Rate + if !started { + t.startingWg.Done() + started = true + } + + now := time.Now() + for i := 0; i < t.Rate; i++ { + // update tx number of the tx, and the corresponding hex + updateTx(tx, txHex, txNumber) + paramsJSON, err := json.Marshal(map[string]interface{}{"tx": txHex}) + if err != nil { + fmt.Printf("failed to encode params: %v\n", err) + os.Exit(1) + } + rawParamsJSON := json.RawMessage(paramsJSON) + + c.SetWriteDeadline(now.Add(sendTimeout)) + err = c.WriteJSON(rpctypes.RPCRequest{ + JSONRPC: "2.0", + ID: "tm-bench", + Method: t.BroadcastTxMethod, + Params: rawParamsJSON, + }) + if err != nil { + err = errors.Wrap(err, + fmt.Sprintf("txs send failed on connection #%d", connIndex)) + t.connsBroken[connIndex] = true + logger.Error(err.Error()) + return + } + + // cache the time.Now() reads to save time. 
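+				// (refreshing `now` only on every 5th tx keeps the hot send loop cheap)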
+ if i%5 == 0 { + now = time.Now() + if now.After(endTime) { + // Plus one accounts for sending this tx + numTxSent = i + 1 + break + } + } + + txNumber++ + } + + timeToSend := time.Since(startTime) + logger.Info(fmt.Sprintf("sent %d transactions", numTxSent), "took", timeToSend) + if timeToSend < 1*time.Second { + sleepTime := time.Second - timeToSend + logger.Debug(fmt.Sprintf("connection #%d is sleeping for %f seconds", connIndex, sleepTime.Seconds())) + time.Sleep(sleepTime) + } + + case <-pingsTicker.C: + // go-rpc server closes the connection in the absence of pings + c.SetWriteDeadline(time.Now().Add(sendTimeout)) + if err := c.WriteMessage(websocket.PingMessage, []byte{}); err != nil { + err = errors.Wrap(err, + fmt.Sprintf("failed to write ping message on conn #%d", connIndex)) + logger.Error(err.Error()) + t.connsBroken[connIndex] = true + } + } + + if t.stopped { + // To cleanly close a connection, a client should send a close + // frame and wait for the server to close the connection. + c.SetWriteDeadline(time.Now().Add(sendTimeout)) + err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + if err != nil { + err = errors.Wrap(err, + fmt.Sprintf("failed to write close message on conn #%d", connIndex)) + logger.Error(err.Error()) + t.connsBroken[connIndex] = true + } + + return + } + } +} + +func connect(host string) (*websocket.Conn, *http.Response, error) { + u := url.URL{Scheme: "ws", Host: host, Path: "/websocket"} + return websocket.DefaultDialer.Dial(u.String(), nil) +} + +func generateTx(connIndex int, txNumber int, txSize int, hostnameHash [md5.Size]byte) []byte { + tx := make([]byte, txSize) + + binary.PutUvarint(tx[:8], uint64(connIndex)) + binary.PutUvarint(tx[8:16], uint64(txNumber)) + copy(tx[16:32], hostnameHash[:16]) + binary.PutUvarint(tx[32:40], uint64(time.Now().Unix())) + + // 40-* random data + if _, err := rand.Read(tx[40:]); err != nil { + panic(errors.Wrap(err, "failed to read random bytes")) + } + + return tx +} + +// warning, mutates input byte slice +func updateTx(tx []byte, txHex []byte, txNumber int) { + binary.PutUvarint(tx[8:16], uint64(txNumber)) + hexUpdate := make([]byte, 16) + hex.Encode(hexUpdate, tx[8:16]) + for i := 16; i < 32; i++ { + txHex[i] = hexUpdate[i-16] + } +} diff --git a/tools/tm-bench/transacter_test.go b/tools/tm-bench/transacter_test.go new file mode 100644 index 000000000..086a43c31 --- /dev/null +++ b/tools/tm-bench/transacter_test.go @@ -0,0 +1,104 @@ +package main + +import ( + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +// This test tests that the output of generate tx and update tx is consistent +func TestGenerateTxUpdateTxConsistentency(t *testing.T) { + cases := []struct { + connIndex int + startingTxNumber int + txSize int + hostname string + numTxsToTest int + }{ + {0, 0, 50, "localhost:26657", 1000}, + {70, 300, 10000, "localhost:26657", 1000}, + {0, 50, 100000, "localhost:26657", 1000}, + } + + for tcIndex, tc := range cases { + hostnameHash := md5.Sum([]byte(tc.hostname)) + // Tx generated from update tx. 
This is defined outside of the loop, since we have + // to a have something initially to update + updatedTx := generateTx(tc.connIndex, tc.startingTxNumber, tc.txSize, hostnameHash) + updatedHex := make([]byte, len(updatedTx)*2) + hex.Encode(updatedHex, updatedTx) + for i := 0; i < tc.numTxsToTest; i++ { + expectedTx := generateTx(tc.connIndex, tc.startingTxNumber+i, tc.txSize, hostnameHash) + expectedHex := make([]byte, len(expectedTx)*2) + hex.Encode(expectedHex, expectedTx) + + updateTx(updatedTx, updatedHex, tc.startingTxNumber+i) + + // after first 32 bytes is 8 bytes of time, then purely random bytes + require.Equal(t, expectedTx[:32], updatedTx[:32], + "First 32 bytes of the txs differed. tc #%d, i #%d", tcIndex, i) + require.Equal(t, expectedHex[:64], updatedHex[:64], + "First 64 bytes of the hex differed. tc #%d, i #%d", tcIndex, i) + // Test the lengths of the txs are as expected + require.Equal(t, tc.txSize, len(expectedTx), + "Length of expected Tx differed. tc #%d, i #%d", tcIndex, i) + require.Equal(t, tc.txSize, len(updatedTx), + "Length of expected Tx differed. tc #%d, i #%d", tcIndex, i) + require.Equal(t, tc.txSize*2, len(expectedHex), + "Length of expected hex differed. tc #%d, i #%d", tcIndex, i) + require.Equal(t, tc.txSize*2, len(updatedHex), + "Length of updated hex differed. tc #%d, i #%d", tcIndex, i) + } + } +} + +func BenchmarkIterationOfSendLoop(b *testing.B) { + var ( + connIndex = 0 + txSize = 25000 + ) + + now := time.Now() + // something too far away to matter + endTime := now.Add(time.Hour) + txNumber := 0 + hostnameHash := md5.Sum([]byte{0}) + tx := generateTx(connIndex, txNumber, txSize, hostnameHash) + txHex := make([]byte, len(tx)*2) + hex.Encode(txHex, tx) + b.ResetTimer() + for i := 0; i < b.N; i++ { + updateTx(tx, txHex, txNumber) + paramsJSON, err := json.Marshal(map[string]interface{}{"tx": txHex}) + if err != nil { + fmt.Printf("failed to encode params: %v\n", err) + os.Exit(1) + } + _ = json.RawMessage(paramsJSON) + _ = now.Add(sendTimeout) + + if err != nil { + err = errors.Wrap(err, + fmt.Sprintf("txs send failed on connection #%d", connIndex)) + logger.Error(err.Error()) + return + } + + // Cache the now operations + if i%5 == 0 { + now = time.Now() + if now.After(endTime) { + break + } + } + + txNumber++ + } +} diff --git a/tools/tm-monitor/Dockerfile b/tools/tm-monitor/Dockerfile new file mode 100644 index 000000000..7edfaca66 --- /dev/null +++ b/tools/tm-monitor/Dockerfile @@ -0,0 +1,6 @@ +FROM alpine:3.6 + +WORKDIR /app +COPY tm-monitor /app/tm-monitor + +ENTRYPOINT ["./tm-monitor"] diff --git a/tools/tm-monitor/Dockerfile.dev b/tools/tm-monitor/Dockerfile.dev new file mode 100644 index 000000000..5bfbbfd5a --- /dev/null +++ b/tools/tm-monitor/Dockerfile.dev @@ -0,0 +1,12 @@ +FROM golang:latest + +RUN mkdir -p /go/src/github.com/tendermint/tools/tm-monitor +WORKDIR /go/src/github.com/tendermint/tools/tm-monitor + +COPY Makefile /go/src/github.com/tendermint/tools/tm-monitor/ + +RUN make get_tools + +COPY . /go/src/github.com/tendermint/tools/tm-monitor + +RUN make get_vendor_deps diff --git a/tools/tm-monitor/LICENSE b/tools/tm-monitor/LICENSE new file mode 100644 index 000000000..20728d318 --- /dev/null +++ b/tools/tm-monitor/LICENSE @@ -0,0 +1,204 @@ +Tendermint Monitor +Copyright 2017 Tendermint + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/crypto/Makefile b/tools/tm-monitor/Makefile similarity index 52% rename from crypto/Makefile rename to tools/tm-monitor/Makefile index a4fd3c37f..3371a0c19 100644 --- a/crypto/Makefile +++ b/tools/tm-monitor/Makefile @@ -1,41 +1,27 @@ +DIST_DIRS := find * -type d -exec +VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go) GOTOOLS = \ - github.com/golang/dep/cmd/dep \ - # gopkg.in/alecthomas/gometalinter.v2 \ + github.com/mitchellh/gox \ + github.com/golang/dep/cmd/dep \ + gopkg.in/alecthomas/gometalinter.v2 +PACKAGES=$(shell go list ./... | grep -v '/vendor/') -GOTOOLS_CHECK = dep #gometalinter.v2 - -all: check get_vendor_deps build test install +all: check get_vendor_deps build test install metalinter check: check_tools - -######################################## -### Build - -# Command to generate the workd list (kept here for documentation purposes only): -wordlist: - # To re-generate wordlist.go run: - # go-bindata -ignore ".*\.go" -o keys/words/bip39/wordlist.go -pkg "wordlist" keys/bip39/wordlist/... - -build: wordlist - # Nothing else to build! - -install: - # Nothing to install! - - ######################################## ### Tools & dependencies check_tools: @# https://stackoverflow.com/a/25668869 @echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\ - $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" + $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" get_tools: @echo "--> Installing tools" go get -u -v $(GOTOOLS) - #@gometalinter.v2 --install + @gometalinter.v2 --install update_tools: @echo "--> Updating tools" @@ -46,12 +32,45 @@ get_vendor_deps: @echo "--> Running dep ensure" @dep ensure - ######################################## -### Testing +### Build + +build: + @go build + +install: + @go install test: - CGO_ENABLED=0 go test -p 1 $(shell go list ./... | grep -v vendor) + @go test -race $(PACKAGES) + +build-all: check_tools + rm -rf ./dist + gox -verbose \ + -ldflags "-s -w" \ + -arch="amd64 386 arm arm64" \ + -os="linux darwin windows freebsd" \ + -osarch="!darwin/arm !darwin/arm64" \ + -output="dist/{{.OS}}-{{.Arch}}/{{.Dir}}" . + +dist: build-all + cd dist && \ + $(DIST_DIRS) cp ../LICENSE {} \; && \ + $(DIST_DIRS) tar -zcf tm-monitor-${VERSION}-{}.tar.gz {} \; && \ + shasum -a256 ./*.tar.gz > "./tm-monitor_${VERSION}_SHA256SUMS" && \ + cd .. + +######################################## +### Docker + +build-docker: + rm -f ./tm-monitor + docker run -it --rm -v "$(PWD):/go/src/github.com/tendermint/tools/tm-monitor" -w "/go/src/github.com/tendermint/tools/tm-monitor" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-monitor + docker build -t "tendermint/monitor" . + +clean: + rm -f ./tm-monitor + rm -rf ./dist ######################################## ### Formatting, linting, and vetting @@ -89,11 +108,9 @@ metalinter: #--enable=vet \ metalinter_all: - protoc $(INCLUDE) --lint_out=. types/*.proto gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... - # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. 
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
-.PHONEY: check build install check_tools get_tools update_tools get_vendor_deps test fmt metalinter metalinter_all
+.PHONY: check check_tools get_tools update_tools get_vendor_deps build install test build-all dist fmt metalinter metalinter_all build-docker clean
diff --git a/tools/tm-monitor/README.md b/tools/tm-monitor/README.md
new file mode 100644
index 000000000..4c49775e3
--- /dev/null
+++ b/tools/tm-monitor/README.md
@@ -0,0 +1,77 @@
+# tm-monitor
+
+Tendermint blockchain monitoring tool; watches over one or more nodes,
+collecting and providing various statistics to the user:
+
+- https://github.com/tendermint/tools/tree/master/tm-monitor
+
+## Quick Start
+
+### Docker
+
+Assuming your application is running in another container with the name
+`app`:
+
+    docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+    docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm --link=app tendermint/tendermint node --proxy_app=tcp://app:26658
+
+    docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657
+
+If you don't have an application yet, but still want to try the monitor
+out, use `kvstore`:
+
+    docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+    docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore
+
+    docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657
+
+### Using Binaries
+
+[Install Tendermint](https://github.com/tendermint/tendermint#install)
+
+then run:
+
+    tendermint init
+    tendermint node --proxy_app=kvstore
+
+    tm-monitor localhost:26657
+
+with the last command being in a separate window.
+
+## Usage
+
+    tm-monitor [-v] [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints]
+
+    Examples:
+      # monitor single instance
+      tm-monitor localhost:26657
+
+      # monitor a few instances by providing comma-separated list of RPC endpoints
+      tm-monitor host1:26657,host2:26657
+
+    Flags:
+      -listen-addr string
+            HTTP and Websocket server listen address (default "tcp://0.0.0.0:26670")
+      -no-ton
+            Do not show ton (table of nodes)
+      -v    verbose logging
+
+### RPC UI
+
+Run `tm-monitor` and visit http://localhost:26670. You should see the
+list of the available RPC endpoints:
+
+    http://localhost:26670/status
+    http://localhost:26670/status/network
+    http://localhost:26670/monitor?endpoint=_
+    http://localhost:26670/status/node?name=_
+    http://localhost:26670/unmonitor?endpoint=_
+
+The API is available as GET requests with URI encoded parameters, or as
+JSONRPC POST requests. The JSONRPC methods are also exposed over
+websocket.
+
+## Development
+
+    make get_tools
+    make get_vendor_deps
+    make test
diff --git a/tools/tm-monitor/eventmeter/eventmeter.go b/tools/tm-monitor/eventmeter/eventmeter.go
new file mode 100644
index 000000000..185f37749
--- /dev/null
+++ b/tools/tm-monitor/eventmeter/eventmeter.go
@@ -0,0 +1,296 @@
+// eventmeter - generic system to subscribe to events and record their frequency.
+package eventmeter
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	metrics "github.com/rcrowley/go-metrics"
+
+	"github.com/tendermint/tendermint/libs/events"
+	"github.com/tendermint/tendermint/libs/log"
+	client "github.com/tendermint/tendermint/rpc/lib/client"
+)
+
+const (
+	// Get ping/pong latency and call LatencyCallbackFunc with this period.
+	latencyPeriod = 1 * time.Second
+
+	// Check if the WS client is connected every
+	connectionCheckPeriod = 100 * time.Millisecond
+)
+
+// EventMetric exposes metrics for an event.
+type EventMetric struct {
+	ID          string    `json:"id"`
+	Started     time.Time `json:"start_time"`
+	LastHeard   time.Time `json:"last_heard"`
+	MinDuration int64     `json:"min_duration"`
+	MaxDuration int64     `json:"max_duration"`
+
+	// tracks event count and rate
+	meter metrics.Meter
+
+	// filled in from the Meter
+	Count    int64   `json:"count"`
+	Rate1    float64 `json:"rate_1" amino:"unsafe"`
+	Rate5    float64 `json:"rate_5" amino:"unsafe"`
+	Rate15   float64 `json:"rate_15" amino:"unsafe"`
+	RateMean float64 `json:"rate_mean" amino:"unsafe"`
+
+	// so the event can have effects in the eventmeter's consumer. Runs in a
+	// goroutine.
+	callback EventCallbackFunc
+}
+
+func (metric *EventMetric) Copy() *EventMetric {
+	metricCopy := *metric
+	metricCopy.meter = metric.meter.Snapshot()
+	return &metricCopy
+}
+
+// called on GetMetric
+func (metric *EventMetric) fillMetric() *EventMetric {
+	metric.Count = metric.meter.Count()
+	metric.Rate1 = metric.meter.Rate1()
+	metric.Rate5 = metric.meter.Rate5()
+	metric.Rate15 = metric.meter.Rate15()
+	metric.RateMean = metric.meter.RateMean()
+	return metric
+}
+
+// EventCallbackFunc is a closure to enable side effects from receiving an
+// event.
+type EventCallbackFunc func(em *EventMetric, data interface{})
+
+// EventUnmarshalFunc is a closure to get the query and data out of the raw
+// JSON received over the RPC WebSocket.
+type EventUnmarshalFunc func(b json.RawMessage) (string, events.EventData, error)
+
+// LatencyCallbackFunc is a closure to enable side effects from receiving a latency.
+type LatencyCallbackFunc func(meanLatencyNanoSeconds float64)
+
+// DisconnectCallbackFunc is a closure to notify a consumer that the connection
+// has died.
+type DisconnectCallbackFunc func()
+
+// EventMeter tracks events, reports latency and disconnects.
+type EventMeter struct {
+	wsc *client.WSClient
+
+	mtx              sync.Mutex
+	queryToMetricMap map[string]*EventMetric
+
+	unmarshalEvent     EventUnmarshalFunc
+	latencyCallback    LatencyCallbackFunc
+	disconnectCallback DisconnectCallbackFunc
+	subscribed         bool
+
+	quit chan struct{}
+
+	logger log.Logger
+}
+
+func NewEventMeter(addr string, unmarshalEvent EventUnmarshalFunc) *EventMeter {
+	return &EventMeter{
+		wsc:              client.NewWSClient(addr, "/websocket", client.PingPeriod(1*time.Second)),
+		queryToMetricMap: make(map[string]*EventMetric),
+		unmarshalEvent:   unmarshalEvent,
+		logger:           log.NewNopLogger(),
+	}
+}
+
+// SetLogger lets you set your own logger.
+func (em *EventMeter) SetLogger(l log.Logger) {
+	em.logger = l
+	em.wsc.SetLogger(l.With("module", "rpcclient"))
+}
+
+// String returns a string representation of the event meter.
+func (em *EventMeter) String() string {
+	return em.wsc.Address
+}
+
+// Start boots up the event meter.
+func (em *EventMeter) Start() error {
+	if err := em.wsc.Start(); err != nil {
+		return err
+	}
+
+	em.quit = make(chan struct{})
+	go em.receiveRoutine()
+	go em.disconnectRoutine()
+
+	err := em.subscribe()
+	if err != nil {
+		return err
+	}
+	em.subscribed = true
+	return nil
+}
+
+// Stop stops the event meter.
+func (em *EventMeter) Stop() {
+	close(em.quit)
+
+	if em.wsc.IsRunning() {
+		em.wsc.Stop()
+	}
+}
+
+// Subscribe for the given query. The callback function will be called upon
+// receiving an event.
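+// The callback is invoked in its own goroutine for every matching event
+// (see updateMetric below), so it must be safe for concurrent use.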
+func (em *EventMeter) Subscribe(query string, cb EventCallbackFunc) error {
+	em.mtx.Lock()
+	defer em.mtx.Unlock()
+
+	if err := em.wsc.Subscribe(context.TODO(), query); err != nil {
+		return err
+	}
+
+	metric := &EventMetric{
+		meter:    metrics.NewMeter(),
+		callback: cb,
+	}
+	em.queryToMetricMap[query] = metric
+	return nil
+}
+
+// Unsubscribe from the given query.
+func (em *EventMeter) Unsubscribe(query string) error {
+	em.mtx.Lock()
+	defer em.mtx.Unlock()
+
+	return em.wsc.Unsubscribe(context.TODO(), query)
+}
+
+// GetMetric fills in the latest data for a query and returns a copy.
+func (em *EventMeter) GetMetric(query string) (*EventMetric, error) {
+	em.mtx.Lock()
+	defer em.mtx.Unlock()
+	metric, ok := em.queryToMetricMap[query]
+	if !ok {
+		return nil, fmt.Errorf("unknown query: %s", query)
+	}
+	return metric.fillMetric().Copy(), nil
+}
+
+// RegisterLatencyCallback allows you to set the latency callback.
+func (em *EventMeter) RegisterLatencyCallback(f LatencyCallbackFunc) {
+	em.mtx.Lock()
+	defer em.mtx.Unlock()
+	em.latencyCallback = f
+}
+
+// RegisterDisconnectCallback allows you to set the disconnect callback.
+func (em *EventMeter) RegisterDisconnectCallback(f DisconnectCallbackFunc) {
+	em.mtx.Lock()
+	defer em.mtx.Unlock()
+	em.disconnectCallback = f
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Private
+
+func (em *EventMeter) subscribe() error {
+	for query := range em.queryToMetricMap {
+		if err := em.wsc.Subscribe(context.TODO(), query); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (em *EventMeter) receiveRoutine() {
+	latencyTicker := time.NewTicker(latencyPeriod)
+	defer latencyTicker.Stop()
+	for {
+		select {
+		case resp := <-em.wsc.ResponsesCh:
+			if resp.Error != nil {
+				em.logger.Error("expected some event, got error", "err", resp.Error.Error())
+				continue
+			}
+			query, data, err := em.unmarshalEvent(resp.Result)
+			if err != nil {
+				em.logger.Error("failed to unmarshal event", "err", err)
+				continue
+			}
+			if query != "" { // FIXME how can it be an empty string?
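+				// A plausible (unconfirmed) answer to the FIXME: subscribe
+				// confirmations come back with an empty query and no event data.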
+				em.updateMetric(query, data)
+			}
+		case <-latencyTicker.C:
+			if em.wsc.IsActive() {
+				em.callLatencyCallback(em.wsc.PingPongLatencyTimer.Mean())
+			}
+		case <-em.wsc.Quit():
+			return
+		case <-em.quit:
+			return
+		}
+	}
+}
+
+func (em *EventMeter) disconnectRoutine() {
+	ticker := time.NewTicker(connectionCheckPeriod)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			if em.wsc.IsReconnecting() && em.subscribed { // notify user about disconnect only once
+				em.callDisconnectCallback()
+				em.subscribed = false
+			} else if !em.wsc.IsReconnecting() && !em.subscribed { // resubscribe
+				em.subscribe()
+				em.subscribed = true
+			}
+		case <-em.wsc.Quit():
+			return
+		case <-em.quit:
+			return
+		}
+	}
+}
+
+func (em *EventMeter) updateMetric(query string, data events.EventData) {
+	em.mtx.Lock()
+	defer em.mtx.Unlock()
+
+	metric, ok := em.queryToMetricMap[query]
+	if !ok {
+		// we already unsubscribed, or got an unexpected query
+		return
+	}
+
+	last := metric.LastHeard
+	metric.LastHeard = time.Now()
+	metric.meter.Mark(1)
+	dur := int64(metric.LastHeard.Sub(last))
+	if dur < metric.MinDuration {
+		metric.MinDuration = dur
+	}
+	if !last.IsZero() && dur > metric.MaxDuration {
+		metric.MaxDuration = dur
+	}
+
+	if metric.callback != nil {
+		go metric.callback(metric.Copy(), data)
+	}
+}
+
+func (em *EventMeter) callDisconnectCallback() {
+	em.mtx.Lock()
+	if em.disconnectCallback != nil {
+		go em.disconnectCallback()
+	}
+	em.mtx.Unlock()
+}
+
+func (em *EventMeter) callLatencyCallback(meanLatencyNanoSeconds float64) {
+	em.mtx.Lock()
+	if em.latencyCallback != nil {
+		go em.latencyCallback(meanLatencyNanoSeconds)
+	}
+	em.mtx.Unlock()
+}
diff --git a/tools/tm-monitor/main.go b/tools/tm-monitor/main.go
new file mode 100644
index 000000000..32897b978
--- /dev/null
+++ b/tools/tm-monitor/main.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+	monitor "github.com/tendermint/tendermint/tools/tm-monitor/monitor"
+)
+
+var logger = log.NewNopLogger()
+
+func main() {
+	var listenAddr string
+	var noton bool
+
+	flag.StringVar(&listenAddr, "listen-addr", "tcp://0.0.0.0:26670", "HTTP and Websocket server listen address")
+	flag.BoolVar(&noton, "no-ton", false, "Do not show ton (table of nodes)")
+
+	flag.Usage = func() {
+		fmt.Println(`Tendermint monitor watches over one or more Tendermint core
+applications, collecting and providing various statistics to the user.
+ +Usage: + tm-monitor [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints] + +Examples: + # monitor single instance + tm-monitor localhost:26657 + + # monitor a few instances by providing comma-separated list of RPC endpoints + tm-monitor host1:26657,host2:26657`) + fmt.Println("Flags:") + flag.PrintDefaults() + } + + flag.Parse() + + if flag.NArg() == 0 { + flag.Usage() + os.Exit(1) + } + + if noton { + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + } + + m := startMonitor(flag.Arg(0)) + + startRPC(listenAddr, m, logger) + + var ton *Ton + if !noton { + ton = NewTon(m) + ton.Start() + } + + cmn.TrapSignal(func() { + if !noton { + ton.Stop() + } + m.Stop() + }) +} + +func startMonitor(endpoints string) *monitor.Monitor { + m := monitor.NewMonitor() + m.SetLogger(logger.With("component", "monitor")) + + for _, e := range strings.Split(endpoints, ",") { + n := monitor.NewNode(e) + n.SetLogger(logger.With("node", e)) + if err := m.Monitor(n); err != nil { + panic(err) + } + } + + if err := m.Start(); err != nil { + panic(err) + } + + return m +} diff --git a/tools/tm-monitor/mock/eventmeter.go b/tools/tm-monitor/mock/eventmeter.go new file mode 100644 index 000000000..271297581 --- /dev/null +++ b/tools/tm-monitor/mock/eventmeter.go @@ -0,0 +1,69 @@ +package mock + +import ( + stdlog "log" + "reflect" + + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" + em "github.com/tendermint/tendermint/tools/tm-monitor/eventmeter" +) + +type EventMeter struct { + latencyCallback em.LatencyCallbackFunc + disconnectCallback em.DisconnectCallbackFunc + eventCallback em.EventCallbackFunc +} + +func (e *EventMeter) Start() error { return nil } +func (e *EventMeter) Stop() {} +func (e *EventMeter) SetLogger(l log.Logger) {} +func (e *EventMeter) RegisterLatencyCallback(cb em.LatencyCallbackFunc) { e.latencyCallback = cb } +func (e *EventMeter) RegisterDisconnectCallback(cb em.DisconnectCallbackFunc) { + e.disconnectCallback = cb +} +func (e *EventMeter) Subscribe(query string, cb em.EventCallbackFunc) error { + e.eventCallback = cb + return nil +} +func (e *EventMeter) Unsubscribe(query string) error { + e.eventCallback = nil + return nil +} + +func (e *EventMeter) Call(callback string, args ...interface{}) { + switch callback { + case "latencyCallback": + e.latencyCallback(args[0].(float64)) + case "disconnectCallback": + e.disconnectCallback() + case "eventCallback": + e.eventCallback(args[0].(*em.EventMetric), args[1]) + } +} + +type RpcClient struct { + Stubs map[string]interface{} + cdc *amino.Codec +} + +func (c *RpcClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { + s, ok := c.Stubs[method] + if !ok { + stdlog.Fatalf("Call to %s, but no stub is defined for it", method) + } + + rv, rt := reflect.ValueOf(result), reflect.TypeOf(result) + rv, rt = rv.Elem(), rt.Elem() + rv.Set(reflect.ValueOf(s)) + + return s, nil +} + +func (c *RpcClient) Codec() *amino.Codec { + return c.cdc +} + +func (c *RpcClient) SetCodec(cdc *amino.Codec) { + c.cdc = cdc +} diff --git a/tools/tm-monitor/monitor/monitor.go b/tools/tm-monitor/monitor/monitor.go new file mode 100644 index 000000000..764f281ff --- /dev/null +++ b/tools/tm-monitor/monitor/monitor.go @@ -0,0 +1,251 @@ +package monitor + +import ( + "fmt" + "math/rand" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/log" + tmtypes "github.com/tendermint/tendermint/types" +) + +// waiting more than this many seconds for a block means 
we're unhealthy
+const nodeLivenessTimeout = 5 * time.Second
+
+// Monitor keeps track of the nodes and updates common statistics upon
+// receiving new events from nodes.
+//
+// Common statistics are stored in the Network struct.
+type Monitor struct {
+	mtx   sync.Mutex
+	Nodes []*Node
+
+	Network *Network
+
+	monitorQuit chan struct{}            // monitor exiting
+	nodeQuit    map[string]chan struct{} // node is being stopped and removed from under the monitor
+
+	recalculateNetworkUptimeEvery time.Duration
+	numValidatorsUpdateInterval   time.Duration
+
+	logger log.Logger
+}
+
+// NewMonitor creates a new instance of a Monitor. You can provide options to
+// change some default values.
+//
+// Example:
+//     NewMonitor(monitor.SetNumValidatorsUpdateInterval(1 * time.Second))
+func NewMonitor(options ...func(*Monitor)) *Monitor {
+	m := &Monitor{
+		Nodes:                         make([]*Node, 0),
+		Network:                       NewNetwork(),
+		monitorQuit:                   make(chan struct{}),
+		nodeQuit:                      make(map[string]chan struct{}),
+		recalculateNetworkUptimeEvery: 10 * time.Second,
+		numValidatorsUpdateInterval:   5 * time.Second,
+		logger:                        log.NewNopLogger(),
+	}
+
+	for _, option := range options {
+		option(m)
+	}
+
+	return m
+}
+
+// RecalculateNetworkUptimeEvery lets you change the network uptime update interval.
+func RecalculateNetworkUptimeEvery(d time.Duration) func(m *Monitor) {
+	return func(m *Monitor) {
+		m.recalculateNetworkUptimeEvery = d
+	}
+}
+
+// SetNumValidatorsUpdateInterval lets you change the num validators update interval.
+func SetNumValidatorsUpdateInterval(d time.Duration) func(m *Monitor) {
+	return func(m *Monitor) {
+		m.numValidatorsUpdateInterval = d
+	}
+}
+
+// SetLogger lets you set your own logger.
+func (m *Monitor) SetLogger(l log.Logger) {
+	m.logger = l
+}
+
+// Monitor begins to monitor the node `n`. The node will be started and added
+// to the monitor.
+func (m *Monitor) Monitor(n *Node) error {
+	m.mtx.Lock()
+	m.Nodes = append(m.Nodes, n)
+	m.mtx.Unlock()
+
+	blockCh := make(chan tmtypes.Header, 10)
+	n.SendBlocksTo(blockCh)
+	blockLatencyCh := make(chan float64, 10)
+	n.SendBlockLatenciesTo(blockLatencyCh)
+	disconnectCh := make(chan bool, 10)
+	n.NotifyAboutDisconnects(disconnectCh)
+
+	if err := n.Start(); err != nil {
+		return err
+	}
+
+	m.Network.NewNode(n.Name)
+
+	m.nodeQuit[n.Name] = make(chan struct{})
+	go m.listen(n.Name, blockCh, blockLatencyCh, disconnectCh, m.nodeQuit[n.Name])
+
+	return nil
+}
+
+// Unmonitor stops monitoring node `n`. The node will be stopped and removed
+// from the monitor.
+func (m *Monitor) Unmonitor(n *Node) {
+	m.Network.NodeDeleted(n.Name)
+
+	n.Stop()
+	close(m.nodeQuit[n.Name])
+	delete(m.nodeQuit, n.Name)
+	i, _ := m.NodeByName(n.Name)
+
+	m.mtx.Lock()
+	m.Nodes[i] = m.Nodes[len(m.Nodes)-1]
+	m.Nodes = m.Nodes[:len(m.Nodes)-1]
+	m.mtx.Unlock()
+}
+
+// NodeByName returns the node and its index if such a node exists within the
+// monitor. Otherwise, -1 and nil are returned.
+func (m *Monitor) NodeByName(name string) (index int, node *Node) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for i, n := range m.Nodes {
+		if name == n.Name {
+			return i, n
+		}
+	}
+	return -1, nil
+}
+
+// NodeIsOnline is called when connection to the node is restored.
+// Must be safe to call multiple times.
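+// It only flips the node's Online flag if the Network already considers the
+// node online, so callers should invoke Network.NodeIsOnline first (as listen does).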
+func (m *Monitor) NodeIsOnline(name string) {
+	_, node := m.NodeByName(name)
+	if node != nil {
+		if online, ok := m.Network.nodeStatusMap[name]; ok && online {
+			m.mtx.Lock()
+			node.Online = online
+			m.mtx.Unlock()
+		}
+	}
+}
+
+// Start starts the monitor's routines: recalculating network uptime and
+// updating the number of validators.
+func (m *Monitor) Start() error {
+	go m.recalculateNetworkUptimeLoop()
+	go m.updateNumValidatorLoop()
+
+	return nil
+}
+
+// Stop stops the monitor's routines.
+func (m *Monitor) Stop() {
+	close(m.monitorQuit)
+
+	for _, n := range m.Nodes {
+		m.Unmonitor(n)
+	}
+}
+
+// main loop where we listen for events from the node
+func (m *Monitor) listen(nodeName string, blockCh <-chan tmtypes.Header, blockLatencyCh <-chan float64, disconnectCh <-chan bool, quit <-chan struct{}) {
+	logger := m.logger.With("node", nodeName)
+
+	for {
+		select {
+		case <-quit:
+			return
+		case b := <-blockCh:
+			m.Network.NewBlock(b)
+			m.Network.NodeIsOnline(nodeName)
+			m.NodeIsOnline(nodeName)
+		case l := <-blockLatencyCh:
+			m.Network.NewBlockLatency(l)
+			m.Network.NodeIsOnline(nodeName)
+			m.NodeIsOnline(nodeName)
+		case disconnected := <-disconnectCh:
+			if disconnected {
+				m.Network.NodeIsDown(nodeName)
+			} else {
+				m.Network.NodeIsOnline(nodeName)
+				m.NodeIsOnline(nodeName)
+			}
+		case <-time.After(nodeLivenessTimeout):
+			logger.Info("event", fmt.Sprintf("node was not responding for %v", nodeLivenessTimeout))
+			m.Network.NodeIsDown(nodeName)
+		}
+	}
+}
+
+// recalculateNetworkUptimeLoop recalculates network uptime every N seconds.
+func (m *Monitor) recalculateNetworkUptimeLoop() {
+	for {
+		select {
+		case <-m.monitorQuit:
+			return
+		case <-time.After(m.recalculateNetworkUptimeEvery):
+			m.Network.RecalculateUptime()
+		}
+	}
+}
+
+// updateNumValidatorLoop sends a request to a random node once every N seconds,
+// which in turn makes an RPC call to get the latest validators.
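+// If no nodes are currently monitored, it just sleeps for the interval and
+// checks again.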
+func (m *Monitor) updateNumValidatorLoop() {
+	rand.Seed(time.Now().Unix())
+
+	var height int64
+	var num int
+	var err error
+
+	for {
+		m.mtx.Lock()
+		nodesCount := len(m.Nodes)
+		m.mtx.Unlock()
+		if nodesCount == 0 {
+			time.Sleep(m.numValidatorsUpdateInterval)
+			continue
+		}
+
+		randomNodeIndex := rand.Intn(nodesCount)
+
+		select {
+		case <-m.monitorQuit:
+			return
+		case <-time.After(m.numValidatorsUpdateInterval):
+			m.mtx.Lock()
+			for i, n := range m.Nodes {
+				if i == randomNodeIndex {
+					height, num, err = n.NumValidators()
+					if err != nil {
+						m.logger.Info("err", errors.Wrap(err, "update num validators failed"))
+					}
+					break
+				}
+			}
+			m.mtx.Unlock()
+
+			m.Network.UpdateNumValidatorsForHeight(num, height)
+		}
+	}
+}
diff --git a/tools/tm-monitor/monitor/monitor_test.go b/tools/tm-monitor/monitor/monitor_test.go
new file mode 100644
index 000000000..9694e5771
--- /dev/null
+++ b/tools/tm-monitor/monitor/monitor_test.go
@@ -0,0 +1,72 @@
+package monitor_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	mock "github.com/tendermint/tendermint/tools/tm-monitor/mock"
+	monitor "github.com/tendermint/tendermint/tools/tm-monitor/monitor"
+	tmtypes "github.com/tendermint/tendermint/types"
+)
+
+func TestMonitorUpdatesNumberOfValidators(t *testing.T) {
+	m := startMonitor(t)
+	defer m.Stop()
+
+	n, _ := createValidatorNode(t)
+	m.Monitor(n)
+	assert.Equal(t, 1, m.Network.NumNodesMonitored)
+	assert.Equal(t, 1, m.Network.NumNodesMonitoredOnline)
+
+	time.Sleep(1 * time.Second)
+
+	// DATA RACE
+	// assert.Equal(t, 1, m.Network.NumValidators())
+}
+
+func TestMonitorRecalculatesNetworkUptime(t *testing.T) {
+	m := startMonitor(t)
+	defer m.Stop()
+	assert.Equal(t, 100.0, m.Network.Uptime())
+
+	n, _ := createValidatorNode(t)
+	m.Monitor(n)
+
+	m.Network.NodeIsDown(n.Name) // simulate node failure
+	time.Sleep(200 * time.Millisecond)
+	m.Network.NodeIsOnline(n.Name)
+	time.Sleep(1 * time.Second)
+
+	assert.True(t, m.Network.Uptime() < 100.0, "Uptime should be less than 100%")
+}
+
+func startMonitor(t *testing.T) *monitor.Monitor {
+	m := monitor.NewMonitor(
+		monitor.SetNumValidatorsUpdateInterval(200*time.Millisecond),
+		monitor.RecalculateNetworkUptimeEvery(200*time.Millisecond),
+	)
+	err := m.Start()
+	require.Nil(t, err)
+	return m
+}
+
+func createValidatorNode(t *testing.T) (n *monitor.Node, emMock *mock.EventMeter) {
+	emMock = &mock.EventMeter{}
+
+	stubs := make(map[string]interface{})
+	pubKey := ed25519.GenPrivKey().PubKey()
+	stubs["validators"] = ctypes.ResultValidators{BlockHeight: blockHeight, Validators: []*tmtypes.Validator{tmtypes.NewValidator(pubKey, 0)}}
+	stubs["status"] = ctypes.ResultStatus{ValidatorInfo: ctypes.ValidatorInfo{PubKey: pubKey}}
+	cdc := amino.NewCodec()
+	rpcClientMock := &mock.RpcClient{Stubs: stubs}
+	rpcClientMock.SetCodec(cdc)
+
+	n = monitor.NewNodeWithEventMeterAndRpcClient("tcp://127.0.0.1:26657", emMock, rpcClientMock)
+	return
+}
diff --git a/tools/tm-monitor/monitor/network.go b/tools/tm-monitor/monitor/network.go
new file mode 100644
index 000000000..9b147c06b
--- /dev/null
+++ b/tools/tm-monitor/monitor/network.go
@@ -0,0 +1,199 @@
+package monitor
+
+import (
+	"sync"
+	"time"
+
+	metrics "github.com/rcrowley/go-metrics"
+	tmtypes "github.com/tendermint/tendermint/types"
+)
+
+// UptimeData stores data for how
long the network has been running.
+type UptimeData struct {
+	StartTime time.Time `json:"start_time"`
+	Uptime    float64   `json:"uptime" amino:"unsafe"` // percentage of time we've been healthy, ever
+
+	totalDownTime time.Duration // total downtime (only updated when we come back online)
+	wentDown      time.Time
+}
+
+// Health describes the health of the network. Note that this applies only to
+// the observed nodes, and not to the entire cluster, which may consist of
+// thousands of machines. It may change in the future.
+type Health int
+
+const (
+	// FullHealth means all nodes online, synced, validators making blocks
+	FullHealth = Health(0)
+	// ModerateHealth means we're making blocks
+	ModerateHealth = Health(1)
+	// Dead means we're not making blocks due to all validators freezing or crashing
+	Dead = Health(2)
+)
+
+// Network holds common statistics for a network of nodes.
+type Network struct {
+	Height int64 `json:"height"`
+
+	AvgBlockTime      float64 `json:"avg_block_time" amino:"unsafe"` // ms (avg over last minute)
+	blockTimeMeter    metrics.Meter
+	AvgTxThroughput   float64 `json:"avg_tx_throughput" amino:"unsafe"` // tx/s (avg over last minute)
+	txThroughputMeter metrics.Meter
+	AvgBlockLatency   float64 `json:"avg_block_latency" amino:"unsafe"` // ms (avg over last minute)
+	blockLatencyMeter metrics.Meter
+
+	NumValidators           int `json:"num_validators"`
+	NumNodesMonitored       int `json:"num_nodes_monitored"`
+	NumNodesMonitoredOnline int `json:"num_nodes_monitored_online"`
+
+	Health Health `json:"health"`
+
+	UptimeData *UptimeData `json:"uptime_data"`
+
+	nodeStatusMap map[string]bool
+
+	mu sync.Mutex
+}
+
+func NewNetwork() *Network {
+	return &Network{
+		blockTimeMeter:    metrics.NewMeter(),
+		txThroughputMeter: metrics.NewMeter(),
+		blockLatencyMeter: metrics.NewMeter(),
+		Health:            FullHealth,
+		UptimeData: &UptimeData{
+			StartTime: time.Now(),
+			Uptime:    100.0,
+		},
+		nodeStatusMap: make(map[string]bool),
+	}
+}
+
+func (n *Network) NewBlock(b tmtypes.Header) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	if n.Height >= b.Height {
+		return
+	}
+
+	n.Height = b.Height
+
+	n.blockTimeMeter.Mark(1)
+	if n.blockTimeMeter.Rate1() > 0.0 {
+		n.AvgBlockTime = (1.0 / n.blockTimeMeter.Rate1()) * 1000 // 1/s to ms
+	} else {
+		n.AvgBlockTime = 0.0
+	}
+	n.txThroughputMeter.Mark(int64(b.NumTxs))
+	n.AvgTxThroughput = n.txThroughputMeter.Rate1()
+}
+
+func (n *Network) NewBlockLatency(l float64) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	n.blockLatencyMeter.Mark(int64(l))
+	n.AvgBlockLatency = n.blockLatencyMeter.Rate1() / 1000000.0 // ns to ms
+}
+
+// RecalculateUptime calculates uptime on demand.
+func (n *Network) RecalculateUptime() {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	since := time.Since(n.UptimeData.StartTime)
+	uptime := since - n.UptimeData.totalDownTime
+	if n.Health != FullHealth {
+		uptime -= time.Since(n.UptimeData.wentDown)
+	}
+	n.UptimeData.Uptime = (float64(uptime) / float64(since)) * 100.0
+}
+
+// NodeIsDown is called when the node disconnects for whatever reason.
+// Must be safe to call multiple times.
+func (n *Network) NodeIsDown(name string) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	if online, ok := n.nodeStatusMap[name]; !ok || online {
+		n.nodeStatusMap[name] = false
+		n.NumNodesMonitoredOnline--
+		n.UptimeData.wentDown = time.Now()
+		n.updateHealth()
+	}
+}
+
+// NodeIsOnline is called when connection to the node is restored.
+// Must be safe to call multiple times.
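+// Only an offline-to-online transition is recorded: the downtime accumulated
+// since wentDown is added to totalDownTime, and repeated calls are no-ops.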
+func (n *Network) NodeIsOnline(name string) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	if online, ok := n.nodeStatusMap[name]; ok && !online {
+		n.nodeStatusMap[name] = true
+		n.NumNodesMonitoredOnline++
+		n.UptimeData.totalDownTime += time.Since(n.UptimeData.wentDown)
+		n.updateHealth()
+	}
+}
+
+// NewNode is called when a new node is added to the monitor.
+func (n *Network) NewNode(name string) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	n.NumNodesMonitored++
+	n.NumNodesMonitoredOnline++
+}
+
+// NodeDeleted is called when the node is deleted from under the monitor.
+func (n *Network) NodeDeleted(name string) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	n.NumNodesMonitored--
+	n.NumNodesMonitoredOnline--
+}
+
+func (n *Network) updateHealth() {
+	// if we are connected to all validators, we're at full health
+	// TODO: make sure they're all at the same height (within a block)
+	// and all proposing (and possibly validating). Alternatively, just
+	// check there hasn't been a new round in numValidators rounds.
+	if n.NumValidators != 0 && n.NumNodesMonitoredOnline == n.NumValidators {
+		n.Health = FullHealth
+	} else if n.NumNodesMonitoredOnline > 0 && n.NumNodesMonitoredOnline <= n.NumNodesMonitored {
+		n.Health = ModerateHealth
+	} else {
+		n.Health = Dead
+	}
+}
+
+func (n *Network) UpdateNumValidatorsForHeight(num int, height int64) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+
+	if n.Height <= height {
+		n.NumValidators = num
+	}
+}
+
+func (n *Network) GetHealthString() string {
+	switch n.Health {
+	case FullHealth:
+		return "full"
+	case ModerateHealth:
+		return "moderate"
+	case Dead:
+		return "dead"
+	default:
+		return "undefined"
+	}
+}
+
+// Uptime returns the network's uptime as a percentage.
+func (n *Network) Uptime() float64 {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	return n.UptimeData.Uptime
+}
+
+// StartTime returns the time we started monitoring.
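+// It is written once in NewNetwork and never changed afterwards, which is why
+// it is read here without taking the mutex.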
+func (n *Network) StartTime() time.Time { + return n.UptimeData.StartTime +} diff --git a/tools/tm-monitor/monitor/network_test.go b/tools/tm-monitor/monitor/network_test.go new file mode 100644 index 000000000..df2d42813 --- /dev/null +++ b/tools/tm-monitor/monitor/network_test.go @@ -0,0 +1,80 @@ +package monitor_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + monitor "github.com/tendermint/tendermint/tools/tm-monitor/monitor" + tmtypes "github.com/tendermint/tendermint/types" +) + +func TestNetworkNewBlock(t *testing.T) { + n := monitor.NewNetwork() + + n.NewBlock(tmtypes.Header{Height: 5, NumTxs: 100}) + assert.Equal(t, int64(5), n.Height) + assert.Equal(t, 0.0, n.AvgBlockTime) + assert.Equal(t, 0.0, n.AvgTxThroughput) +} + +func TestNetworkNewBlockLatency(t *testing.T) { + n := monitor.NewNetwork() + + n.NewBlockLatency(9000000.0) // nanoseconds + assert.Equal(t, 0.0, n.AvgBlockLatency) +} + +func TestNetworkNodeIsDownThenOnline(t *testing.T) { + n := monitor.NewNetwork() + n.NewNode("test") + + n.NodeIsDown("test") + assert.Equal(t, 0, n.NumNodesMonitoredOnline) + assert.Equal(t, monitor.Dead, n.Health) + n.NodeIsDown("test") + assert.Equal(t, 0, n.NumNodesMonitoredOnline) + + n.NodeIsOnline("test") + assert.Equal(t, 1, n.NumNodesMonitoredOnline) + assert.Equal(t, monitor.ModerateHealth, n.Health) + n.NodeIsOnline("test") + assert.Equal(t, 1, n.NumNodesMonitoredOnline) +} + +func TestNetworkNewNode(t *testing.T) { + n := monitor.NewNetwork() + assert.Equal(t, 0, n.NumNodesMonitored) + assert.Equal(t, 0, n.NumNodesMonitoredOnline) + n.NewNode("test") + assert.Equal(t, 1, n.NumNodesMonitored) + assert.Equal(t, 1, n.NumNodesMonitoredOnline) +} + +func TestNetworkNodeDeleted(t *testing.T) { + n := monitor.NewNetwork() + n.NewNode("test") + n.NodeDeleted("test") + assert.Equal(t, 0, n.NumNodesMonitored) + assert.Equal(t, 0, n.NumNodesMonitoredOnline) +} + +func TestNetworkGetHealthString(t *testing.T) { + n := monitor.NewNetwork() + assert.Equal(t, "full", n.GetHealthString()) + n.Health = monitor.ModerateHealth + assert.Equal(t, "moderate", n.GetHealthString()) + n.Health = monitor.Dead + assert.Equal(t, "dead", n.GetHealthString()) +} + +func TestNetworkUptime(t *testing.T) { + n := monitor.NewNetwork() + assert.Equal(t, 100.0, n.Uptime()) +} + +func TestNetworkStartTime(t *testing.T) { + n := monitor.NewNetwork() + assert.True(t, n.StartTime().Before(time.Now())) +} diff --git a/tools/tm-monitor/monitor/node.go b/tools/tm-monitor/monitor/node.go new file mode 100644 index 000000000..b99870207 --- /dev/null +++ b/tools/tm-monitor/monitor/node.go @@ -0,0 +1,261 @@ +package monitor + +import ( + "encoding/json" + "math" + "time" + + "github.com/pkg/errors" + + crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/events" + "github.com/tendermint/tendermint/libs/log" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpc_client "github.com/tendermint/tendermint/rpc/lib/client" + em "github.com/tendermint/tendermint/tools/tm-monitor/eventmeter" + tmtypes "github.com/tendermint/tendermint/types" +) + +const maxRestarts = 25 + +type Node struct { + rpcAddr string + + IsValidator bool `json:"is_validator"` // validator or non-validator? + pubKey crypto.PubKey `json:"pub_key"` + + Name string `json:"name"` + Online bool `json:"online"` + Height int64 `json:"height"` + BlockLatency float64 `json:"block_latency" amino:"unsafe"` // ms, interval between block commits + + // em holds the ws connection. 
Each eventMeter callback is called in a separate goroutine.
+	em eventMeter
+
+	// rpcClient is a client for making RPC calls to TM
+	rpcClient rpc_client.HTTPClient
+
+	blockCh        chan<- tmtypes.Header
+	blockLatencyCh chan<- float64
+	disconnectCh   chan<- bool
+
+	checkIsValidatorInterval time.Duration
+
+	quit chan struct{}
+
+	logger log.Logger
+}
+
+func NewNode(rpcAddr string, options ...func(*Node)) *Node {
+	em := em.NewEventMeter(rpcAddr, UnmarshalEvent)
+	rpcClient := rpc_client.NewURIClient(rpcAddr) // HTTP client by default
+	rpcClient.SetCodec(cdc)
+	return NewNodeWithEventMeterAndRpcClient(rpcAddr, em, rpcClient, options...)
+}
+
+func NewNodeWithEventMeterAndRpcClient(rpcAddr string, em eventMeter, rpcClient rpc_client.HTTPClient, options ...func(*Node)) *Node {
+	n := &Node{
+		rpcAddr:                  rpcAddr,
+		em:                       em,
+		rpcClient:                rpcClient,
+		Name:                     rpcAddr,
+		quit:                     make(chan struct{}),
+		checkIsValidatorInterval: 5 * time.Second,
+		logger:                   log.NewNopLogger(),
+	}
+
+	for _, option := range options {
+		option(n)
+	}
+
+	return n
+}
+
+// SetCheckIsValidatorInterval lets you change the interval for checking
+// whether the node is still a validator.
+func SetCheckIsValidatorInterval(d time.Duration) func(n *Node) {
+	return func(n *Node) {
+		n.checkIsValidatorInterval = d
+	}
+}
+
+func (n *Node) SendBlocksTo(ch chan<- tmtypes.Header) {
+	n.blockCh = ch
+}
+
+func (n *Node) SendBlockLatenciesTo(ch chan<- float64) {
+	n.blockLatencyCh = ch
+}
+
+func (n *Node) NotifyAboutDisconnects(ch chan<- bool) {
+	n.disconnectCh = ch
+}
+
+// SetLogger lets you set your own logger.
+func (n *Node) SetLogger(l log.Logger) {
+	n.logger = l
+	n.em.SetLogger(l)
+}
+
+func (n *Node) Start() error {
+	if err := n.em.Start(); err != nil {
+		return err
+	}
+
+	n.em.RegisterLatencyCallback(latencyCallback(n))
+	err := n.em.Subscribe(tmtypes.EventQueryNewBlockHeader.String(), newBlockCallback(n))
+	if err != nil {
+		return err
+	}
+	n.em.RegisterDisconnectCallback(disconnectCallback(n))
+
+	n.Online = true
+
+	n.checkIsValidator()
+	go n.checkIsValidatorLoop()
+
+	return nil
+}
+
+func (n *Node) Stop() {
+	n.Online = false
+
+	n.em.Stop()
+
+	close(n.quit)
+}
+
+// implements eventmeter.EventCallbackFunc
+func newBlockCallback(n *Node) em.EventCallbackFunc {
+	return func(metric *em.EventMetric, data interface{}) {
+		block := data.(tmtypes.TMEventData).(tmtypes.EventDataNewBlockHeader).Header
+
+		n.Height = block.Height
+		n.logger.Info("new block", "height", block.Height, "numTxs", block.NumTxs)
+
+		if n.blockCh != nil {
+			n.blockCh <- block
+		}
+	}
+}
+
+// implements eventmeter.EventLatencyFunc
+func latencyCallback(n *Node) em.LatencyCallbackFunc {
+	return func(latency float64) {
+		n.BlockLatency = latency / 1000000.0 // ns to ms
+		n.logger.Info("new block latency", "latency", n.BlockLatency)
+
+		if n.blockLatencyCh != nil {
+			n.blockLatencyCh <- latency
+		}
+	}
+}
+
+// implements eventmeter.DisconnectCallbackFunc
+func disconnectCallback(n *Node) em.DisconnectCallbackFunc {
+	return func() {
+		n.Online = false
+		n.logger.Info("status", "down")
+
+		if n.disconnectCh != nil {
+			n.disconnectCh <- true
+		}
+	}
+}
+
+func (n *Node) RestartEventMeterBackoff() error {
+	attempt := 0
+
+	for {
+		d := time.Duration(math.Exp2(float64(attempt)))
+		time.Sleep(d * time.Second) // 2^attempt seconds
+
+		if err := n.em.Start(); err != nil {
+			n.logger.Info("restart failed", "err", err)
+		} else {
+			// TODO: authenticate pubkey
+			return nil
+		}
+
+		attempt++
+
+		if attempt > maxRestarts {
+			return errors.New("reached max restarts")
+		}
+	}
+}
+
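+// For reference, the backoff above sleeps 2^attempt seconds (1s, 2s, 4s, ...),
+// so with maxRestarts = 25 a caller should treat RestartEventMeterBackoff as a
+// potentially very long-running call.
+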
+func (n *Node) NumValidators() (height int64, num int, err error) {
+	height, vals, err := n.validators()
+	if err != nil {
+		return 0, 0, err
+	}
+	return height, len(vals), nil
+}
+
+func (n *Node) validators() (height int64, validators []*tmtypes.Validator, err error) {
+	vals := new(ctypes.ResultValidators)
+	if _, err = n.rpcClient.Call("validators", nil, vals); err != nil {
+		return 0, make([]*tmtypes.Validator, 0), err
+	}
+	return vals.BlockHeight, vals.Validators, nil
+}
+
+func (n *Node) checkIsValidatorLoop() {
+	for {
+		select {
+		case <-n.quit:
+			return
+		case <-time.After(n.checkIsValidatorInterval):
+			n.checkIsValidator()
+		}
+	}
+}
+
+func (n *Node) checkIsValidator() {
+	_, validators, err := n.validators()
+	if err == nil {
+		// getPubKey caches the key, so fetch it once before scanning the set
+		key, err1 := n.getPubKey()
+		for _, v := range validators {
+			// TODO: use bytes.Equal
+			if err1 == nil && v.PubKey == key {
+				n.IsValidator = true
+			}
+		}
+	} else {
+		n.logger.Info("check is validator failed", "err", err)
+	}
+}
+
+func (n *Node) getPubKey() (crypto.PubKey, error) {
+	if n.pubKey != nil {
+		return n.pubKey, nil
+	}
+
+	status := new(ctypes.ResultStatus)
+	_, err := n.rpcClient.Call("status", nil, status)
+	if err != nil {
+		return nil, err
+	}
+	n.pubKey = status.ValidatorInfo.PubKey
+	return n.pubKey, nil
+}
+
+type eventMeter interface {
+	Start() error
+	Stop()
+	RegisterLatencyCallback(em.LatencyCallbackFunc)
+	RegisterDisconnectCallback(em.DisconnectCallbackFunc)
+	Subscribe(string, em.EventCallbackFunc) error
+	Unsubscribe(string) error
+	SetLogger(l log.Logger)
+}
+
+// UnmarshalEvent unmarshals a JSON event.
+func UnmarshalEvent(b json.RawMessage) (string, events.EventData, error) {
+	event := new(ctypes.ResultEvent)
+	if err := cdc.UnmarshalJSON(b, event); err != nil {
+		return "", nil, err
+	}
+	return event.Query, event.Data, nil
+}
diff --git a/tools/tm-monitor/monitor/node_test.go b/tools/tm-monitor/monitor/node_test.go
new file mode 100644
index 000000000..10c2a13f1
--- /dev/null
+++ b/tools/tm-monitor/monitor/node_test.go
@@ -0,0 +1,93 @@
+package monitor_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	amino "github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	em "github.com/tendermint/tendermint/tools/tm-monitor/eventmeter"
+	mock "github.com/tendermint/tendermint/tools/tm-monitor/mock"
+	monitor "github.com/tendermint/tendermint/tools/tm-monitor/monitor"
+	tmtypes "github.com/tendermint/tendermint/types"
+)
+
+const (
+	blockHeight = int64(1)
+)
+
+func TestNodeStartStop(t *testing.T) {
+	n, _ := startValidatorNode(t)
+	defer n.Stop()
+
+	assert.Equal(t, true, n.Online)
+	assert.Equal(t, true, n.IsValidator)
+}
+
+func TestNodeNewBlockReceived(t *testing.T) {
+	blockCh := make(chan tmtypes.Header, 100)
+	n, emMock := startValidatorNode(t)
+	defer n.Stop()
+	n.SendBlocksTo(blockCh)
+
+	blockHeader := tmtypes.Header{Height: 5}
+	emMock.Call("eventCallback", &em.EventMetric{}, tmtypes.EventDataNewBlockHeader{Header: blockHeader})
+
+	assert.Equal(t, int64(5), n.Height)
+	assert.Equal(t, blockHeader, <-blockCh)
+}
+
+func TestNodeNewBlockLatencyReceived(t *testing.T) {
+	blockLatencyCh := make(chan float64, 100)
+	n, emMock := startValidatorNode(t)
+	defer n.Stop()
+	n.SendBlockLatenciesTo(blockLatencyCh)
+
+	emMock.Call("latencyCallback", 1000000.0)
+
+	assert.Equal(t, 1.0, n.BlockLatency)
+	assert.Equal(t, 1000000.0, <-blockLatencyCh)
+}
+
+func 
TestNodeConnectionLost(t *testing.T) { + disconnectCh := make(chan bool, 100) + n, emMock := startValidatorNode(t) + defer n.Stop() + n.NotifyAboutDisconnects(disconnectCh) + + emMock.Call("disconnectCallback") + + assert.Equal(t, true, <-disconnectCh) + assert.Equal(t, false, n.Online) +} + +func TestNumValidators(t *testing.T) { + n, _ := startValidatorNode(t) + defer n.Stop() + + height, num, err := n.NumValidators() + assert.Nil(t, err) + assert.Equal(t, blockHeight, height) + assert.Equal(t, 1, num) +} + +func startValidatorNode(t *testing.T) (n *monitor.Node, emMock *mock.EventMeter) { + emMock = &mock.EventMeter{} + + stubs := make(map[string]interface{}) + pubKey := ed25519.GenPrivKey().PubKey() + stubs["validators"] = ctypes.ResultValidators{BlockHeight: blockHeight, Validators: []*tmtypes.Validator{tmtypes.NewValidator(pubKey, 0)}} + stubs["status"] = ctypes.ResultStatus{ValidatorInfo: ctypes.ValidatorInfo{PubKey: pubKey}} + cdc := amino.NewCodec() + rpcClientMock := &mock.RpcClient{Stubs: stubs} + rpcClientMock.SetCodec(cdc) + + n = monitor.NewNodeWithEventMeterAndRpcClient("tcp://127.0.0.1:26657", emMock, rpcClientMock) + + err := n.Start() + require.Nil(t, err) + return +} diff --git a/tools/tm-monitor/monitor/wire.go b/tools/tm-monitor/monitor/wire.go new file mode 100644 index 000000000..696b02778 --- /dev/null +++ b/tools/tm-monitor/monitor/wire.go @@ -0,0 +1,12 @@ +package monitor + +import ( + amino "github.com/tendermint/go-amino" + ctypes "github.com/tendermint/tendermint/rpc/core/types" +) + +var cdc = amino.NewCodec() + +func init() { + ctypes.RegisterAmino(cdc) +} diff --git a/tools/tm-monitor/rpc.go b/tools/tm-monitor/rpc.go new file mode 100644 index 000000000..ab62e0462 --- /dev/null +++ b/tools/tm-monitor/rpc.go @@ -0,0 +1,124 @@ +package main + +import ( + "errors" + "net/http" + + "github.com/tendermint/tendermint/libs/log" + rpc "github.com/tendermint/tendermint/rpc/lib/server" + monitor "github.com/tendermint/tendermint/tools/tm-monitor/monitor" +) + +func startRPC(listenAddr string, m *monitor.Monitor, logger log.Logger) { + routes := routes(m) + + mux := http.NewServeMux() + wm := rpc.NewWebsocketManager(routes, nil) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + rpc.RegisterRPCFuncs(mux, routes, cdc, logger) + if _, err := rpc.StartHTTPServer(listenAddr, mux, logger, rpc.Config{}); err != nil { + panic(err) + } +} + +func routes(m *monitor.Monitor) map[string]*rpc.RPCFunc { + return map[string]*rpc.RPCFunc{ + "status": rpc.NewRPCFunc(RPCStatus(m), ""), + "status/network": rpc.NewRPCFunc(RPCNetworkStatus(m), ""), + "status/node": rpc.NewRPCFunc(RPCNodeStatus(m), "name"), + "monitor": rpc.NewRPCFunc(RPCMonitor(m), "endpoint"), + "unmonitor": rpc.NewRPCFunc(RPCUnmonitor(m), "endpoint"), + + // "start_meter": rpc.NewRPCFunc(network.StartMeter, "chainID,valID,event"), + // "stop_meter": rpc.NewRPCFunc(network.StopMeter, "chainID,valID,event"), + // "meter": rpc.NewRPCFunc(GetMeterResult(network), "chainID,valID,event"), + } +} + +// RPCStatus returns common statistics for the network and statistics per node. +func RPCStatus(m *monitor.Monitor) interface{} { + return func() (networkAndNodes, error) { + return networkAndNodes{m.Network, m.Nodes}, nil + } +} + +// RPCNetworkStatus returns common statistics for the network. +func RPCNetworkStatus(m *monitor.Monitor) interface{} { + return func() (*monitor.Network, error) { + return m.Network, nil + } +} + +// RPCNodeStatus returns statistics for the given node. 
+func RPCNodeStatus(m *monitor.Monitor) interface{} {
+	return func(name string) (*monitor.Node, error) {
+		if i, n := m.NodeByName(name); i != -1 {
+			return n, nil
+		}
+		return nil, errors.New("cannot find node with that name")
+	}
+}
+
+// RPCMonitor allows you to dynamically add an endpoint under the monitor.
+// Safe to call multiple times.
+func RPCMonitor(m *monitor.Monitor) interface{} {
+	return func(endpoint string) (*monitor.Node, error) {
+		i, n := m.NodeByName(endpoint)
+		if i == -1 {
+			n = monitor.NewNode(endpoint)
+			if err := m.Monitor(n); err != nil {
+				return nil, err
+			}
+		}
+		return n, nil
+	}
+}
+
+// RPCUnmonitor removes the given endpoint from under the monitor.
+func RPCUnmonitor(m *monitor.Monitor) interface{} {
+	return func(endpoint string) (bool, error) {
+		if i, n := m.NodeByName(endpoint); i != -1 {
+			m.Unmonitor(n)
+			return true, nil
+		}
+		return false, errors.New("cannot find node with that name")
+	}
+}
+
+// func (tn *TendermintNetwork) StartMeter(chainID, valID, eventID string) error {
+// 	tn.mtx.Lock()
+// 	defer tn.mtx.Unlock()
+// 	val, err := tn.getChainVal(chainID, valID)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	return val.EventMeter().Subscribe(eventID, nil)
+// }
+
+// func (tn *TendermintNetwork) StopMeter(chainID, valID, eventID string) error {
+// 	tn.mtx.Lock()
+// 	defer tn.mtx.Unlock()
+// 	val, err := tn.getChainVal(chainID, valID)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	return val.EventMeter().Unsubscribe(eventID)
+// }
+
+// func (tn *TendermintNetwork) GetMeter(chainID, valID, eventID string) (*eventmeter.EventMetric, error) {
+// 	tn.mtx.Lock()
+// 	defer tn.mtx.Unlock()
+// 	val, err := tn.getChainVal(chainID, valID)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	return val.EventMeter().GetMetric(eventID)
+// }
+
+//--> types
+
+type networkAndNodes struct {
+	Network *monitor.Network `json:"network"`
+	Nodes   []*monitor.Node  `json:"nodes"`
+}
diff --git a/tools/tm-monitor/ton.go b/tools/tm-monitor/ton.go
new file mode 100644
index 000000000..cad17b39c
--- /dev/null
+++ b/tools/tm-monitor/ton.go
@@ -0,0 +1,100 @@
+package main
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"text/tabwriter"
+	"time"
+
+	monitor "github.com/tendermint/tendermint/tools/tm-monitor/monitor"
+)
+
+const (
+	// Default refresh rate - 200ms
+	defaultRefreshRate = time.Millisecond * 200
+)
+
+// Ton - table of nodes.
+//
+// It produces an unordered list of nodes and updates it periodically.
+//
+// Default output is stdout, but it can be changed. Note that if you want
+// refresh to work properly, the output must support [ANSI escape
+// codes](http://en.wikipedia.org/wiki/ANSI_escape_code).
+//
+// Ton was inspired by the [Linux top
+// program](https://en.wikipedia.org/wiki/Top_(software)), as the name suggests.
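+//
+// Start launches the refresher goroutine; Stop closes the quit channel to
+// halt it.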
+type Ton struct { + monitor *monitor.Monitor + + RefreshRate time.Duration + Output io.Writer + quit chan struct{} +} + +func NewTon(m *monitor.Monitor) *Ton { + return &Ton{ + RefreshRate: defaultRefreshRate, + Output: os.Stdout, + quit: make(chan struct{}), + monitor: m, + } +} + +func (o *Ton) Start() { + clearScreen(o.Output) + o.Print() + go o.refresher() +} + +func (o *Ton) Print() { + moveCursor(o.Output, 1, 1) + o.printHeader() + fmt.Fprintln(o.Output) + o.printTable() +} + +func (o *Ton) Stop() { + close(o.quit) +} + +func (o *Ton) printHeader() { + n := o.monitor.Network + fmt.Fprintf(o.Output, "%v up %.2f%%\n", n.StartTime(), n.Uptime()) + fmt.Fprintln(o.Output) + fmt.Fprintf(o.Output, "Height: %d\n", n.Height) + fmt.Fprintf(o.Output, "Avg block time: %.3f ms\n", n.AvgBlockTime) + fmt.Fprintf(o.Output, "Avg tx throughput: %.0f per sec\n", n.AvgTxThroughput) + fmt.Fprintf(o.Output, "Avg block latency: %.3f ms\n", n.AvgBlockLatency) + fmt.Fprintf(o.Output, "Active nodes: %d/%d (health: %s) Validators: %d\n", n.NumNodesMonitoredOnline, n.NumNodesMonitored, n.GetHealthString(), n.NumValidators) +} + +func (o *Ton) printTable() { + w := tabwriter.NewWriter(o.Output, 0, 0, 5, ' ', 0) + fmt.Fprintln(w, "NAME\tHEIGHT\tBLOCK LATENCY\tONLINE\tVALIDATOR\t") + for _, n := range o.monitor.Nodes { + fmt.Fprintln(w, fmt.Sprintf("%s\t%d\t%.3f ms\t%v\t%v\t", n.Name, n.Height, n.BlockLatency, n.Online, n.IsValidator)) + } + w.Flush() +} + +// Internal loop for refreshing +func (o *Ton) refresher() { + for { + select { + case <-o.quit: + return + case <-time.After(o.RefreshRate): + o.Print() + } + } +} + +func clearScreen(w io.Writer) { + fmt.Fprint(w, "\033[2J") +} + +func moveCursor(w io.Writer, x int, y int) { + fmt.Fprintf(w, "\033[%d;%dH", x, y) +} diff --git a/tools/tm-monitor/wire.go b/tools/tm-monitor/wire.go new file mode 100644 index 000000000..071c363b0 --- /dev/null +++ b/tools/tm-monitor/wire.go @@ -0,0 +1,12 @@ +package main + +import ( + amino "github.com/tendermint/go-amino" + ctypes "github.com/tendermint/tendermint/rpc/core/types" +) + +var cdc = amino.NewCodec() + +func init() { + ctypes.RegisterAmino(cdc) +} diff --git a/types/block.go b/types/block.go index 6adc0c4c4..488570764 100644 --- a/types/block.go +++ b/types/block.go @@ -8,17 +8,17 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" - "golang.org/x/crypto/ripemd160" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" ) // Block defines the atomic unit of a Tendermint blockchain.
// TODO: add Version byte type Block struct { mtx sync.Mutex - *Header `json:"header"` - *Data `json:"data"` + Header `json:"header"` + Data `json:"data"` Evidence EvidenceData `json:"evidence"` LastCommit *Commit `json:"last_commit"` } @@ -27,15 +27,15 @@ type Block struct { // It populates the same set of fields validated by ValidateBasic func MakeBlock(height int64, txs []Tx, commit *Commit) *Block { block := &Block{ - Header: &Header{ + Header: Header{ Height: height, Time: time.Now(), NumTxs: int64(len(txs)), }, - LastCommit: commit, - Data: &Data{ + Data: Data{ Txs: txs, }, + LastCommit: commit, } block.fillHeader() return block @@ -43,6 +43,9 @@ func MakeBlock(height int64, txs []Tx, commit *Commit) *Block { // AddEvidence appends the given evidence to the block func (b *Block) AddEvidence(evidence []Evidence) { + if b == nil { + return + } b.Evidence.Evidence = append(b.Evidence.Evidence, evidence...) } @@ -98,7 +101,7 @@ func (b *Block) Hash() cmn.HexBytes { b.mtx.Lock() defer b.mtx.Unlock() - if b == nil || b.Header == nil || b.Data == nil || b.LastCommit == nil { + if b == nil || b.LastCommit == nil { return nil } b.fillHeader() @@ -107,6 +110,7 @@ func (b *Block) Hash() cmn.HexBytes { // MakePartSet returns a PartSet containing parts of a serialized block. // This is the form in which the block is gossipped to peers. +// CONTRACT: partSize is greater than zero. func (b *Block) MakePartSet(partSize int) *PartSet { if b == nil { return nil @@ -208,7 +212,7 @@ type Header struct { // Hash returns the hash of the header. // Returns nil if ValidatorHash is missing, // since a Header is not valid unless there is -// a ValidaotrsHash (corresponding to the validator set). +// a ValidatorsHash (corresponding to the validator set). func (h *Header) Hash() cmn.HexBytes { if h == nil || len(h.ValidatorsHash) == 0 { return nil @@ -392,6 +396,9 @@ func (commit *Commit) ValidateBasic() error { // Hash returns the hash of the commit func (commit *Commit) Hash() cmn.HexBytes { + if commit == nil { + return nil + } if commit.hash == nil { bs := make([]merkle.Hasher, len(commit.Precommits)) for i, precommit := range commit.Precommits { @@ -464,7 +471,7 @@ func (data *Data) StringIndented(indent string) string { txStrings[i] = fmt.Sprintf("... (%v total)", len(data.Txs)) break } - txStrings[i] = fmt.Sprintf("Tx:%v", tx) + txStrings[i] = fmt.Sprintf("%X (%d bytes)", tx.Hash(), len(tx)) } return fmt.Sprintf(`Data{ %s %v @@ -504,7 +511,7 @@ func (data *EvidenceData) StringIndented(indent string) string { } evStrings[i] = fmt.Sprintf("Evidence:%v", ev) } - return fmt.Sprintf(`Data{ + return fmt.Sprintf(`EvidenceData{ %s %v %s}#%v`, indent, strings.Join(evStrings, "\n"+indent+" "), @@ -552,7 +559,7 @@ type hasher struct { } func (h hasher) Hash() []byte { - hasher := ripemd160.New() + hasher := tmhash.New() if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) { bz, err := cdc.MarshalBinaryBare(h.item) if err != nil { diff --git a/types/block_meta.go b/types/block_meta.go index 6dd502e4f..d8926af0b 100644 --- a/types/block_meta.go +++ b/types/block_meta.go @@ -3,7 +3,7 @@ package types // BlockMeta contains meta information about a block - namely, it's ID and Header. type BlockMeta struct { BlockID BlockID `json:"block_id"` // the block hash and partsethash - Header *Header `json:"header"` // The block's Header + Header Header `json:"header"` // The block's Header } // NewBlockMeta returns a new BlockMeta from the block and its blockParts. 
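The block.go changes above replace the *Header and *Data pointers with embedded values and add nil-guards to AddEvidence, Hash, and Commit.Hash. A minimal sketch of how the changed API fits together, using only MakeBlock, Hash, and MakePartSet as they appear in this diff (the height, transaction bytes, and part size are arbitrary):

    package main

    import (
        "fmt"

        "github.com/tendermint/tendermint/types"
    )

    func main() {
        // Header and Data are now embedded by value, so header fields are
        // reachable directly on the block.
        block := types.MakeBlock(3, []types.Tx{types.Tx("Hello World")}, nil)
        fmt.Println(block.Height, block.NumTxs)

        // With a nil LastCommit, Hash returns nil rather than a partial hash.
        fmt.Println(block.Hash())

        // MakePartSet requires partSize > 0 (see the CONTRACT comment above).
        parts := block.MakePartSet(1024)
        fmt.Println(parts.Total()) // 1 for a block this small
    }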
diff --git a/types/block_test.go b/types/block_test.go index 1132a6f5f..1d27a7746 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -7,10 +7,28 @@ import ( "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) -func TestValidateBlock(t *testing.T) { +func TestBlockAddEvidence(t *testing.T) { + txs := []Tx{Tx("foo"), Tx("bar")} + lastID := makeBlockIDRandom() + h := int64(3) + + voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) + require.NoError(t, err) + + block := MakeBlock(h, txs, commit) + require.NotNil(t, block) + + ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address) + block.AddEvidence([]Evidence{ev}) +} + +func TestBlockValidateBasic(t *testing.T) { + require.Error(t, (*Block)(nil).ValidateBasic()) + txs := []Tx{Tx("foo"), Tx("bar")} lastID := makeBlockIDRandom() h := int64(3) @@ -57,6 +75,59 @@ func TestValidateBlock(t *testing.T) { block.DataHash = cmn.RandBytes(len(block.DataHash)) err = block.ValidateBasic() require.Error(t, err) + + // tamper with evidence + block = MakeBlock(h, txs, commit) + block.EvidenceHash = []byte("something else") + err = block.ValidateBasic() + require.Error(t, err) +} + +func TestBlockHash(t *testing.T) { + assert.Nil(t, (*Block)(nil).Hash()) + assert.Nil(t, MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil).Hash()) +} + +func TestBlockMakePartSet(t *testing.T) { + assert.Nil(t, (*Block)(nil).MakePartSet(2)) + + partSet := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil).MakePartSet(1024) + assert.NotNil(t, partSet) + assert.Equal(t, 1, partSet.Total()) +} + +func TestBlockHashesTo(t *testing.T) { + assert.False(t, (*Block)(nil).HashesTo(nil)) + + lastID := makeBlockIDRandom() + h := int64(3) + voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) + require.NoError(t, err) + + block := MakeBlock(h, []Tx{Tx("Hello World")}, commit) + block.ValidatorsHash = valSet.Hash() + assert.False(t, block.HashesTo([]byte{})) + assert.False(t, block.HashesTo([]byte("something else"))) + assert.True(t, block.HashesTo(block.Hash())) +} + +func TestBlockSize(t *testing.T) { + size := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil).Size() + if size <= 0 { + t.Fatal("Size of the block is zero or negative") + } +} + +func TestBlockString(t *testing.T) { + assert.Equal(t, "nil-Block", (*Block)(nil).String()) + assert.Equal(t, "nil-Block", (*Block)(nil).StringIndented("")) + assert.Equal(t, "nil-Block", (*Block)(nil).StringShort()) + + block := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil) + assert.NotEqual(t, "nil-Block", block.String()) + assert.NotEqual(t, "nil-Block", block.StringIndented("")) + assert.NotEqual(t, "nil-Block", block.StringShort()) } func makeBlockIDRandom() BlockID { @@ -86,3 +157,61 @@ func TestNilDataHashDoesntCrash(t *testing.T) { assert.Equal(t, []byte((*Data)(nil).Hash()), nilBytes) assert.Equal(t, []byte(new(Data).Hash()), nilBytes) } + +func TestCommit(t *testing.T) { + lastID := makeBlockIDRandom() + h := int64(3) + voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) + require.NoError(t, err) + + assert.NotNil(t, commit.FirstPrecommit()) + assert.Equal(t, h-1, commit.Height()) + assert.Equal(t, 1, commit.Round()) + assert.Equal(t, VoteTypePrecommit, 
commit.Type()) + if commit.Size() <= 0 { + t.Fatalf("commit %v has a zero or negative size: %d", commit, commit.Size()) + } + + require.NotNil(t, commit.BitArray()) + assert.Equal(t, cmn.NewBitArray(10).Size(), commit.BitArray().Size()) + + assert.Equal(t, voteSet.GetByIndex(0), commit.GetByIndex(0)) + assert.True(t, commit.IsCommit()) +} + +func TestCommitValidateBasic(t *testing.T) { + commit := randCommit() + assert.NoError(t, commit.ValidateBasic()) + + // nil precommit is OK + commit = randCommit() + commit.Precommits[0] = nil + assert.NoError(t, commit.ValidateBasic()) + + // tamper with types + commit = randCommit() + commit.Precommits[0].Type = VoteTypePrevote + assert.Error(t, commit.ValidateBasic()) + + // tamper with height + commit = randCommit() + commit.Precommits[0].Height = int64(100) + assert.Error(t, commit.ValidateBasic()) + + // tamper with round + commit = randCommit() + commit.Precommits[0].Round = 100 + assert.Error(t, commit.ValidateBasic()) +} + +func randCommit() *Commit { + lastID := makeBlockIDRandom() + h := int64(3) + voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) + if err != nil { + panic(err) + } + return commit +} diff --git a/types/canonical_json.go b/types/canonical_json.go index 88509712f..aca9e9b79 100644 --- a/types/canonical_json.go +++ b/types/canonical_json.go @@ -3,7 +3,7 @@ package types import ( "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Canonical json is amino's json for structs with fields in alphabetical order diff --git a/types/event_buffer.go b/types/event_buffer.go deleted file mode 100644 index 18b41014e..000000000 --- a/types/event_buffer.go +++ /dev/null @@ -1,50 +0,0 @@ -package types - -// Interface assertions -var _ TxEventPublisher = (*TxEventBuffer)(nil) - -// TxEventBuffer is a buffer of events, which uses a slice to temporarily store -// events. -type TxEventBuffer struct { - next TxEventPublisher - capacity int - events []EventDataTx -} - -// NewTxEventBuffer accepts a TxEventPublisher and returns a new buffer with the given -// capacity. -func NewTxEventBuffer(next TxEventPublisher, capacity int) *TxEventBuffer { - return &TxEventBuffer{ - next: next, - capacity: capacity, - events: make([]EventDataTx, 0, capacity), - } -} - -// Len returns the number of events cached. -func (b TxEventBuffer) Len() int { - return len(b.events) -} - -// PublishEventTx buffers an event to be fired upon finality. -func (b *TxEventBuffer) PublishEventTx(e EventDataTx) error { - b.events = append(b.events, e) - return nil -} - -// Flush publishes events by running next.PublishWithTags on all cached events. -// Blocks. Clears cached events. -func (b *TxEventBuffer) Flush() error { - for _, e := range b.events { - err := b.next.PublishEventTx(e) - if err != nil { - return err - } - } - - // Clear out the elements and set the length to 0 - // but maintain the underlying slice's capacity. 
- // See Issue https://github.com/tendermint/tendermint/issues/1189 - b.events = b.events[:0] - return nil -} diff --git a/types/event_buffer_test.go b/types/event_buffer_test.go deleted file mode 100644 index 74ae9da29..000000000 --- a/types/event_buffer_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type eventBusMock struct{} - -func (eventBusMock) PublishEventTx(e EventDataTx) error { - return nil -} - -func TestEventBuffer(t *testing.T) { - b := NewTxEventBuffer(eventBusMock{}, 1) - b.PublishEventTx(EventDataTx{}) - assert.Equal(t, 1, b.Len()) - b.Flush() - assert.Equal(t, 0, b.Len()) -} diff --git a/types/event_bus.go b/types/event_bus.go index cb4b17d51..b4965feee 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -4,9 +4,9 @@ import ( "context" "fmt" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" ) const defaultCapacity = 0 @@ -49,7 +49,7 @@ func (b *EventBus) OnStart() error { } func (b *EventBus) OnStop() { - b.pubsub.OnStop() + b.pubsub.Stop() } func (b *EventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { diff --git a/types/event_bus_test.go b/types/event_bus_test.go index a5de2e84d..768b5b325 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -3,7 +3,6 @@ package types import ( "context" "fmt" - "math/rand" "testing" "time" @@ -11,9 +10,9 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - cmn "github.com/tendermint/tmlibs/common" ) func TestEventBusPublishEventTx(t *testing.T) { @@ -59,6 +58,64 @@ func TestEventBusPublishEventTx(t *testing.T) { } } +func TestEventBusPublish(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + defer eventBus.Stop() + + eventsCh := make(chan interface{}) + err = eventBus.Subscribe(context.Background(), "test", tmquery.Empty{}, eventsCh) + require.NoError(t, err) + + const numEventsExpected = 14 + done := make(chan struct{}) + go func() { + numEvents := 0 + for range eventsCh { + numEvents++ + if numEvents >= numEventsExpected { + close(done) + } + } + }() + + err = eventBus.Publish(EventNewBlockHeader, EventDataNewBlockHeader{}) + require.NoError(t, err) + err = eventBus.PublishEventNewBlock(EventDataNewBlock{}) + require.NoError(t, err) + err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{}) + require.NoError(t, err) + err = eventBus.PublishEventVote(EventDataVote{}) + require.NoError(t, err) + err = eventBus.PublishEventProposalHeartbeat(EventDataProposalHeartbeat{}) + require.NoError(t, err) + err = eventBus.PublishEventNewRoundStep(EventDataRoundState{}) + require.NoError(t, err) + err = eventBus.PublishEventTimeoutPropose(EventDataRoundState{}) + require.NoError(t, err) + err = eventBus.PublishEventTimeoutWait(EventDataRoundState{}) + require.NoError(t, err) + err = eventBus.PublishEventNewRound(EventDataRoundState{}) + require.NoError(t, err) + err = eventBus.PublishEventCompleteProposal(EventDataRoundState{}) + require.NoError(t, err) + err = eventBus.PublishEventPolka(EventDataRoundState{}) + require.NoError(t, 
err) + err = eventBus.PublishEventUnlock(EventDataRoundState{}) + require.NoError(t, err) + err = eventBus.PublishEventRelock(EventDataRoundState{}) + require.NoError(t, err) + err = eventBus.PublishEventLock(EventDataRoundState{}) + require.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatalf("expected to receive %d events after 1 sec.", numEventsExpected) + } +} + func BenchmarkEventBus(b *testing.B) { benchmarks := []struct { name string @@ -92,7 +149,7 @@ func BenchmarkEventBus(b *testing.B) { func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { // for random* functions - rand.Seed(time.Now().Unix()) + cmn.Seed(time.Now().Unix()) eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache eventBus.Start() @@ -126,11 +183,7 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes } } -var events = []string{EventBond, - EventUnbond, - EventRebond, - EventDupeout, - EventFork, +var events = []string{ EventNewBlock, EventNewBlockHeader, EventNewRound, @@ -145,14 +198,10 @@ var events = []string{EventBond, EventVote} func randEvent() string { - return events[rand.Intn(len(events))] + return events[cmn.RandIntn(len(events))] } -var queries = []tmpubsub.Query{EventQueryBond, - EventQueryUnbond, - EventQueryRebond, - EventQueryDupeout, - EventQueryFork, +var queries = []tmpubsub.Query{ EventQueryNewBlock, EventQueryNewBlockHeader, EventQueryNewRound, @@ -167,5 +216,5 @@ var queries = []tmpubsub.Query{EventQueryBond, EventQueryVote} func randQuery() tmpubsub.Query { - return queries[rand.Intn(len(queries))] + return queries[cmn.RandIntn(len(queries))] } diff --git a/types/events.go b/types/events.go index 2b87297cd..c26fecb71 100644 --- a/types/events.go +++ b/types/events.go @@ -10,22 +10,17 @@ import ( // Reserved event types const ( - EventBond = "Bond" EventCompleteProposal = "CompleteProposal" - EventDupeout = "Dupeout" - EventFork = "Fork" EventLock = "Lock" EventNewBlock = "NewBlock" EventNewBlockHeader = "NewBlockHeader" EventNewRound = "NewRound" EventNewRoundStep = "NewRoundStep" EventPolka = "Polka" - EventRebond = "Rebond" EventRelock = "Relock" EventTimeoutPropose = "TimeoutPropose" EventTimeoutWait = "TimeoutWait" EventTx = "Tx" - EventUnbond = "Unbond" EventUnlock = "Unlock" EventVote = "Vote" EventProposalHeartbeat = "ProposalHeartbeat" @@ -69,7 +64,7 @@ type EventDataNewBlock struct { // light weight event for benchmarking type EventDataNewBlockHeader struct { - Header *Header `json:"header"` + Header Header `json:"header"` } // All txs fire EventDataTx @@ -113,11 +108,6 @@ const ( ) var ( - EventQueryBond = QueryForEvent(EventBond) - EventQueryUnbond = QueryForEvent(EventUnbond) - EventQueryRebond = QueryForEvent(EventRebond) - EventQueryDupeout = QueryForEvent(EventDupeout) - EventQueryFork = QueryForEvent(EventFork) EventQueryNewBlock = QueryForEvent(EventNewBlock) EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) EventQueryNewRound = QueryForEvent(EventNewRound) diff --git a/types/events_test.go b/types/events_test.go new file mode 100644 index 000000000..a4b71d922 --- /dev/null +++ b/types/events_test.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestQueryTxFor(t *testing.T) { + tx := Tx("foo") + assert.Equal(t, + fmt.Sprintf("tm.event='Tx' AND tx.hash='%X'", tx.Hash()), + EventQueryTxFor(tx).String(), + ) +} + +func TestQueryForEvent(t 
*testing.T) { + assert.Equal(t, + "tm.event='NewBlock'", + QueryForEvent(EventNewBlock).String(), + ) +} diff --git a/types/evidence.go b/types/evidence.go index 4d8b59272..6313f43a5 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -4,9 +4,10 @@ import ( "bytes" "fmt" - "github.com/tendermint/go-amino" + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tmlibs/merkle" + "github.com/tendermint/tendermint/crypto/merkle" ) // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. @@ -180,7 +181,7 @@ type EvidenceList []Evidence // Hash returns the simple merkle root hash of the EvidenceList. func (evl EvidenceList) Hash() []byte { // Recursive impl. - // Copied from tmlibs/merkle to avoid allocations + // Copied from crypto/merkle to avoid allocations switch len(evl) { case 0: return nil diff --git a/types/evidence_test.go b/types/evidence_test.go index 5bbb2a37d..54eba01cd 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -36,7 +36,7 @@ func TestEvidence(t *testing.T) { blockID3 := makeBlockID("blockhash", 10000, "partshash") blockID4 := makeBlockID("blockhash", 10000, "partshash2") - chainID := "mychain" + const chainID = "mychain" vote1 := makeVote(val, chainID, 0, 10, 2, 1, blockID) badVote := makeVote(val, chainID, 0, 10, 2, 1, blockID) @@ -72,3 +72,30 @@ func TestEvidence(t *testing.T) { } } } + +func TestDuplicatedVoteEvidence(t *testing.T) { + ev := randomDuplicatedVoteEvidence() + + assert.True(t, ev.Equal(ev)) + assert.False(t, ev.Equal(&DuplicateVoteEvidence{})) +} + +func TestEvidenceList(t *testing.T) { + ev := randomDuplicatedVoteEvidence() + evl := EvidenceList([]Evidence{ev}) + + assert.NotNil(t, evl.Hash()) + assert.True(t, evl.Has(ev)) + assert.False(t, evl.Has(&DuplicateVoteEvidence{})) +} + +func randomDuplicatedVoteEvidence() *DuplicateVoteEvidence { + val := NewMockPV() + blockID := makeBlockID("blockhash", 1000, "partshash") + blockID2 := makeBlockID("blockhash2", 1000, "partshash") + const chainID = "mychain" + return &DuplicateVoteEvidence{ + VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID), + VoteB: makeVote(val, chainID, 0, 10, 2, 1, blockID2), + } +} diff --git a/types/genesis.go b/types/genesis.go index 099bb499c..220ee0e0e 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //------------------------------------------------------------ @@ -26,17 +26,7 @@ type GenesisDoc struct { ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators"` AppHash cmn.HexBytes `json:"app_hash"` - AppStateJSON json.RawMessage `json:"app_state,omitempty"` - AppOptions json.RawMessage `json:"app_options,omitempty"` // DEPRECATED -} - -// AppState returns raw application state. -// TODO: replace with AppState field during next breaking release (0.18) -func (genDoc *GenesisDoc) AppState() json.RawMessage { - if len(genDoc.AppOptions) > 0 { - return genDoc.AppOptions - } - return genDoc.AppStateJSON + AppState json.RawMessage `json:"app_state,omitempty"` } // SaveAs is a utility method for saving GenensisDoc as a JSON file. 
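With the deprecated app_options field and the AppState() accessor gone, applications read the raw state directly from the single AppState field. A minimal sketch under the GenesisDoc fields shown in this hunk (the chain ID and JSON payload are arbitrary):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/tendermint/tendermint/types"
    )

    func main() {
        // AppState is a plain json.RawMessage; there is no app_options
        // fallback to consult anymore.
        genDoc := types.GenesisDoc{
            ChainID:  "test-chain",
            AppState: json.RawMessage(`{"account":"abc"}`),
        }

        var state map[string]string
        if err := json.Unmarshal(genDoc.AppState, &state); err != nil {
            panic(err)
        }
        fmt.Println(state["account"]) // prints: abc
    }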
diff --git a/types/genesis_test.go b/types/genesis_test.go index 24398a9a5..925bba790 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -1,10 +1,14 @@ package types import ( + "io/ioutil" + "os" "testing" + "time" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/crypto" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" ) func TestGenesisBad(t *testing.T) { @@ -36,7 +40,7 @@ func TestGenesisGood(t *testing.T) { // create a base gendoc from struct baseGenDoc := &GenesisDoc{ ChainID: "abc", - Validators: []GenesisValidator{{crypto.GenPrivKeyEd25519().PubKey(), 10, "myval"}}, + Validators: []GenesisValidator{{ed25519.GenPrivKey().PubKey(), 10, "myval"}}, } genDocBytes, err = cdc.MarshalJSON(baseGenDoc) assert.NoError(t, err, "error marshalling genDoc") @@ -59,3 +63,44 @@ genDoc, err = GenesisDocFromJSON(genDocBytes) assert.Error(t, err, "expected error for genDoc json with block size of 0") } + +func TestGenesisSaveAs(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "genesis") + require.NoError(t, err) + defer os.Remove(tmpfile.Name()) + + genDoc := randomGenesisDoc() + + // save + genDoc.SaveAs(tmpfile.Name()) + stat, err := tmpfile.Stat() + require.NoError(t, err) + if stat.Size() <= 0 { + t.Fatalf("SaveAs failed to write any bytes to %v", tmpfile.Name()) + } + + err = tmpfile.Close() + require.NoError(t, err) + + // load + genDoc2, err := GenesisDocFromFile(tmpfile.Name()) + require.NoError(t, err) + + // fails for an unknown reason + // assert.EqualValues(t, genDoc2, genDoc) + assert.Equal(t, genDoc2.Validators, genDoc.Validators) +} + +func TestGenesisValidatorHash(t *testing.T) { + genDoc := randomGenesisDoc() + assert.NotEmpty(t, genDoc.ValidatorHash()) +} + +func randomGenesisDoc() *GenesisDoc { + return &GenesisDoc{ + GenesisTime: time.Now().UTC(), + ChainID: "abc", + Validators: []GenesisValidator{{ed25519.GenPrivKey().PubKey(), 10, "myval"}}, + ConsensusParams: DefaultConsensusParams(), + } +} diff --git a/types/heartbeat.go b/types/heartbeat.go index fcf545f2c..cebe2864c 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Heartbeat is a simple vote-like structure so validators can diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go index 174c3ba97..f55c0bf3f 100644 --- a/types/heartbeat_test.go +++ b/types/heartbeat_test.go @@ -4,8 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" ) func TestHeartbeatCopy(t *testing.T) { @@ -27,7 +26,7 @@ func TestHeartbeatString(t *testing.T) { hb := &Heartbeat{ValidatorIndex: 1, Height: 11, Round: 2} require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) }") - var key crypto.PrivKeyEd25519 + var key ed25519.PrivKeyEd25519 sig, err := key.Sign([]byte("Tendermint")) require.NoError(t, err) hb.Signature = sig diff --git a/types/params.go b/types/params.go index d068342c6..3056c82a0 100644 --- a/types/params.go +++ b/types/params.go @@ -2,11 +2,12 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + "github.com/tendermint/tendermint/crypto/merkle" + cmn "github.com/tendermint/tendermint/libs/common" )
const ( + // MaxBlockSizeBytes is the maximum permitted size of a block. MaxBlockSizeBytes = 104857600 // 100MB ) @@ -56,7 +57,7 @@ func DefaultConsensusParams() *ConsensusParams { func DefaultBlockSize() BlockSize { return BlockSize{ MaxBytes: 22020096, // 21MB - MaxTxs: 100000, + MaxTxs: 10000, MaxGas: -1, } } diff --git a/types/params_test.go b/types/params_test.go index f645585eb..e8e13dba0 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + abci "github.com/tendermint/tendermint/abci/types" ) func newConsensusParams(blockSize, partSize int) ConsensusParams { @@ -86,3 +87,59 @@ func TestConsensusParamsHash(t *testing.T) { assert.NotEqual(t, hashes[i], hashes[i+1]) } } + +func TestConsensusParamsUpdate(t *testing.T) { + testCases := []struct { + params ConsensusParams + updates *abci.ConsensusParams + updatedParams ConsensusParams + }{ + // empty updates + { + makeParams(1, 2, 3, 4, 5, 6), + &abci.ConsensusParams{}, + makeParams(1, 2, 3, 4, 5, 6), + }, + // negative values are ignored + { + makeParams(1, 2, 3, 4, 5, 6), + &abci.ConsensusParams{ + BlockSize: &abci.BlockSize{ + MaxBytes: -100, + MaxTxs: -200, + MaxGas: -300, + }, + TxSize: &abci.TxSize{ + MaxBytes: -400, + MaxGas: -500, + }, + BlockGossip: &abci.BlockGossip{ + BlockPartSizeBytes: -600, + }, + }, + makeParams(1, 2, 3, 4, 5, 6), + }, + // fine updates + { + makeParams(1, 2, 3, 4, 5, 6), + &abci.ConsensusParams{ + BlockSize: &abci.BlockSize{ + MaxBytes: 100, + MaxTxs: 200, + MaxGas: 300, + }, + TxSize: &abci.TxSize{ + MaxBytes: 400, + MaxGas: 500, + }, + BlockGossip: &abci.BlockGossip{ + BlockPartSizeBytes: 600, + }, + }, + makeParams(100, 200, 300, 400, 500, 600), + }, + } + for _, tc := range testCases { + assert.Equal(t, tc.updatedParams, tc.params.Update(tc.updates)) + } +} diff --git a/types/part_set.go b/types/part_set.go index 18cfe802c..f6d7f6b6e 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -7,10 +7,9 @@ import ( "io" "sync" - "golang.org/x/crypto/ripemd160" - - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" ) var ( @@ -31,7 +30,7 @@ func (part *Part) Hash() []byte { if part.hash != nil { return part.hash } - hasher := ripemd160.New() + hasher := tmhash.New() hasher.Write(part.Bytes) // nolint: errcheck, gas part.hash = hasher.Sum(nil) return part.hash diff --git a/types/part_set_test.go b/types/part_set_test.go index 545b4d42b..3576e747e 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -1,11 +1,13 @@ package types import ( - "bytes" "io/ioutil" "testing" - cmn "github.com/tendermint/tmlibs/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" ) const ( @@ -13,24 +15,21 @@ ) func TestBasicPartSet(t *testing.T) { - // Construct random data of size partSize * 100 data := cmn.RandBytes(testPartSize * 100) - partSet := NewPartSetFromData(data, testPartSize) - if len(partSet.Hash()) == 0 { - t.Error("Expected to get hash") - } - if partSet.Total() != 100 { - t.Errorf("Expected to get 100 parts, but got %v", partSet.Total()) - } - if !partSet.IsComplete() { - t.Errorf("PartSet should be complete") - } + + assert.NotEmpty(t, partSet.Hash()) + assert.Equal(t, 100, partSet.Total()) + assert.Equal(t, 100,
partSet.BitArray().Size()) + assert.True(t, partSet.HashesTo(partSet.Hash())) + assert.True(t, partSet.IsComplete()) + assert.Equal(t, 100, partSet.Count()) // Test adding parts to a new partSet. partSet2 := NewPartSetFromHeader(partSet.Header()) + assert.True(t, partSet2.HasHeader(partSet.Header())) for i := 0; i < partSet.Total(); i++ { part := partSet.GetPart(i) //t.Logf("\n%v", part) @@ -39,31 +38,28 @@ func TestBasicPartSet(t *testing.T) { t.Errorf("Failed to add part %v, error: %v", i, err) } } - - if !bytes.Equal(partSet.Hash(), partSet2.Hash()) { - t.Error("Expected to get same hash") - } - if partSet2.Total() != 100 { - t.Errorf("Expected to get 100 parts, but got %v", partSet2.Total()) - } - if !partSet2.IsComplete() { - t.Errorf("Reconstructed PartSet should be complete") - } + // adding part with invalid index + added, err := partSet2.AddPart(&Part{Index: 10000}) + assert.False(t, added) + assert.Error(t, err) + // adding existing part + added, err = partSet2.AddPart(partSet2.GetPart(0)) + assert.False(t, added) + assert.Nil(t, err) + + assert.Equal(t, partSet.Hash(), partSet2.Hash()) + assert.Equal(t, 100, partSet2.Total()) + assert.True(t, partSet2.IsComplete()) // Reconstruct data, assert that they are equal. data2Reader := partSet2.GetReader() data2, err := ioutil.ReadAll(data2Reader) - if err != nil { - t.Errorf("Error reading data2Reader: %v", err) - } - if !bytes.Equal(data, data2) { - t.Errorf("Got wrong data.") - } + require.NoError(t, err) + assert.Equal(t, data, data2) } func TestWrongProof(t *testing.T) { - // Construct random data of size partSize * 100 data := cmn.RandBytes(testPartSize * 100) partSet := NewPartSetFromData(data, testPartSize) @@ -86,5 +82,4 @@ func TestWrongProof(t *testing.T) { if added || err == nil { t.Errorf("Expected to fail adding a part with bad bytes.") } - } diff --git a/types/priv_validator.go b/types/priv_validator.go index 85db65a41..1642be41b 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" ) // PrivValidator defines the functionality of a local Tendermint validator @@ -47,7 +48,7 @@ type MockPV struct { } func NewMockPV() *MockPV { - return &MockPV{crypto.GenPrivKeyEd25519()} + return &MockPV{ed25519.GenPrivKey()} } // Implements PrivValidator. diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go new file mode 100644 index 000000000..805828f82 --- /dev/null +++ b/types/proto3/block.pb.go @@ -0,0 +1,261 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: block.proto + +/* +Package proto3 is a generated protocol buffer package. + +It is generated from these files: + block.proto + +It has these top-level messages: + PartSetHeader + BlockID + Header + Timestamp +*/ +package proto3 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PartSetHeader struct { + Total int32 `protobuf:"zigzag32,1,opt,name=Total" json:"Total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` +} + +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *PartSetHeader) GetTotal() int32 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *PartSetHeader) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +type BlockID struct { + Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` + PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *BlockID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *BlockID) GetPartsHeader() *PartSetHeader { + if m != nil { + return m.PartsHeader + } + return nil +} + +type Header struct { + // basic block info + ChainID string `protobuf:"bytes,1,opt,name=ChainID" json:"ChainID,omitempty"` + Height int64 `protobuf:"zigzag64,2,opt,name=Height" json:"Height,omitempty"` + Time *Timestamp `protobuf:"bytes,3,opt,name=Time" json:"Time,omitempty"` + NumTxs int64 `protobuf:"zigzag64,4,opt,name=NumTxs" json:"NumTxs,omitempty"` + // prev block info + LastBlockID *BlockID `protobuf:"bytes,5,opt,name=LastBlockID" json:"LastBlockID,omitempty"` + TotalTxs int64 `protobuf:"zigzag64,6,opt,name=TotalTxs" json:"TotalTxs,omitempty"` + // hashes of block data + LastCommitHash []byte `protobuf:"bytes,7,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` + DataHash []byte `protobuf:"bytes,8,opt,name=DataHash,proto3" json:"DataHash,omitempty"` + // hashes from the app output from the prev block + ValidatorsHash []byte `protobuf:"bytes,9,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` + AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` + // consensus info + EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Header) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *Header) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Header) GetTime() *Timestamp { + if m != nil { + return m.Time + } + return nil +} + +func (m *Header) GetNumTxs() int64 { + if m != nil { + return m.NumTxs + } + return 0 +} + +func (m *Header) GetLastBlockID() *BlockID { + if m != nil { + return m.LastBlockID + } + return nil +} + +func (m *Header) GetTotalTxs() int64 { + if m != nil { + return m.TotalTxs + } + 
return 0 +} + +func (m *Header) GetLastCommitHash() []byte { + if m != nil { + return m.LastCommitHash + } + return nil +} + +func (m *Header) GetDataHash() []byte { + if m != nil { + return m.DataHash + } + return nil +} + +func (m *Header) GetValidatorsHash() []byte { + if m != nil { + return m.ValidatorsHash + } + return nil +} + +func (m *Header) GetConsensusHash() []byte { + if m != nil { + return m.ConsensusHash + } + return nil +} + +func (m *Header) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func (m *Header) GetLastResultsHash() []byte { + if m != nil { + return m.LastResultsHash + } + return nil +} + +func (m *Header) GetEvidenceHash() []byte { + if m != nil { + return m.EvidenceHash + } + return nil +} + +// Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type +// protobuf/timestamp.proto in the sense that there seconds and nanos are varint encoded. See: +// https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 +// Also nanos do not get skipped if they are zero in amino. +type Timestamp struct { + Seconds int64 `protobuf:"fixed64,1,opt,name=seconds" json:"seconds,omitempty"` + Nanos int32 `protobuf:"fixed32,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*PartSetHeader)(nil), "proto3.PartSetHeader") + proto.RegisterType((*BlockID)(nil), "proto3.BlockID") + proto.RegisterType((*Header)(nil), "proto3.Header") + proto.RegisterType((*Timestamp)(nil), "proto3.Timestamp") +} + +func init() { proto.RegisterFile("block.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 372 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0x4f, 0x6b, 0xe3, 0x30, + 0x10, 0xc5, 0xf1, 0xe6, 0xff, 0x38, 0xd9, 0x6c, 0x86, 0xdd, 0xc5, 0xf4, 0x14, 0x4c, 0x5b, 0x72, + 0x0a, 0xb4, 0x39, 0x94, 0xd2, 0x53, 0x9b, 0x14, 0x12, 0x28, 0xa5, 0xa8, 0x21, 0x77, 0x25, 0x16, + 0x8d, 0xa9, 0x2d, 0x19, 0x4b, 0x29, 0xfd, 0x7c, 0xfd, 0x64, 0x45, 0x23, 0xdb, 0x8d, 0x73, 0x4a, + 0xde, 0x9b, 0x37, 0xbf, 0x91, 0x47, 0x02, 0x7f, 0x9b, 0xa8, 0xdd, 0xfb, 0x34, 0xcb, 0x95, 0x51, + 0xd8, 0xa6, 0x9f, 0x59, 0x78, 0x0b, 0x83, 0x17, 0x9e, 0x9b, 0x57, 0x61, 0x96, 0x82, 0x47, 0x22, + 0xc7, 0xbf, 0xd0, 0x5a, 0x2b, 0xc3, 0x93, 0xc0, 0x1b, 0x7b, 0x93, 0x11, 0x73, 0x02, 0x11, 0x9a, + 0x4b, 0xae, 0xf7, 0xc1, 0xaf, 0xb1, 0x37, 0xe9, 0x33, 0xfa, 0x1f, 0x6e, 0xa0, 0xf3, 0x60, 0x89, + 0xab, 0x45, 0x55, 0xf6, 0x7e, 0xca, 0x78, 0x03, 0xbe, 0x25, 0x6b, 0xc7, 0xa5, 0x4e, 0xff, 0xfa, + 0x9f, 0x1b, 0x3f, 0x9b, 0xd6, 0x86, 0xb2, 0xe3, 0x64, 0xf8, 0xd5, 0x80, 0x76, 0x71, 0x98, 0x00, + 0x3a, 0xf3, 0x3d, 0x8f, 0xe5, 0x6a, 0x41, 0xe8, 0x1e, 0x2b, 0x25, 0xfe, 0xb7, 0x99, 0xf8, 0x6d, + 0x6f, 0x08, 0x8c, 0xac, 0x50, 0x78, 0x01, 0xcd, 0x75, 0x9c, 0x8a, 0xa0, 0x41, 0xe3, 0x46, 0xe5, + 0x38, 0xeb, 0x69, 0xc3, 0xd3, 0x8c, 0x51, 0xd9, 0xb6, 0x3f, 0x1f, 0xd2, 0xf5, 0xa7, 0x0e, 0x9a, + 0xae, 0xdd, 0x29, 0xbc, 0x02, 0xff, 0x89, 0x6b, 0x53, 0x7c, 0x57, 0xd0, 0x22, 0xca, 0xb0, 
0xa4, + 0x14, 0x36, 0x3b, 0xce, 0xe0, 0x19, 0x74, 0x69, 0x47, 0x16, 0xd6, 0x26, 0x58, 0xa5, 0xf1, 0x12, + 0x7e, 0xdb, 0xe8, 0x5c, 0xa5, 0x69, 0x6c, 0x68, 0x43, 0x1d, 0xda, 0xd0, 0x89, 0x6b, 0x19, 0x0b, + 0x6e, 0x38, 0x25, 0xba, 0x94, 0xa8, 0xb4, 0x65, 0x6c, 0x78, 0x12, 0x47, 0xdc, 0xa8, 0x5c, 0x53, + 0xa2, 0xe7, 0x18, 0x75, 0x17, 0xcf, 0x61, 0x30, 0x57, 0x52, 0x0b, 0xa9, 0x0f, 0x2e, 0x06, 0x14, + 0xab, 0x9b, 0x76, 0xa3, 0xf7, 0x59, 0x46, 0x75, 0x9f, 0xea, 0xa5, 0xc4, 0x09, 0x0c, 0xed, 0xa9, + 0x98, 0xd0, 0x87, 0xc4, 0x38, 0x42, 0x9f, 0x12, 0xa7, 0x36, 0x86, 0xd0, 0x7f, 0xfc, 0x88, 0x23, + 0x21, 0x77, 0x82, 0x62, 0x03, 0x8a, 0xd5, 0xbc, 0xf0, 0x0e, 0x7a, 0xd5, 0xce, 0xed, 0x50, 0x2d, + 0x76, 0x4a, 0x46, 0x9a, 0xae, 0xf1, 0x0f, 0x2b, 0xa5, 0x7d, 0x6d, 0x92, 0x4b, 0xa5, 0xe9, 0x16, + 0x87, 0xcc, 0x89, 0x6d, 0xf1, 0x38, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x74, 0x2f, 0xbd, + 0xb2, 0x02, 0x00, 0x00, +} diff --git a/types/proto3/block.proto b/types/proto3/block.proto new file mode 100644 index 000000000..bc3cf8749 --- /dev/null +++ b/types/proto3/block.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package proto3; + + +message PartSetHeader { + sint32 Total = 1; + bytes Hash = 2; +} + +message BlockID { + bytes Hash = 1; + PartSetHeader PartsHeader = 2; +} + +message Header { + // basic block info + string ChainID = 1; + sint64 Height = 2; + Timestamp Time = 3; + sint64 NumTxs = 4; + + // prev block info + BlockID LastBlockID = 5; + sint64 TotalTxs = 6; + + // hashes of block data + bytes LastCommitHash = 7; // commit from validators from the last block + bytes DataHash = 8; // transactions + + // hashes from the app output from the prev block + bytes ValidatorsHash = 9; // validators for the current block + bytes ConsensusHash = 10; // consensus params for current block + bytes AppHash = 11; // state after txs from the previous block + bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block + + // consensus info + bytes EvidenceHash = 13; // evidence included in the block +} + +// Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type +// protobuf/timestamp.proto, where seconds and nanos are varint encoded. See: +// https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 +// Also, nanos do not get skipped if they are zero in amino. +message Timestamp { + sfixed64 seconds = 1; + sfixed32 nanos = 2; +} diff --git a/types/proto3_test.go b/types/proto3_test.go new file mode 100644 index 000000000..19a624a65 --- /dev/null +++ b/types/proto3_test.go @@ -0,0 +1,115 @@ +package types + +import ( + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/types/proto3" +) + +func TestProto3Compatibility(t *testing.T) { + tm, err := time.Parse("Mon Jan 2 15:04:05 -0700 MST 2006", "Mon Jan 2 15:04:05 -0700 MST 2006") + assert.NoError(t, err) + // add some nanos, otherwise protobuf will skip over this while amino (still) won't!
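+ // (with zero nanos the protobuf encoding would omit the field while amino would keep it, and the byte-equality assertions below would fail)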
+ tm = tm.Add(50000 * time.Nanosecond) + seconds := tm.Unix() + nanos := int32(tm.Nanosecond()) + t.Log("seconds", seconds) + t.Log("nanos", nanos) + + pbHeader := proto3.Header{ + ChainID: "cosmos", + Height: 150, + Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, + NumTxs: 7, + LastBlockID: &proto3.BlockID{ + Hash: []byte("some serious hashing"), + PartsHeader: &proto3.PartSetHeader{ + Total: 8, + Hash: []byte("some more serious hashing"), + }, + }, + TotalTxs: 100, + LastCommitHash: []byte("commit hash"), + DataHash: []byte("data hash"), + ValidatorsHash: []byte("validators hash"), + } + aminoHeader := Header{ + ChainID: "cosmos", + Height: 150, + Time: tm, + NumTxs: 7, + LastBlockID: BlockID{ + Hash: []byte("some serious hashing"), + PartsHeader: PartSetHeader{ + Total: 8, + Hash: []byte("some more serious hashing"), + }, + }, + TotalTxs: 100, + LastCommitHash: []byte("commit hash"), + DataHash: []byte("data hash"), + ValidatorsHash: []byte("validators hash"), + } + ab, err := cdc.MarshalBinaryBare(aminoHeader) + assert.NoError(t, err, "unexpected error") + + pb, err := proto.Marshal(&pbHeader) + assert.NoError(t, err, "unexpected error") + // This works: + assert.Equal(t, ab, pb, "encoding doesn't match") + + emptyLastBlockPb := proto3.Header{ + ChainID: "cosmos", + Height: 150, + Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, + NumTxs: 7, + // This is not fully skipped in amino (yet) although it is empty: + LastBlockID: &proto3.BlockID{ + PartsHeader: &proto3.PartSetHeader{ + }, + }, + TotalTxs: 100, + LastCommitHash: []byte("commit hash"), + DataHash: []byte("data hash"), + ValidatorsHash: []byte("validators hash"), + } + emptyLastBlockAm := Header{ + ChainID: "cosmos", + Height: 150, + Time: tm, + NumTxs: 7, + TotalTxs: 100, + LastCommitHash: []byte("commit hash"), + DataHash: []byte("data hash"), + ValidatorsHash: []byte("validators hash"), + } + + ab, err = cdc.MarshalBinaryBare(emptyLastBlockAm) + assert.NoError(t, err, "unexpected error") + + pb, err = proto.Marshal(&emptyLastBlockPb) + assert.NoError(t, err, "unexpected error") + // This works: + assert.Equal(t, ab, pb, "encoding doesn't match") + + pb, err = proto.Marshal(&proto3.Header{}) + assert.NoError(t, err, "unexpected error") + t.Log(pb) + + // While in protobuf Header{} encodes to an empty byte slice it does not in amino: + ab, err = cdc.MarshalBinaryBare(Header{}) + assert.NoError(t, err, "unexpected error") + t.Log(ab) + + pb, err = proto.Marshal(&proto3.Timestamp{}) + assert.NoError(t, err, "unexpected error") + t.Log(pb) + + ab, err = cdc.MarshalBinaryBare(time.Time{}) + assert.NoError(t, err, "unexpected error") + t.Log(ab) +} diff --git a/types/protobuf.go b/types/protobuf.go index ad7362e03..0e1e446db 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -7,7 +7,9 @@ import ( "time" abci "github.com/tendermint/tendermint/abci/types" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" ) //------------------------------------------------------- @@ -61,12 +63,12 @@ func (tm2pb) Validator(val *Validator) abci.Validator { // TODO: add cases when new pubkey types are added to crypto func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey { switch pk := pubKey.(type) { - case crypto.PubKeyEd25519: + case ed25519.PubKeyEd25519: return abci.PubKey{ Type: ABCIPubKeyTypeEd25519, Data: pk[:], } - case crypto.PubKeySecp256k1: + case 
secp256k1.PubKeySecp256k1: return abci.PubKey{ Type: ABCIPubKeyTypeSecp256k1, Data: pk[:], @@ -78,7 +80,7 @@ func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey { // XXX: panics on nil or unknown pubkey type func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator { - validators := make([]abci.Validator, len(vals.Validators)) + validators := make([]abci.Validator, vals.Size()) for i, val := range vals.Validators { validators[i] = TM2PB.Validator(val) } @@ -161,14 +163,14 @@ func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) { if len(pubKey.Data) != sizeEd { return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeEd) } - var pk crypto.PubKeyEd25519 + var pk ed25519.PubKeyEd25519 copy(pk[:], pubKey.Data) return pk, nil case ABCIPubKeyTypeSecp256k1: if len(pubKey.Data) != sizeSecp { return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeSecp) } - var pk crypto.PubKeySecp256k1 + var pk secp256k1.PubKeySecp256k1 copy(pk[:], pubKey.Data) return pk, nil default: diff --git a/types/protobuf_test.go b/types/protobuf_test.go index cd986fd81..6ee79b906 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -2,15 +2,18 @@ package types import ( "testing" + "time" "github.com/stretchr/testify/assert" abci "github.com/tendermint/tendermint/abci/types" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" ) func TestABCIPubKey(t *testing.T) { - pkEd := crypto.GenPrivKeyEd25519().PubKey() - pkSecp := crypto.GenPrivKeySecp256k1().PubKey() + pkEd := ed25519.GenPrivKey().PubKey() + pkSecp := secp256k1.GenPrivKey().PubKey() testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519) testABCIPubKey(t, pkSecp, ABCIPubKeyTypeSecp256k1) } @@ -23,7 +26,7 @@ func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) { } func TestABCIValidators(t *testing.T) { - pkEd := crypto.GenPrivKeyEd25519().PubKey() + pkEd := ed25519.GenPrivKey().PubKey() // correct validator tmValExpected := &Validator{ @@ -43,6 +46,9 @@ func TestABCIValidators(t *testing.T) { assert.Nil(t, err) assert.Equal(t, tmValExpected, tmVals[0]) + abciVals := TM2PB.Validators(NewValidatorSet(tmVals)) + assert.Equal(t, []abci.Validator{abciVal}, abciVals) + // val with address tmVal.Address = pkEd.Address() @@ -67,3 +73,50 @@ func TestABCIConsensusParams(t *testing.T) { assert.Equal(t, *cp, cp2) } + +func TestABCIHeader(t *testing.T) { + header := &Header{ + Height: int64(3), + Time: time.Now(), + NumTxs: int64(10), + } + abciHeader := TM2PB.Header(header) + + assert.Equal(t, int64(3), abciHeader.Height) +} + +func TestABCIEvidence(t *testing.T) { + val := NewMockPV() + blockID := makeBlockID("blockhash", 1000, "partshash") + blockID2 := makeBlockID("blockhash2", 1000, "partshash") + const chainID = "mychain" + ev := &DuplicateVoteEvidence{ + PubKey: val.GetPubKey(), + VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID), + VoteB: makeVote(val, chainID, 0, 10, 2, 1, blockID2), + } + abciEv := TM2PB.Evidence( + ev, + NewValidatorSet([]*Validator{NewValidator(val.GetPubKey(), 10)}), + time.Now(), + ) + + assert.Equal(t, "duplicate/vote", abciEv.Type) +} + +type pubKeyEddie struct{} + +func (pubKeyEddie) Address() Address { return []byte{} } +func (pubKeyEddie) Bytes() []byte { return []byte{} } +func (pubKeyEddie) VerifyBytes(msg []byte, sig crypto.Signature) bool { return false } +func 
(pubKeyEddie) Equals(crypto.PubKey) bool { return false } + +func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { + pubkey := ed25519.GenPrivKey().PubKey() + + abciVal := TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10) + assert.Equal(t, int64(10), abciVal.Power) + + assert.Panics(t, func() { TM2PB.ValidatorFromPubKeyAndPower(nil, 10) }) + assert.Panics(t, func() { TM2PB.ValidatorFromPubKeyAndPower(pubKeyEddie{}, 10) }) +} diff --git a/types/results.go b/types/results.go index 9f4f33c36..17d5891c3 100644 --- a/types/results.go +++ b/types/results.go @@ -2,8 +2,8 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + "github.com/tendermint/tendermint/crypto/merkle" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- @@ -24,15 +24,16 @@ func (a ABCIResult) Hash() []byte { // ABCIResults wraps the deliver tx results to return a proof type ABCIResults []ABCIResult -// NewResults creates ABCIResults from ResponseDeliverTx -func NewResults(del []*abci.ResponseDeliverTx) ABCIResults { - res := make(ABCIResults, len(del)) - for i, d := range del { +// NewResults creates ABCIResults from the list of ResponseDeliverTx. +func NewResults(responses []*abci.ResponseDeliverTx) ABCIResults { + res := make(ABCIResults, len(responses)) + for i, d := range responses { res[i] = NewResultFromResponse(d) } return res } +// NewResultFromResponse creates ABCIResult from ResponseDeliverTx. func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult { return ABCIResult{ Code: response.Code, @@ -51,6 +52,8 @@ func (a ABCIResults) Bytes() []byte { // Hash returns a merkle hash of all results func (a ABCIResults) Hash() []byte { + // NOTE: we copy the impl of the merkle tree for txs - + // we should be consistent and either do it for both or not. return merkle.SimpleHashFromHashers(a.toHashers()) } diff --git a/types/results_test.go b/types/results_test.go index 009e2693d..8cbe319ff 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" ) func TestABCIResults(t *testing.T) { @@ -41,3 +42,14 @@ func TestABCIResults(t *testing.T) { assert.True(t, valid, "%d", i) } } + +func TestABCIBytes(t *testing.T) { + results := NewResults([]*abci.ResponseDeliverTx{ + {Code: 0, Data: []byte{}}, + {Code: 0, Data: []byte("one")}, + {Code: 14, Data: nil}, + {Code: 14, Data: []byte("foo")}, + {Code: 14, Data: []byte("bar")}, + }) + assert.NotNil(t, results.Bytes()) +} diff --git a/types/tx.go b/types/tx.go index cad7dda3a..489f0b232 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,19 +6,19 @@ import ( "fmt" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" ) // Tx is an arbitrary byte array. // NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed. -// Alternatively, it may make sense to add types here and let -// []byte be type 0x1 so we can have versioned txs if need be in the future. +// Might we want types here ? type Tx []byte -// Hash computes the RIPEMD160 hash of the wire encoded transaction. 
+// Hash computes the TMHASH hash of the wire encoded transaction. func (tx Tx) Hash() []byte { - return aminoHasher(tx).Hash() + return tmhash.Sum(tx) } // String returns the hex-encoded transaction as a string. @@ -32,7 +32,7 @@ type Txs []Tx // Hash returns the simple Merkle root hash of the transactions. func (txs Txs) Hash() []byte { // Recursive impl. - // Copied from tmlibs/merkle to avoid allocations + // Copied from tendermint/crypto/merkle to avoid allocations switch len(txs) { case 0: return nil diff --git a/types/tx_test.go b/types/tx_test.go index 2a93ceb31..df7a74496 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tmlibs/common" - ctest "github.com/tendermint/tmlibs/test" + cmn "github.com/tendermint/tendermint/libs/common" + ctest "github.com/tendermint/tendermint/libs/test" ) func makeTxs(cnt, size int) Txs { @@ -24,21 +24,32 @@ func randInt(low, high int) int { } func TestTxIndex(t *testing.T) { - assert := assert.New(t) for i := 0; i < 20; i++ { txs := makeTxs(15, 60) for j := 0; j < len(txs); j++ { tx := txs[j] idx := txs.Index(tx) - assert.Equal(j, idx) + assert.Equal(t, j, idx) } - assert.Equal(-1, txs.Index(nil)) - assert.Equal(-1, txs.Index(Tx("foodnwkf"))) + assert.Equal(t, -1, txs.Index(nil)) + assert.Equal(t, -1, txs.Index(Tx("foodnwkf"))) + } +} + +func TestTxIndexByHash(t *testing.T) { + for i := 0; i < 20; i++ { + txs := makeTxs(15, 60) + for j := 0; j < len(txs); j++ { + tx := txs[j] + idx := txs.IndexByHash(tx.Hash()) + assert.Equal(t, j, idx) + } + assert.Equal(t, -1, txs.IndexByHash(nil)) + assert.Equal(t, -1, txs.IndexByHash(Tx("foodnwkf").Hash())) } } func TestValidTxProof(t *testing.T) { - assert := assert.New(t) cases := []struct { txs Txs }{ @@ -58,21 +69,21 @@ func TestValidTxProof(t *testing.T) { leaf := txs[i] leafHash := leaf.Hash() proof := txs.Proof(i) - assert.Equal(i, proof.Index, "%d: %d", h, i) - assert.Equal(len(txs), proof.Total, "%d: %d", h, i) - assert.EqualValues(root, proof.RootHash, "%d: %d", h, i) - assert.EqualValues(leaf, proof.Data, "%d: %d", h, i) - assert.EqualValues(leafHash, proof.LeafHash(), "%d: %d", h, i) - assert.Nil(proof.Validate(root), "%d: %d", h, i) - assert.NotNil(proof.Validate([]byte("foobar")), "%d: %d", h, i) + assert.Equal(t, i, proof.Index, "%d: %d", h, i) + assert.Equal(t, len(txs), proof.Total, "%d: %d", h, i) + assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) + assert.EqualValues(t, leaf, proof.Data, "%d: %d", h, i) + assert.EqualValues(t, leafHash, proof.LeafHash(), "%d: %d", h, i) + assert.Nil(t, proof.Validate(root), "%d: %d", h, i) + assert.NotNil(t, proof.Validate([]byte("foobar")), "%d: %d", h, i) // read-write must also work var p2 TxProof bin, err := cdc.MarshalBinary(proof) - assert.Nil(err) + assert.Nil(t, err) err = cdc.UnmarshalBinary(bin, &p2) - if assert.Nil(err, "%d: %d: %+v", h, i, err) { - assert.Nil(p2.Validate(root), "%d: %d", h, i) + if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { + assert.Nil(t, p2.Validate(root), "%d: %d", h, i) } } } @@ -86,8 +97,6 @@ func TestTxProofUnchangable(t *testing.T) { } func testTxProofUnchangable(t *testing.T) { - assert := assert.New(t) - // make some proof txs := makeTxs(randInt(2, 100), randInt(16, 128)) root := txs.Hash() @@ -95,9 +104,9 @@ func testTxProofUnchangable(t *testing.T) { proof := txs.Proof(i) // make sure it is valid to start with - assert.Nil(proof.Validate(root)) + assert.Nil(t, proof.Validate(root)) bin, err := 
diff --git a/types/validator.go b/types/validator.go
index bea975a4f..e43acf09d 100644
--- a/types/validator.go
+++ b/types/validator.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/tendermint/tendermint/crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Volatile state for each Validator
diff --git a/types/validator_set.go b/types/validator_set.go
index f2fac2929..60fc2d83b 100644
--- a/types/validator_set.go
+++ b/types/validator_set.go
@@ -7,8 +7,8 @@ import (
 	"sort"
 	"strings"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // ValidatorSet represent a set of *Validator at a given height.
@@ -39,14 +39,15 @@ func NewValidatorSet(vals []*Validator) *ValidatorSet {
 		Validators: validators,
 	}
 
-	if vals != nil {
+	if len(vals) > 0 {
 		vs.IncrementAccum(1)
 	}
 
 	return vs
 }
 
-// incrementAccum and update the proposer
+// IncrementAccum increments accum of each validator and updates the
+// proposer. Panics if validator set is empty.
func (valSet *ValidatorSet) IncrementAccum(times int) {
 	// Add VotingPower * times to each validator and order into heap.
 	validatorsHeap := cmn.NewHeap()
diff --git a/types/validator_set_test.go b/types/validator_set_test.go
index c78a36063..1756f7890 100644
--- a/types/validator_set_test.go
+++ b/types/validator_set_test.go
@@ -11,9 +11,64 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	crypto "github.com/tendermint/tendermint/crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
+func TestValidatorSetBasic(t *testing.T) {
+	for _, vset := range []*ValidatorSet{NewValidatorSet([]*Validator{}), NewValidatorSet(nil)} {
+		assert.Panics(t, func() { vset.IncrementAccum(1) })
+
+		assert.EqualValues(t, vset, vset.Copy())
+		assert.False(t, vset.HasAddress([]byte("some val")))
+		idx, val := vset.GetByAddress([]byte("some val"))
+		assert.Equal(t, -1, idx)
+		assert.Nil(t, val)
+		addr, val := vset.GetByIndex(-100)
+		assert.Nil(t, addr)
+		assert.Nil(t, val)
+		addr, val = vset.GetByIndex(0)
+		assert.Nil(t, addr)
+		assert.Nil(t, val)
+		addr, val = vset.GetByIndex(100)
+		assert.Nil(t, addr)
+		assert.Nil(t, val)
+		assert.Zero(t, vset.Size())
+		assert.Equal(t, int64(0), vset.TotalVotingPower())
+		assert.Nil(t, vset.GetProposer())
+		assert.Nil(t, vset.Hash())
+
+		// add
+		val = randValidator_()
+		assert.True(t, vset.Add(val))
+		assert.True(t, vset.HasAddress(val.Address))
+		idx, val2 := vset.GetByAddress(val.Address)
+		assert.Equal(t, 0, idx)
+		assert.Equal(t, val, val2)
+		addr, val2 = vset.GetByIndex(0)
+		assert.Equal(t, []byte(val.Address), addr)
+		assert.Equal(t, val, val2)
+		assert.Equal(t, 1, vset.Size())
+		assert.Equal(t, val.VotingPower, vset.TotalVotingPower())
+		assert.Equal(t, val, vset.GetProposer())
+		assert.NotNil(t, vset.Hash())
+		assert.NotPanics(t, func() { vset.IncrementAccum(1) })
+
+		// update
+		assert.False(t, vset.Update(randValidator_()))
+		val.VotingPower = 100
+		assert.True(t, vset.Update(val))
+
+		// remove
+		val2, removed := vset.Remove(randValidator_().Address)
+		assert.Nil(t, val2)
+		assert.False(t, removed)
+		val2, removed = vset.Remove(val.Address)
+		assert.Equal(t, val.Address, val2.Address)
+		assert.True(t, removed)
+	}
+}
+
 func TestCopy(t *testing.T) {
 	vset := randValidatorSet(10)
 	vsetHash := vset.Hash()
@@ -33,7 +88,7 @@ func BenchmarkValidatorSetCopy(b *testing.B) {
 	b.StopTimer()
 	vset := NewValidatorSet([]*Validator{})
 	for i := 0; i < 1000; i++ {
-		privKey := crypto.GenPrivKeyEd25519()
+		privKey := ed25519.GenPrivKey()
 		pubKey := privKey.PubKey()
 		val := NewValidator(pubKey, 0)
 		if !vset.Add(val) {
@@ -197,7 +252,7 @@ func newValidator(address []byte, power int64) *Validator {
 func randPubKey() crypto.PubKey {
 	var pubKey [32]byte
 	copy(pubKey[:], cmn.RandBytes(32))
-	return crypto.PubKeyEd25519(pubKey)
+	return ed25519.PubKeyEd25519(pubKey)
 }
 
 func randValidator_() *Validator {
@@ -314,7 +369,7 @@ func TestSafeSubClip(t *testing.T) {
 //-------------------------------------------------------------------
 
 func TestValidatorSetVerifyCommit(t *testing.T) {
-	privKey := crypto.GenPrivKeyEd25519()
+	privKey := ed25519.GenPrivKey()
 	pubKey := privKey.PubKey()
 	v1 := NewValidator(pubKey, 1000)
 	vset := NewValidatorSet([]*Validator{v1})
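The len(vals) > 0 guard and the documented panic change the contract for empty sets, as TestValidatorSetBasic pins down. A sketch of what that means for callers; the main wrapper and the sample validator are invented for illustration:

// sketch_valset.go (illustrative only)
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// Constructing an empty set no longer calls IncrementAccum.
	empty := types.NewValidatorSet(nil)
	fmt.Println(empty.Size()) // 0
	// empty.IncrementAccum(1) // would panic, per the new doc comment

	val := types.NewValidator(ed25519.GenPrivKey().PubKey(), 10)
	vset := types.NewValidatorSet([]*types.Validator{val})
	vset.IncrementAccum(1) // fine once the set is non-empty
	fmt.Printf("proposer: %X\n", vset.GetProposer().Address)
}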
diff --git a/types/vote.go b/types/vote.go
index 1e7b263b6..ed4ebd73e 100644
--- a/types/vote.go
+++ b/types/vote.go
@@ -7,7 +7,7 @@ import (
 	"time"
 
 	crypto "github.com/tendermint/tendermint/crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
diff --git a/types/vote_set.go b/types/vote_set.go
index a60d95daf..c51681053 100644
--- a/types/vote_set.go
+++ b/types/vote_set.go
@@ -8,7 +8,7 @@
 
 	"github.com/pkg/errors"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // UNSTABLE
diff --git a/types/vote_set_test.go b/types/vote_set_test.go
index 9efef41ba..32ceb7b16 100644
--- a/types/vote_set_test.go
+++ b/types/vote_set_test.go
@@ -6,8 +6,8 @@ import (
 	"time"
 
 	crypto "github.com/tendermint/tendermint/crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	tst "github.com/tendermint/tmlibs/test"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	tst "github.com/tendermint/tendermint/libs/test"
 )
 
 // NOTE: privValidators are in order
diff --git a/types/vote_test.go b/types/vote_test.go
index cbb22aaae..836baa615 100644
--- a/types/vote_test.go
+++ b/types/vote_test.go
@@ -4,7 +4,9 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/crypto/ed25519"
 )
 
 func examplePrevote() *Vote {
@@ -50,29 +52,9 @@ func TestVoteSignable(t *testing.T) {
 	}
 }
 
-func TestVoteString(t *testing.T) {
-	tc := []struct {
-		name string
-		in   string
-		out  string
-	}{
-		{"Precommit", examplePrecommit().String(), `Vote{56789:616464720000 12345/02/2(Precommit) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
-		{"Prevote", examplePrevote().String(), `Vote{56789:616464720000 12345/02/1(Prevote) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
-	}
-
-	for _, tt := range tc {
-		tt := tt
-		t.Run(tt.name, func(st *testing.T) {
-			if tt.in != tt.out {
-				t.Errorf("Got unexpected string for Proposal. Expected:\n%v\nGot:\n%v", tt.in, tt.out)
-			}
-		})
-	}
-}
-
 func TestVoteVerifySignature(t *testing.T) {
 	privVal := NewMockPV()
-	pubKey := privVal.GetPubKey()
+	pubkey := privVal.GetPubKey()
 
 	vote := examplePrecommit()
 	signBytes := vote.SignBytes("test_chain_id")
@@ -82,7 +64,7 @@ func TestVoteVerifySignature(t *testing.T) {
 	require.NoError(t, err)
 
 	// verify the same vote
-	valid := pubKey.VerifyBytes(vote.SignBytes("test_chain_id"), vote.Signature)
+	valid := pubkey.VerifyBytes(vote.SignBytes("test_chain_id"), vote.Signature)
 	require.True(t, valid)
 
 	// serialize, deserialize and verify again....
@@ -95,7 +77,7 @@ func TestVoteVerifySignature(t *testing.T) {
 	// verify the transmitted vote
 	newSignBytes := precommit.SignBytes("test_chain_id")
 	require.Equal(t, string(signBytes), string(newSignBytes))
-	valid = pubKey.VerifyBytes(newSignBytes, precommit.Signature)
+	valid = pubkey.VerifyBytes(newSignBytes, precommit.Signature)
 	require.True(t, valid)
 }
 
@@ -119,3 +101,21 @@ func TestIsVoteTypeValid(t *testing.T) {
 		})
 	}
 }
+
+func TestVoteVerify(t *testing.T) {
+	privVal := NewMockPV()
+	pubkey := privVal.GetPubKey()
+
+	vote := examplePrevote()
+	vote.ValidatorAddress = pubkey.Address()
+
+	err := vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey())
+	if assert.Error(t, err) {
+		assert.Equal(t, ErrVoteInvalidValidatorAddress, err)
+	}
+
+	err = vote.Verify("test_chain_id", pubkey)
+	if assert.Error(t, err) {
+		assert.Equal(t, ErrVoteInvalidSignature, err)
+	}
+}
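TestVoteVerify above distinguishes Verify's two error paths. A sketch of the same distinction outside the test suite; the main wrapper is invented, and the bare Vote literal stands in for a real, signed vote:

// sketch_vote_verify.go (illustrative only)
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/types"
)

func main() {
	privVal := types.NewMockPV()
	pubkey := privVal.GetPubKey()

	vote := &types.Vote{ValidatorAddress: pubkey.Address()}

	// A key whose address doesn't match the vote: the address check fails first.
	err := vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey())
	fmt.Println(err == types.ErrVoteInvalidValidatorAddress) // true

	// The matching key, but the vote was never signed: the signature check fails.
	err = vote.Verify("test_chain_id", pubkey)
	fmt.Println(err == types.ErrVoteInvalidSignature) // true
}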
diff --git a/types/wire.go b/types/wire.go
index 6342d7eba..9221de968 100644
--- a/types/wire.go
+++ b/types/wire.go
@@ -2,11 +2,11 @@ package types
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/tendermint/crypto"
+	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
 )
 
 var cdc = amino.NewCodec()
 
 func init() {
-	crypto.RegisterAmino(cdc)
+	cryptoAmino.RegisterAmino(cdc)
 }
diff --git a/version/version.go b/version/version.go
index df553115a..706264372 100644
--- a/version/version.go
+++ b/version/version.go
@@ -3,14 +3,14 @@ package version
 // Version components
 const (
 	Maj = "0"
-	Min = "21"
-	Fix = "0"
+	Min = "22"
+	Fix = "5"
 )
 
 var (
 	// Version is the current version of Tendermint
 	// Must be a string because scripts like dist.sh read this file.
-	Version = "0.21.0"
+	Version = "0.22.5"
 
 	// GitCommit is the current HEAD set using ldflags.
 	GitCommit string