
Merge pull request #1432 from tendermint/bucky/aminoify

Bucky/aminoify (pull/1347/head, v0.19.0-rc3)

Ethan Buchman, 7 years ago, committed by GitHub
Parent commit: 93c4312cdd
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)

117 changed files with 2510 additions and 1062 deletions
  1. .circleci/config.yml (+0 -21)
  2. .gitignore (+4 -0)
  3. CHANGELOG.md (+54 -8)
  4. DOCKER/Dockerfile (+4 -4)
  5. DOCKER/Dockerfile.develop (+1 -1)
  6. DOCKER/README.md (+2 -1)
  7. Gopkg.lock (+15 -14)
  8. Gopkg.toml (+14 -11)
  9. Makefile (+4 -9)
  10. blockchain/reactor.go (+9 -5)
  11. blockchain/store.go (+3 -4)
  12. blockchain/store_test.go (+1 -1)
  13. cmd/tendermint/commands/lite.go (+2 -2)
  14. cmd/tendermint/commands/probe_upnp.go (+1 -2)
  15. cmd/tendermint/commands/run_node.go (+6 -6)
  16. codecov.yml (+5 -0)
  17. config/config.go (+84 -81)
  18. config/toml.go (+4 -0)
  19. consensus/common_test.go (+2 -2)
  20. consensus/mempool_test.go (+6 -6)
  21. consensus/reactor.go (+58 -23)
  22. consensus/reactor_test.go (+109 -1)
  23. consensus/replay.go (+6 -6)
  24. consensus/replay_file.go (+9 -10)
  25. consensus/replay_test.go (+3 -3)
  26. consensus/state.go (+108 -109)
  27. consensus/wal_generator.go (+1 -1)
  28. docs/conf.py (+2 -3)
  29. docs/examples/getting-started.md (+1 -1)
  30. docs/examples/install_tendermint.sh (+4 -4)
  31. docs/index.rst (+2 -1)
  32. docs/specification/byzantine-consensus-algorithm.rst (+3 -3)
  33. docs/specification/configuration.rst (+4 -0)
  34. docs/specification/new-spec/bft-time.md (+16 -2)
  35. docs/specification/new-spec/light-client.md (+114 -0)
  36. docs/specification/new-spec/reactors/pex/pex.md (+8 -1)
  37. docs/specification/rpc.rst (+1 -0)
  38. evidence/reactor.go (+8 -3)
  39. lite/proxy/block.go (+9 -0)
  40. lite/proxy/validate_test.go (+218 -0)
  41. mempool/reactor.go (+5 -1)
  42. node/node.go (+22 -21)
  43. node/wire.go (+12 -0)
  44. p2p/base_reactor.go (+4 -4)
  45. p2p/conn/connection.go (+1 -2)
  46. p2p/conn/secret_connection.go (+3 -5)
  47. p2p/conn/secret_connection_test.go (+2 -1)
  48. p2p/dummy/peer.go (+72 -0)
  49. p2p/key.go (+2 -3)
  50. p2p/listener.go (+1 -1)
  51. p2p/netaddress.go (+26 -15)
  52. p2p/netaddress_test.go (+46 -29)
  53. p2p/node_info.go (+1 -0)
  54. p2p/peer.go (+3 -1)
  55. p2p/peer_set.go (+1 -2)
  56. p2p/peer_set_test.go (+1 -1)
  57. p2p/peer_test.go (+9 -2)
  58. p2p/pex/addrbook.go (+140 -13)
  59. p2p/pex/addrbook_test.go (+125 -0)
  60. p2p/pex/known_address.go (+1 -1)
  61. p2p/pex/pex_reactor.go (+58 -31)
  62. p2p/pex/pex_reactor_test.go (+68 -19)
  63. p2p/switch.go (+37 -9)
  64. p2p/switch_test.go (+59 -12)
  65. p2p/test_util.go (+1 -1)
  66. p2p/upnp/upnp.go (+1 -1)
  67. rpc/client/httpclient.go (+9 -0)
  68. rpc/client/interface.go (+1 -0)
  69. rpc/client/localclient.go (+4 -0)
  70. rpc/client/rpc_test.go (+20 -11)
  71. rpc/core/doc.go (+1 -0)
  72. rpc/core/health.go (+31 -0)
  73. rpc/core/net.go (+2 -0)
  74. rpc/core/pipe.go (+2 -2)
  75. rpc/core/routes.go (+1 -0)
  76. rpc/core/status.go (+46 -3)
  77. rpc/core/tx.go (+10 -4)
  78. rpc/core/types/responses.go (+16 -7)
  79. rpc/lib/client/ws_client.go (+13 -13)
  80. rpc/lib/server/http_server.go (+32 -13)
  81. rpc/lib/types/types.go (+1 -2)
  82. scripts/dep_utils/checkout.sh (+0 -35)
  83. scripts/wal2json/main.go (+1 -0)
  84. scripts/wire2amino.go (+181 -0)
  85. state/state_test.go (+2 -2)
  86. state/txindex/kv/kv.go (+90 -51)
  87. state/txindex/kv/kv_test.go (+54 -3)
  88. test/README.md (+0 -4)
  89. test/p2p/data/app/init.sh (+0 -9)
  90. test/p2p/data/chain_config.json (+0 -53)
  91. test/p2p/data/mach1/core/config/genesis.json (+37 -37)
  92. test/p2p/data/mach1/core/config/node_key.json (+6 -1)
  93. test/p2p/data/mach1/core/config/priv_validator.json (+10 -10)
  94. test/p2p/data/mach2/core/config/genesis.json (+37 -37)
  95. test/p2p/data/mach2/core/config/node_key.json (+6 -1)
  96. test/p2p/data/mach2/core/config/priv_validator.json (+10 -10)
  97. test/p2p/data/mach3/core/config/genesis.json (+37 -37)
  98. test/p2p/data/mach3/core/config/node_key.json (+6 -1)
  99. test/p2p/data/mach3/core/config/priv_validator.json (+10 -10)
  100. test/p2p/data/mach4/core/config/genesis.json (+37 -37)

.circleci/config.yml (+0 -21)

@@ -130,19 +130,6 @@ jobs:
       paths:
         - "profiles/*"
-  test_libs:
-    <<: *defaults
-    steps:
-      - attach_workspace:
-          at: /tmp/workspace
-      - restore_cache:
-          key: v1-pkg-cache
-      - restore_cache:
-          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
-      - run:
-          name: Run tests
-          command: bash test/test_libs.sh
-
   test_persistence:
     <<: *defaults
     steps:
@@ -205,14 +192,6 @@ workflows:
       - test_cover:
           requires:
             - setup_dependencies
-      - test_libs:
-          filters:
-            branches:
-              only:
-                - develop
-                - master
-          requires:
-            - setup_dependencies
       - test_persistence:
          requires:
            - setup_abci


.gitignore (+4 -0)

@@ -17,7 +17,11 @@ test/logs
 coverage.txt
 docs/_build
 docs/tools
+docs/abci-spec.rst
 *.log
 scripts/wal2json/wal2json
 scripts/cutWALUntil/cutWALUntil
+
+.idea/
+*.iml

CHANGELOG.md (+54 -8)

@@ -7,7 +7,6 @@ BREAKING CHANGES:
 - Upgrade consensus for more real-time use of evidence

 FEATURES:
-- Peer reputation management
 - Use the chain as its own CA for nodes and validators
 - Tooling to run multiple blockchains/apps, possibly in a single process
 - State syncing (without transaction replay)
@@ -25,19 +24,66 @@ BUG FIXES:
 - Graceful handling/recovery for apps that have non-determinism or fail to halt
 - Graceful handling/recovery for violations of safety, or liveness

-## 0.17.0 (TBD)
+## 0.18.0 (April 6th, 2018)

 BREAKING:
+- [genesis] rename `app_options` to `app_state`
+- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0)
+- [types] ValidatorSet.GetByAddress returns -1 if no validator found
+- [p2p] require all addresses come with an ID no matter what
+- [rpc] Listening address must contain tcp:// or unix:// prefix
+
+FEATURES:
+- [rpc] StartHTTPAndTLSServer (not used yet)
+- [rpc] Include validator's voting power in `/status`
+- [rpc] `/tx` and `/tx_search` responses now include the transaction hash
+- [rpc] Include peer NodeIDs in `/net_info`

 IMPROVEMENTS:
-- [config] exposed `auth_enc` flag to enable/disable encryption
+- [config] trim whitespace from elements of lists (like `persistent_peers`)
+- [rpc] `/tx_search` results are sorted by height
+- [p2p] do not try to connect to ourselves (ok, maybe only once)
+- [p2p] seeds respond with a bias towards good peers
+
+BUG FIXES:
+- [rpc] fix subscribing using an abci.ResponseDeliverTx tag
+- [rpc] fix tx_indexers matchRange
+- [rpc] fix unsubscribing (see tmlibs v0.8.0)
+
+## 0.17.1 (March 27th, 2018)
+
+BUG FIXES:
+- [types] Actually support `app_state` in genesis as `AppStateJSON`
+
+## 0.17.0 (March 27th, 2018)
+
+BREAKING:
+- [types] WriteSignBytes -> SignBytes
+
+IMPROVEMENTS:
+- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the next breaking release)
+- [docs] note on determinism (docs/determinism.rst)
+- [genesis] `app_options` field is deprecated. please rename it to `app_state` in your genesis file(s). `app_options` will not work in the next breaking release
+- [p2p] dial seeds directly without potential peers
+- [p2p] exponential backoff for addrs in the address book
+- [p2p] mark peer as good if it contributed enough votes or block parts
+- [p2p] stop peer if it sends incorrect data, msg to unknown channel, msg we did not expect
 - [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address
-- [all] renamed `dummy` (`persistent_dummy`) to `kvstore`
-(`persistent_kvstore`) (name "dummy" is deprecated and will not work in
-release after this one)
+- [spec] various improvements
+- switched from glide to dep internally for package management
+- [wire] prep work for upgrading to new go-wire (which is now called go-amino)
+
+FEATURES:
+- [config] exposed `auth_enc` flag to enable/disable encryption
+- [config] added the `--p2p.private_peer_ids` flag and `PrivatePeerIDs` config variable (see config for description)
+- [rpc] added `/health` endpoint, which returns empty result for now
+- [types/priv_validator] new format and socket client, allowing for remote signing
+
+BUG FIXES:
+- [consensus] fix liveness bug by introducing ValidBlock mechanism

-## 0.16.0 (February 20th, 2017)
+## 0.16.0 (February 20th, 2018)

 BREAKING CHANGES:
 - [config] use $TMHOME/config for all config and json files


DOCKER/Dockerfile (+4 -4)

@@ -1,8 +1,8 @@
-FROM alpine:3.6
+FROM alpine:3.7

 # This is the release of tendermint to pull in.
-ENV TM_VERSION 0.15.0
-ENV TM_SHA256SUM 71cc271c67eca506ca492c8b90b090132f104bf5dbfe0af2702a50886e88de17
+ENV TM_VERSION 0.17.1
+ENV TM_SHA256SUM d57008c63d2d9176861137e38ed203da486febf20ae7d388fb810a75afff8f24

 # Tendermint will be looking for genesis file in /tendermint (unless you change
 # `genesis_file` in config.toml). You can put your config.toml and private
@@ -26,7 +26,7 @@ RUN mkdir -p $DATA_ROOT && \
 RUN apk add --no-cache bash curl jq

 RUN apk add --no-cache openssl && \
-    wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
+    wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
     echo "${TM_SHA256SUM}  tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
     unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
     apk del openssl && \


DOCKER/Dockerfile.develop (+1 -1)

@@ -1,4 +1,4 @@
-FROM alpine:3.6
+FROM alpine:3.7

 ENV DATA_ROOT /tendermint
 ENV TMHOME $DATA_ROOT


DOCKER/README.md (+2 -1)

@@ -1,6 +1,7 @@
 # Supported tags and respective `Dockerfile` links

-- `0.15.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
+- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile)
+- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
 - `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
 - `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile)
 - `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)


Gopkg.lock (+15 -14)

@@ -105,7 +105,7 @@
     "json/scanner",
     "json/token"
   ]
-  revision = "f40e974e75af4e271d97ce0fc917af5898ae7bda"
+  revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"

 [[projects]]
   name = "github.com/inconshreveable/mousetrap"
@@ -159,7 +159,7 @@
   branch = "master"
   name = "github.com/rcrowley/go-metrics"
   packages = ["."]
-  revision = "8732c616f52954686704c8645fe1a9d59e9df7c1"
+  revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"

 [[projects]]
   name = "github.com/spf13/afero"
@@ -167,8 +167,8 @@
     ".",
     "mem"
   ]
-  revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c"
-  version = "v1.0.2"
+  revision = "63644898a8da0bc22138abf860edaf5277b6102e"
+  version = "v1.1.0"

 [[projects]]
   name = "github.com/spf13/cast"
@@ -179,8 +179,8 @@
 [[projects]]
   name = "github.com/spf13/cobra"
   packages = ["."]
-  revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
-  version = "v0.0.1"
+  revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
+  version = "v0.0.2"

 [[projects]]
   branch = "master"
@@ -226,7 +226,7 @@
     "leveldb/table",
     "leveldb/util"
   ]
-  revision = "169b1b37be738edb2813dab48c97a549bcf99bb5"
+  revision = "714f901b98fdb3aa954b4193d8cbd64a28d80cad"

 [[projects]]
   name = "github.com/tendermint/abci"
@@ -301,7 +301,7 @@
     "ripemd160",
     "salsa20/salsa"
   ]
-  revision = "88942b9c40a4c9d203b82b3731787b672d6e809b"
+  revision = "b2aa35443fbc700ab74c586ae79b81c171851023"

 [[projects]]
   branch = "master"
@@ -315,13 +315,13 @@
     "lex/httplex",
     "trace"
   ]
-  revision = "6078986fec03a1dcc236c34816c71b0e05018fda"
+  revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"

 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
   packages = ["unix"]
-  revision = "91ee8cde435411ca3f1cd365e8f20131aed4d0a1"
+  revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"

 [[projects]]
   name = "golang.org/x/text"
@@ -348,7 +348,7 @@
   branch = "master"
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
-  revision = "ab0870e398d5dd054b868c0db1481ab029b9a9f2"
+  revision = "ce84044298496ef4b54b4a0a0909ba593cc60e30"

 [[projects]]
   name = "google.golang.org/grpc"
@@ -372,17 +372,18 @@
     "transport"
   ]
   revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
+  source = "github.com/grpc/grpc-go"
   version = "v1.7.5"

 [[projects]]
   name = "gopkg.in/yaml.v2"
   packages = ["."]
-  revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
-  version = "v2.1.1"
+  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+  version = "v2.2.1"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "ed1149ed5293b7b28b7505627648c6a1152aaff0ed06a3849995b29751ae00f3"
+  inputs-digest = "e5e6c7942710846bdb5589fba10c90a869773f2685da825b955510afe0d7c5a4"
   solver-name = "gps-cdcl"
   solver-version = 1

Gopkg.toml (+14 -11)

@@ -35,23 +35,23 @@

 [[constraint]]
   name = "github.com/go-kit/kit"
-  version = "0.6.0"
+  version = "~0.6.0"

 [[constraint]]
   name = "github.com/gogo/protobuf"
-  version = "1.0.0"
+  version = "~1.0.0"

 [[constraint]]
   name = "github.com/golang/protobuf"
-  version = "1.0.0"
+  version = "~1.0.0"

 [[constraint]]
   name = "github.com/gorilla/websocket"
-  version = "1.2.0"
+  version = "~1.2.0"

 [[constraint]]
   name = "github.com/pkg/errors"
-  version = "0.8.0"
+  version = "~0.8.0"

 [[constraint]]
   name = "github.com/rcrowley/go-metrics"
@@ -59,15 +59,15 @@

 [[constraint]]
   name = "github.com/spf13/cobra"
-  version = "0.0.1"
+  version = "~0.0.1"

 [[constraint]]
   name = "github.com/spf13/viper"
-  version = "1.0.0"
+  version = "~1.0.0"

 [[constraint]]
   name = "github.com/stretchr/testify"
-  version = "1.2.1"
+  version = "~1.2.1"

 [[constraint]]
   name = "github.com/tendermint/abci"
@@ -81,13 +81,16 @@
   name = "github.com/tendermint/go-amino"
   version = "0.9.6"

-[[constraint]]
+[[override]]
+# [[constraint]]
   name = "github.com/tendermint/tmlibs"
-  version = "0.8.1"
+  version = "~0.8.1"
+  # branch = "develop"

 [[constraint]]
   name = "google.golang.org/grpc"
-  version = "1.7.3"
+  source = "github.com/grpc/grpc-go"
+  version = "~1.7.3"

 [prune]
   go-tests = true


Makefile (+4 -9)

@@ -14,13 +14,13 @@ check: check_tools ensure_deps
 ### Build

 build:
-	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/

 build_race:
-	go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
+	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint

 install:
-	go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
+	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint

 ########################################
 ### Distribution
@@ -119,11 +119,6 @@ test_integrations:
 	make test_persistence
 	make test_p2p

-test_libs:
-	# checkout every github.com/tendermint dir and run its tests
-	# NOTE: on release-* or master branches only (set by Jenkins)
-	docker run --name run_libs -t tester bash test/test_libs.sh
-
 test_release:
 	@go test -tags release $(PACKAGES)

@@ -186,4 +181,4 @@ metalinter_all:
 # To avoid unintended conflicts with file names, always add to .PHONY
 # unless there is a reason not to.
 # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
-.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_libs test_integrations test_release test100 vagrant_test fmt
+.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt

blockchain/reactor.go (+9 -5)

@@ -30,7 +30,7 @@ const (
 	// NOTE: keep up to date with bcBlockResponseMessage
 	bcBlockResponseMessagePrefixSize   = 4
 	bcBlockResponseMessageFieldKeySize = 1
-	maxMessageSize = types.MaxBlockSizeBytes +
+	maxMsgSize = types.MaxBlockSizeBytes +
 		bcBlockResponseMessagePrefixSize +
 		bcBlockResponseMessageFieldKeySize
 )
@@ -75,9 +75,9 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
 			store.Height()))
 	}

-	const cap = 1000 // must be bigger than peers count
-	requestsCh := make(chan BlockRequest, cap)
-	errorsCh := make(chan peerError, cap) // so we don't block in #Receive#pool.AddBlock
+	const capacity = 1000 // must be bigger than peers count
+	requestsCh := make(chan BlockRequest, capacity)
+	errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock

 	pool := NewBlockPool(
 		store.Height()+1,
@@ -133,7 +133,7 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 			Priority:            10,
 			SendQueueCapacity:   1000,
 			RecvBufferCapacity:  50 * 4096,
-			RecvMessageCapacity: maxMessageSize,
+			RecvMessageCapacity: maxMsgSize,
 		},
 	}
 }
@@ -345,6 +345,10 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
 // DecodeMessage decodes BlockchainMessage.
 // TODO: ensure that bz is completely read.
 func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
+	if len(bz) > maxMsgSize {
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+			len(bz), maxMsgSize)
+	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	if err != nil {
 		err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")

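Note: the DecodeMessage change above is the core defensive pattern in this PR: reject oversized payloads before any amino decoding work is done. A minimal self-contained sketch of the pattern (the constant value and helper name are illustrative, not the real ones):

package main

import "fmt"

// Illustrative cap; the real maxMsgSize is derived from
// types.MaxBlockSizeBytes plus the amino prefix and field-key sizes.
const maxMsgSize = 1024

// decodeMessage rejects oversized input up front, mirroring the guard
// added to DecodeMessage in the hunk above.
func decodeMessage(bz []byte) error {
	if len(bz) > maxMsgSize {
		return fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	// real code: err = cdc.UnmarshalBinaryBare(bz, &msg)
	return nil
}

func main() {
	fmt.Println(decodeMessage(make([]byte, 4096))) // msg exceeds max size (4096 > 1024)
	fmt.Println(decodeMessage([]byte{0x01}))       // <nil>
}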

blockchain/store.go (+3 -4)

@@ -1,7 +1,6 @@
 package blockchain

 import (
-	"encoding/json"
 	"fmt"
 	"sync"

@@ -218,12 +217,12 @@ func calcSeenCommitKey(height int64) []byte {
 var blockStoreKey = []byte("blockStore")

 type BlockStoreStateJSON struct {
-	Height int64
+	Height int64 `json:"height"`
 }

 // Save persists the blockStore state to the database as JSON.
 func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
-	bytes, err := json.Marshal(bsj)
+	bytes, err := cdc.MarshalJSON(bsj)
 	if err != nil {
 		cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
 	}
@@ -240,7 +239,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
 		}
 	}
 	bsj := BlockStoreStateJSON{}
-	err := json.Unmarshal(bytes, &bsj)
+	err := cdc.UnmarshalJSON(bytes, &bsj)
 	if err != nil {
 		panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
 	}

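Note: the `json:"height"` tag added above is load-bearing. The struct is now serialized through the amino codec, which (like encoding/json) derives the key name from the struct tag; without it the on-disk key would change from "height" to "Height" and databases written as {"height": ...} (see blockchain/store_test.go below) would stop round-tripping. A sketch of the effect using only the standard library, on the assumption that amino's JSON honors the same tags, which is what this diff relies on:

package main

import (
	"encoding/json"
	"fmt"
)

type withoutTag struct{ Height int64 }

type withTag struct {
	Height int64 `json:"height"` // tag keeps the on-disk key lowercase
}

func main() {
	a, _ := json.Marshal(withoutTag{10000})
	b, _ := json.Marshal(withTag{10000})
	fmt.Println(string(a)) // {"Height":10000}
	fmt.Println(string(b)) // {"height":10000}
}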

blockchain/store_test.go (+1 -1)

@@ -31,7 +31,7 @@ func TestNewBlockStore(t *testing.T) {
 	db := db.NewMemDB()
 	db.Set(blockStoreKey, []byte(`{"height": 10000}`))
 	bs := NewBlockStore(db)
-	assert.Equal(t, bs.Height(), int64(10000), "failed to properly parse blockstore")
+	require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore")

 	panicCausers := []struct {
 		data []byte

cmd/tendermint/commands/lite.go (+2 -2)

@@ -34,14 +34,14 @@ var (
 )

 func init() {
-	LiteCmd.Flags().StringVar(&listenAddr, "laddr", ":8888", "Serve the proxy on the given port")
+	LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "Serve the proxy on the given address")
 	LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:46657", "Connect to a Tendermint node at this address")
 	LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
 	LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
 }

 func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
-	u, err := url.Parse(nodeAddr)
+	u, err := url.Parse(addr)
 	if err != nil {
 		return "", err
 	}

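Note: the one-identifier fix above (parsing the `addr` argument instead of the package-level `nodeAddr`) is what makes the helper actually validate the new `tcp://localhost:8888` default for --laddr; before, it silently validated the node address no matter what it was given. A simplified, hypothetical string-based sketch of what such a helper does (the real one goes through url.Parse):

package main

import (
	"fmt"
	"strings"
)

// ensureScheme defaults bare host:port addresses to tcp://.
// Hypothetical simplification of ensureAddrHasSchemeOrDefaultToTCP;
// the fixed bug was that the helper ignored its argument entirely.
func ensureScheme(addr string) string {
	if strings.Contains(addr, "://") {
		return addr
	}
	return "tcp://" + addr
}

func main() {
	fmt.Println(ensureScheme("localhost:8888"))        // tcp://localhost:8888
	fmt.Println(ensureScheme("unix:///tmp/node.sock")) // unchanged
}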

cmd/tendermint/commands/probe_upnp.go (+1 -2)

@@ -1,7 +1,6 @@
 package commands

 import (
-	"encoding/json"
 	"fmt"

 	"github.com/spf13/cobra"
@@ -22,7 +21,7 @@ func probeUpnp(cmd *cobra.Command, args []string) error {
 		fmt.Println("Probe failed: ", err)
 	} else {
 		fmt.Println("Probe success!")
-		jsonBytes, err := json.Marshal(capabilities)
+		jsonBytes, err := cdc.MarshalJSON(capabilities)
 		if err != nil {
 			return err
 		}


cmd/tendermint/commands/run_node.go (+6 -6)

@@ -31,18 +31,19 @@ func AddNodeFlags(cmd *cobra.Command) {
 	// p2p flags
 	cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
-	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
-	cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma delimited host:port persistent peers")
+	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes")
+	cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers")
 	cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
 	cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange")
 	cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode")
+	cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs")

 	// consensus flags
 	cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes")
 }

-// NewRunNodeCmd returns the command that allows the CLI to start a
-// node. It can be used with a custom PrivValidator and in-process ABCI application.
+// NewRunNodeCmd returns the command that allows the CLI to start a node.
+// It can be used with a custom PrivValidator and in-process ABCI application.
 func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "node",
@@ -56,9 +57,8 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
 			if err := n.Start(); err != nil {
 				return fmt.Errorf("Failed to start node: %v", err)
-			} else {
-				logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
 			}
+			logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

 			// Trap signal, run forever.
 			n.RunForever()


codecov.yml (+5 -0)

@@ -16,3 +16,8 @@ comment:
   require_changes: no
   require_base: no
   require_head: yes
+
+ignore:
+  - "docs"
+  - "DOCKER"
+  - "scripts"

config/config.go (+84 -81)

@@ -137,10 +137,6 @@ type BaseConfig struct {
 	DBPath string `mapstructure:"db_dir"`
 }

-func (c BaseConfig) ChainID() string {
-	return c.chainID
-}
-
 // DefaultBaseConfig returns a default base configuration for a Tendermint node
 func DefaultBaseConfig() BaseConfig {
 	return BaseConfig{
@@ -161,32 +157,36 @@ func DefaultBaseConfig() BaseConfig {

 // TestBaseConfig returns a base configuration for testing a Tendermint node
 func TestBaseConfig() BaseConfig {
-	conf := DefaultBaseConfig()
-	conf.chainID = "tendermint_test"
-	conf.ProxyApp = "kvstore"
-	conf.FastSync = false
-	conf.DBBackend = "memdb"
-	return conf
+	cfg := DefaultBaseConfig()
+	cfg.chainID = "tendermint_test"
+	cfg.ProxyApp = "kvstore"
+	cfg.FastSync = false
+	cfg.DBBackend = "memdb"
+	return cfg
+}
+
+func (cfg BaseConfig) ChainID() string {
+	return cfg.chainID
 }

 // GenesisFile returns the full path to the genesis.json file
-func (b BaseConfig) GenesisFile() string {
-	return rootify(b.Genesis, b.RootDir)
+func (cfg BaseConfig) GenesisFile() string {
+	return rootify(cfg.Genesis, cfg.RootDir)
 }

 // PrivValidatorFile returns the full path to the priv_validator.json file
-func (b BaseConfig) PrivValidatorFile() string {
-	return rootify(b.PrivValidator, b.RootDir)
+func (cfg BaseConfig) PrivValidatorFile() string {
+	return rootify(cfg.PrivValidator, cfg.RootDir)
 }

 // NodeKeyFile returns the full path to the node_key.json file
-func (b BaseConfig) NodeKeyFile() string {
-	return rootify(b.NodeKey, b.RootDir)
+func (cfg BaseConfig) NodeKeyFile() string {
+	return rootify(cfg.NodeKey, cfg.RootDir)
 }

 // DBDir returns the full path to the database directory
-func (b BaseConfig) DBDir() string {
-	return rootify(b.DBPath, b.RootDir)
+func (cfg BaseConfig) DBDir() string {
+	return rootify(cfg.DBPath, cfg.RootDir)
 }

 // DefaultLogLevel returns a default log level of "error"
@@ -229,11 +229,11 @@ func DefaultRPCConfig() *RPCConfig {

 // TestRPCConfig returns a configuration for testing the RPC server
 func TestRPCConfig() *RPCConfig {
-	conf := DefaultRPCConfig()
-	conf.ListenAddress = "tcp://0.0.0.0:36657"
-	conf.GRPCListenAddress = "tcp://0.0.0.0:36658"
-	conf.Unsafe = true
-	return conf
+	cfg := DefaultRPCConfig()
+	cfg.ListenAddress = "tcp://0.0.0.0:36657"
+	cfg.GRPCListenAddress = "tcp://0.0.0.0:36658"
+	cfg.Unsafe = true
+	return cfg
 }

 //-----------------------------------------------------------------------------
@@ -250,8 +250,8 @@ type P2PConfig struct {
 	// We only use these if we can’t connect to peers in the addrbook
 	Seeds string `mapstructure:"seeds"`

-	// Comma separated list of persistent peers to connect to
-	// We always connect to these
+	// Comma separated list of nodes to keep persistent connections to
+	// Do not add private peers to this list if you don't want them advertised
 	PersistentPeers string `mapstructure:"persistent_peers"`

 	// Skip UPNP port forwarding
@@ -289,6 +289,9 @@ type P2PConfig struct {

 	// Authenticated encryption
 	AuthEnc bool `mapstructure:"auth_enc"`
+
+	// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+	PrivatePeerIDs string `mapstructure:"private_peer_ids"`
 }

 // DefaultP2PConfig returns a default configuration for the peer-to-peer layer
@@ -310,16 +313,16 @@ func DefaultP2PConfig() *P2PConfig {

 // TestP2PConfig returns a configuration for testing the peer-to-peer layer
 func TestP2PConfig() *P2PConfig {
-	conf := DefaultP2PConfig()
-	conf.ListenAddress = "tcp://0.0.0.0:36656"
-	conf.SkipUPNP = true
-	conf.FlushThrottleTimeout = 10
-	return conf
+	cfg := DefaultP2PConfig()
+	cfg.ListenAddress = "tcp://0.0.0.0:36656"
+	cfg.SkipUPNP = true
+	cfg.FlushThrottleTimeout = 10
+	return cfg
 }

 // AddrBookFile returns the full path to the address book
-func (p *P2PConfig) AddrBookFile() string {
-	return rootify(p.AddrBook, p.RootDir)
+func (cfg *P2PConfig) AddrBookFile() string {
+	return rootify(cfg.AddrBook, cfg.RootDir)
 }

 //-----------------------------------------------------------------------------
@@ -348,14 +351,14 @@ func DefaultMempoolConfig() *MempoolConfig {

 // TestMempoolConfig returns a configuration for testing the Tendermint mempool
 func TestMempoolConfig() *MempoolConfig {
-	config := DefaultMempoolConfig()
-	config.CacheSize = 1000
-	return config
+	cfg := DefaultMempoolConfig()
+	cfg.CacheSize = 1000
+	return cfg
 }

 // WalDir returns the full path to the mempool's write-ahead log
-func (m *MempoolConfig) WalDir() string {
-	return rootify(m.WalPath, m.RootDir)
+func (cfg *MempoolConfig) WalDir() string {
+	return rootify(cfg.WalPath, cfg.RootDir)
 }

 //-----------------------------------------------------------------------------
@@ -394,6 +397,44 @@ type ConsensusConfig struct {
 	PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
 }

+// DefaultConsensusConfig returns a default configuration for the consensus service
+func DefaultConsensusConfig() *ConsensusConfig {
+	return &ConsensusConfig{
+		WalPath:                     filepath.Join(defaultDataDir, "cs.wal", "wal"),
+		WalLight:                    false,
+		TimeoutPropose:              3000,
+		TimeoutProposeDelta:         500,
+		TimeoutPrevote:              1000,
+		TimeoutPrevoteDelta:         500,
+		TimeoutPrecommit:            1000,
+		TimeoutPrecommitDelta:       500,
+		TimeoutCommit:               1000,
+		SkipTimeoutCommit:           false,
+		MaxBlockSizeTxs:             10000,
+		MaxBlockSizeBytes:           1, // TODO
+		CreateEmptyBlocks:           true,
+		CreateEmptyBlocksInterval:   0,
+		PeerGossipSleepDuration:     100,
+		PeerQueryMaj23SleepDuration: 2000,
+	}
+}
+
+// TestConsensusConfig returns a configuration for testing the consensus service
+func TestConsensusConfig() *ConsensusConfig {
+	cfg := DefaultConsensusConfig()
+	cfg.TimeoutPropose = 100
+	cfg.TimeoutProposeDelta = 1
+	cfg.TimeoutPrevote = 10
+	cfg.TimeoutPrevoteDelta = 1
+	cfg.TimeoutPrecommit = 10
+	cfg.TimeoutPrecommitDelta = 1
+	cfg.TimeoutCommit = 10
+	cfg.SkipTimeoutCommit = true
+	cfg.PeerGossipSleepDuration = 5
+	cfg.PeerQueryMaj23SleepDuration = 250
+	return cfg
+}
+
 // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
 func (cfg *ConsensusConfig) WaitForTxs() bool {
 	return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
@@ -434,55 +475,17 @@ func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
 	return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
 }

-// DefaultConsensusConfig returns a default configuration for the consensus service
-func DefaultConsensusConfig() *ConsensusConfig {
-	return &ConsensusConfig{
-		WalPath:                     filepath.Join(defaultDataDir, "cs.wal", "wal"),
-		WalLight:                    false,
-		TimeoutPropose:              3000,
-		TimeoutProposeDelta:         500,
-		TimeoutPrevote:              1000,
-		TimeoutPrevoteDelta:         500,
-		TimeoutPrecommit:            1000,
-		TimeoutPrecommitDelta:       500,
-		TimeoutCommit:               1000,
-		SkipTimeoutCommit:           false,
-		MaxBlockSizeTxs:             10000,
-		MaxBlockSizeBytes:           1, // TODO
-		CreateEmptyBlocks:           true,
-		CreateEmptyBlocksInterval:   0,
-		PeerGossipSleepDuration:     100,
-		PeerQueryMaj23SleepDuration: 2000,
-	}
-}
-
-// TestConsensusConfig returns a configuration for testing the consensus service
-func TestConsensusConfig() *ConsensusConfig {
-	config := DefaultConsensusConfig()
-	config.TimeoutPropose = 100
-	config.TimeoutProposeDelta = 1
-	config.TimeoutPrevote = 10
-	config.TimeoutPrevoteDelta = 1
-	config.TimeoutPrecommit = 10
-	config.TimeoutPrecommitDelta = 1
-	config.TimeoutCommit = 10
-	config.SkipTimeoutCommit = true
-	config.PeerGossipSleepDuration = 5
-	config.PeerQueryMaj23SleepDuration = 250
-	return config
-}
-
 // WalFile returns the full path to the write-ahead log file
-func (c *ConsensusConfig) WalFile() string {
-	if c.walFile != "" {
-		return c.walFile
+func (cfg *ConsensusConfig) WalFile() string {
+	if cfg.walFile != "" {
+		return cfg.walFile
 	}
-	return rootify(c.WalPath, c.RootDir)
+	return rootify(cfg.WalPath, cfg.RootDir)
 }

 // SetWalFile sets the path to the write-ahead log file
-func (c *ConsensusConfig) SetWalFile(walFile string) {
-	c.walFile = walFile
+func (cfg *ConsensusConfig) SetWalFile(walFile string) {
+	cfg.walFile = walFile
 }

 //-----------------------------------------------------------------------------

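Note: all the path accessors renamed above (GenesisFile, PrivValidatorFile, NodeKeyFile, DBDir, AddrBookFile, WalDir, WalFile) funnel through `rootify`, which is not shown in this diff. A plausible sketch of it, inferred only from how the accessors call it (relative paths joined onto RootDir, absolute paths assumed to pass through):

package main

import (
	"fmt"
	"path/filepath"
)

// rootify joins a relative path onto the root directory and leaves
// absolute paths untouched. Sketch only: the real helper lives
// elsewhere in config/config.go and is assumed, not shown, here.
func rootify(path, root string) string {
	if filepath.IsAbs(path) {
		return path
	}
	return filepath.Join(root, path)
}

func main() {
	fmt.Println(rootify("config/genesis.json", "/home/user/.tendermint"))
	// /home/user/.tendermint/config/genesis.json
	fmt.Println(rootify("/etc/genesis.json", "/home/user/.tendermint"))
	// /etc/genesis.json
}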
config/toml.go (+4 -0)

@@ -127,6 +127,7 @@ laddr = "{{ .P2P.ListenAddress }}"
 seeds = ""

 # Comma separated list of nodes to keep persistent connections to
+# Do not add private peers to this list if you don't want them advertised
 persistent_peers = ""

 # Path to address book
@@ -162,6 +163,9 @@ seed_mode = {{ .P2P.SeedMode }}
 # Authenticated encryption
 auth_enc = {{ .P2P.AuthEnc }}

+# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
+
 ##### mempool configuration options #####
 [mempool]

+ 2
- 2
consensus/common_test.go View File

@ -102,13 +102,13 @@ func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*v
func incrementHeight(vss ...*validatorStub) { func incrementHeight(vss ...*validatorStub) {
for _, vs := range vss { for _, vs := range vss {
vs.Height += 1
vs.Height++
} }
} }
func incrementRound(vss ...*validatorStub) { func incrementRound(vss ...*validatorStub) {
for _, vs := range vss { for _, vs := range vss {
vs.Round += 1
vs.Round++
} }
} }


consensus/mempool_test.go (+6 -6)

@@ -152,6 +152,7 @@ func TestMempoolRmBadTx(t *testing.T) {
 			txs := cs.mempool.Reap(1)
 			if len(txs) == 0 {
 				emptyMempoolCh <- struct{}{}
+				return
 			}
 			time.Sleep(10 * time.Millisecond)
 		}
@@ -199,7 +200,7 @@ func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
 			Code: code.CodeTypeBadNonce,
 			Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
 	}
-	app.txCount += 1
+	app.txCount++
 	return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
 }

@@ -210,7 +211,7 @@ func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx {
 			Code: code.CodeTypeBadNonce,
 			Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)}
 	}
-	app.mempoolTxCount += 1
+	app.mempoolTxCount++
 	return abci.ResponseCheckTx{Code: code.CodeTypeOK}
 }

@@ -224,9 +225,8 @@ func (app *CounterApplication) Commit() abci.ResponseCommit {
 	app.mempoolTxCount = app.txCount
 	if app.txCount == 0 {
 		return abci.ResponseCommit{}
-	} else {
-		hash := make([]byte, 8)
-		binary.BigEndian.PutUint64(hash, uint64(app.txCount))
-		return abci.ResponseCommit{Data: hash}
 	}
+	hash := make([]byte, 8)
+	binary.BigEndian.PutUint64(hash, uint64(app.txCount))
+	return abci.ResponseCommit{Data: hash}
 }

consensus/reactor.go (+58 -23)

@@ -25,7 +25,9 @@ const (
 	VoteChannel        = byte(0x22)
 	VoteSetBitsChannel = byte(0x23)

-	maxConsensusMessageSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
+	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
+
+	blocksToContributeToBecomeGoodPeer = 10000
 )

 //-----------------------------------------------------------------------------
@@ -110,28 +112,28 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
 			ID:                  StateChannel,
 			Priority:            5,
 			SendQueueCapacity:   100,
-			RecvMessageCapacity: maxConsensusMessageSize,
+			RecvMessageCapacity: maxMsgSize,
 		},
 		{
 			ID:                  DataChannel, // maybe split between gossiping current block and catchup stuff
 			Priority:            10,          // once we gossip the whole block there's nothing left to send until next height or round
 			SendQueueCapacity:   100,
 			RecvBufferCapacity:  50 * 4096,
-			RecvMessageCapacity: maxConsensusMessageSize,
+			RecvMessageCapacity: maxMsgSize,
 		},
 		{
 			ID:                  VoteChannel,
 			Priority:            5,
 			SendQueueCapacity:   100,
 			RecvBufferCapacity:  100 * 100,
-			RecvMessageCapacity: maxConsensusMessageSize,
+			RecvMessageCapacity: maxMsgSize,
 		},
 		{
 			ID:                  VoteSetBitsChannel,
 			Priority:            1,
 			SendQueueCapacity:   2,
 			RecvBufferCapacity:  1024,
-			RecvMessageCapacity: maxConsensusMessageSize,
+			RecvMessageCapacity: maxMsgSize,
 		},
 	}
 }
@@ -254,7 +256,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			ps.ApplyProposalPOLMessage(msg)
 		case *BlockPartMessage:
 			ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
-			if numBlocks := ps.RecordBlockPart(msg); numBlocks > 10000 {
+			if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 {
 				conR.Switch.MarkPeerAsGood(src)
 			}
 			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
@@ -276,7 +278,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			ps.EnsureVoteBitArrays(height, valSize)
 			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
 			ps.SetHasVote(msg.Vote)
-			if blocks := ps.RecordVote(msg.Vote); blocks > 10000 {
+			if blocks := ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 {
 				conR.Switch.MarkPeerAsGood(src)
 			}
@@ -372,19 +374,21 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
 	}

 	go func() {
+		var data interface{}
+		var ok bool
 		for {
 			select {
-			case data, ok := <-stepsCh:
+			case data, ok = <-stepsCh:
 				if ok { // a receive from a closed channel returns the zero value immediately
 					edrs := data.(types.EventDataRoundState)
 					conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
 				}
-			case data, ok := <-votesCh:
+			case data, ok = <-votesCh:
 				if ok {
 					edv := data.(types.EventDataVote)
 					conR.broadcastHasVoteMessage(edv.Vote)
 				}
-			case data, ok := <-heartbeatsCh:
+			case data, ok = <-heartbeatsCh:
 				if ok {
 					edph := data.(types.EventDataProposalHeartbeat)
 					conR.broadcastProposalHeartbeatMessage(edph)
@@ -393,6 +397,10 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
 				conR.eventBus.UnsubscribeAll(ctx, subscriber)
 				return
 			}
+			if !ok {
+				conR.eventBus.UnsubscribeAll(ctx, subscriber)
+				return
+			}
 		}
 	}()
@@ -603,11 +611,9 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
 			logger.Debug("Sending block part for catchup failed")
 		}
 		return
-	} else {
-		//logger.Info("No parts to send in catch-up, sleeping")
-		time.Sleep(conR.conS.config.PeerGossipSleep())
-		return
 	}
+	//logger.Info("No parts to send in catch-up, sleeping")
+	time.Sleep(conR.conS.config.PeerGossipSleep())
 }

 func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
@@ -853,6 +859,10 @@ type peerStateStats struct {
 	blockParts int
 }

+func (pss peerStateStats) String() string {
+	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}", pss.votes, pss.blockParts)
+}
+
 // NewPeerState returns a new PeerState for the given Peer
 func NewPeerState(peer p2p.Peer) *PeerState {
 	return &PeerState{
@@ -1083,27 +1093,46 @@ func (ps *PeerState) RecordVote(vote *types.Vote) int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.stats.lastVoteHeight == vote.Height {
+	if ps.stats.lastVoteHeight >= vote.Height {
 		return ps.stats.votes
 	}
 	ps.stats.lastVoteHeight = vote.Height
-	ps.stats.votes += 1
+	ps.stats.votes++
+	return ps.stats.votes
+}
+
+// VotesSent returns the number of blocks for which peer has been sending us
+// votes.
+func (ps *PeerState) VotesSent() int {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
 	return ps.stats.votes
 }

-// RecordVote updates internal statistics for this peer by recording the block part.
-// It returns the total number of block parts (1 per block). This essentially means
-// the number of blocks for which peer has been sending us block parts.
+// RecordBlockPart updates internal statistics for this peer by recording the
+// block part. It returns the total number of block parts (1 per block). This
+// essentially means the number of blocks for which peer has been sending us
+// block parts.
 func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.stats.lastBlockPartHeight == bp.Height {
+	if ps.stats.lastBlockPartHeight >= bp.Height {
 		return ps.stats.blockParts
 	}

 	ps.stats.lastBlockPartHeight = bp.Height
-	ps.stats.blockParts += 1
+	ps.stats.blockParts++
+	return ps.stats.blockParts
+}
+
+// BlockPartsSent returns the number of blocks for which peer has been sending
+// us block parts.
+func (ps *PeerState) BlockPartsSent() int {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
 	return ps.stats.blockParts
 }

@@ -1253,11 +1282,13 @@ func (ps *PeerState) StringIndented(indent string) string {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 	return fmt.Sprintf(`PeerState{
-%s  Key %v
-%s  PRS %v
+%s  Key   %v
+%s  PRS   %v
+%s  Stats %v
 %s}`,
 		indent, ps.Peer.ID(),
 		indent, ps.PeerRoundState.StringIndented(indent+"  "),
+		indent, ps.stats,
 		indent)
 }

@@ -1283,6 +1314,10 @@ func RegisterConsensusMessages(cdc *amino.Codec) {

 // DecodeMessage decodes the given bytes into a ConsensusMessage.
 func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
+	if len(bz) > maxMsgSize {
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+			len(bz), maxMsgSize)
+	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
 }

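Note: the RecordVote/RecordBlockPart changes above do two things. First, the `==` to `>=` comparison means the per-peer counters bump at most once per height and also ignore replays from earlier heights. Second, MarkPeerAsGood now fires on every multiple of blocksToContributeToBecomeGoodPeer (`% == 0`) instead of on every message once past 10000 (`> 10000`). A standalone sketch of that bookkeeping, with the mutex and peer plumbing stripped out and a tiny threshold so the demo is visible:

package main

import "fmt"

const blocksToBecomeGood = 3 // real value is 10000; small here for the demo

type peerStats struct {
	lastVoteHeight int64
	votes          int
}

// recordVote counts at most one vote per height and ignores votes from
// heights at or below the last one counted, mirroring the >= fix above.
func (s *peerStats) recordVote(height int64) int {
	if s.lastVoteHeight >= height {
		return s.votes
	}
	s.lastVoteHeight = height
	s.votes++
	return s.votes
}

func main() {
	var s peerStats
	for _, h := range []int64{1, 1, 2, 1, 3} { // duplicates and an old height
		if n := s.recordVote(h); n%blocksToBecomeGood == 0 {
			fmt.Printf("height %d: mark peer as good (votes=%d)\n", h, n)
		}
	}
	fmt.Println("total counted:", s.votes) // 3
}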

consensus/reactor_test.go (+109 -1)

@@ -11,10 +11,12 @@ import (
 	"time"

 	"github.com/tendermint/abci/example/kvstore"
+	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"

 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
+	p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
 	"github.com/tendermint/tendermint/types"

 	"github.com/stretchr/testify/assert"
@@ -121,6 +123,112 @@ func TestReactorProposalHeartbeats(t *testing.T) {
 	}, css)
 }

+// Test we record block parts from other peers
+func TestReactorRecordsBlockParts(t *testing.T) {
+	// create dummy peer
+	peer := p2pdummy.NewPeer()
+	ps := NewPeerState(peer).SetLogger(log.TestingLogger())
+	peer.Set(types.PeerStateKey, ps)
+
+	// create reactor
+	css := randConsensusNet(1, "consensus_reactor_records_block_parts_test", newMockTickerFunc(true), newPersistentKVStore)
+	reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
+	reactor.SetEventBus(css[0].eventBus)
+	reactor.SetLogger(log.TestingLogger())
+	sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
+	reactor.SetSwitch(sw)
+	err := reactor.Start()
+	require.NoError(t, err)
+	defer reactor.Stop()
+
+	// 1) new block part
+	parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
+	msg := &BlockPartMessage{
+		Height: 2,
+		Round:  0,
+		Part:   parts.GetPart(0),
+	}
+	bz, err := cdc.MarshalBinaryBare(msg)
+	require.NoError(t, err)
+
+	reactor.Receive(DataChannel, peer, bz)
+	require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
+
+	// 2) block part with the same height, but different round
+	msg.Round = 1
+
+	bz, err = cdc.MarshalBinaryBare(msg)
+	require.NoError(t, err)
+
+	reactor.Receive(DataChannel, peer, bz)
+	require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
+
+	// 3) block part from earlier height
+	msg.Height = 1
+	msg.Round = 0
+
+	bz, err = cdc.MarshalBinaryBare(msg)
+	require.NoError(t, err)
+
+	reactor.Receive(DataChannel, peer, bz)
+	require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
+}
+
+// Test we record votes from other peers
+func TestReactorRecordsVotes(t *testing.T) {
+	// create dummy peer
+	peer := p2pdummy.NewPeer()
+	ps := NewPeerState(peer).SetLogger(log.TestingLogger())
+	peer.Set(types.PeerStateKey, ps)
+
+	// create reactor
+	css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore)
+	reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
+	reactor.SetEventBus(css[0].eventBus)
+	reactor.SetLogger(log.TestingLogger())
+	sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
+	reactor.SetSwitch(sw)
+	err := reactor.Start()
+	require.NoError(t, err)
+	defer reactor.Stop()
+
+	_, val := css[0].state.Validators.GetByIndex(0)
+
+	// 1) new vote
+	vote := &types.Vote{
+		ValidatorIndex:   0,
+		ValidatorAddress: val.Address,
+		Height:           2,
+		Round:            0,
+		Timestamp:        time.Now().UTC(),
+		Type:             types.VoteTypePrevote,
+		BlockID:          types.BlockID{},
+	}
+	bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote})
+	require.NoError(t, err)
+
+	reactor.Receive(VoteChannel, peer, bz)
+	assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should have increased by 1")
+
+	// 2) vote with the same height, but different round
+	vote.Round = 1
+
+	bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
+	require.NoError(t, err)
+
+	reactor.Receive(VoteChannel, peer, bz)
+	assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
+
+	// 3) vote from earlier height
+	vote.Height = 1
+	vote.Round = 0
+
+	bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
+	require.NoError(t, err)
+
+	reactor.Receive(VoteChannel, peer, bz)
+	assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
+}
+
 //-------------------------------------------------------------
 // ensure we can make blocks despite cycling a validator set

@@ -332,7 +440,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
 		// but they should be in order.
 		for _, tx := range newBlock.Data.Txs {
 			assert.EqualValues(t, txs[ntxs], tx)
-			ntxs += 1
+			ntxs++
 		}

 		if ntxs == len(txs) {


consensus/replay.go (+6 -6)

@@ -112,7 +112,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
 		}
 	}
 	if found {
-		return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight)
+		return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
 	}

 	// Search for last height marker
@@ -125,7 +125,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
 		return err
 	}
 	if !found {
-		return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1)
+		return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1)
 	}
 	defer gr.Close() // nolint: errcheck

@@ -352,7 +352,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
 	var err error
 	finalBlock := storeBlockHeight
 	if mutateState {
-		finalBlock -= 1
+		finalBlock--
 	}
 	for i := appBlockHeight + 1; i <= finalBlock; i++ {
 		h.logger.Info("Applying block", "height", i)
@@ -362,7 +362,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
 			return nil, err
 		}

-		h.nBlocks += 1
+		h.nBlocks++
 	}

 	if mutateState {
@@ -390,7 +390,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
 		return sm.State{}, err
 	}

-	h.nBlocks += 1
+	h.nBlocks++

 	return state, nil
 }

@@ -429,7 +429,7 @@ type mockProxyApp struct {

 func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
 	r := mock.abciResponses.DeliverTx[mock.txCount]
-	mock.txCount += 1
+	mock.txCount++
 	return *r
 }

+ 9
- 10
consensus/replay_file.go View File

@@ -87,9 +87,9 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
}
if nextN > 0 {
nextN -= 1
nextN--
}
pb.count += 1
pb.count++
}
return nil
}
@@ -153,7 +153,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
return err
}
pb.count += 1
pb.count++
}
return nil
}
@@ -197,13 +197,12 @@ func (pb *playback) replayConsoleLoop() int {
if len(tokens) == 1 {
return 0
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("next takes an integer argument")
} else {
return i
}
}
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("next takes an integer argument")
} else {
return i
}
case "back":
@@ -299,7 +298,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
// Create proxyAppConn connection (consensus, mempool, query)
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
proxyApp := proxy.NewAppConns(clientCreator,
NewHandshaker(stateDB, state, blockStore, gdoc.AppState))
NewHandshaker(stateDB, state, blockStore, gdoc.AppState()))
err = proxyApp.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))


+ 3
- 3
consensus/replay_test.go View File

@@ -382,9 +382,9 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
expectedBlocksToSync := NUM_BLOCKS - nBlocks
if nBlocks == NUM_BLOCKS && mode > 0 {
expectedBlocksToSync += 1
expectedBlocksToSync++
} else if nBlocks > 0 && mode == 1 {
expectedBlocksToSync += 1
expectedBlocksToSync++
}
if handshaker.NBlocks() != expectedBlocksToSync {
@@ -533,7 +533,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
height += 1
height++
}
case *types.PartSetHeader:
thisBlockParts = types.NewPartSetFromHeader(*p)


+ 108
- 109
consensus/state.go View File

@@ -492,7 +492,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
func (cs *ConsensusState) newStep() {
rs := cs.RoundStateEvent()
cs.wal.Save(rs)
cs.nSteps += 1
cs.nSteps++
// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
if cs.eventBus != nil {
cs.eventBus.PublishEventNewRoundStep(rs)
@@ -718,11 +718,7 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
counter := 0
addr := cs.privValidator.GetAddress()
valIndex, v := cs.Validators.GetByAddress(addr)
if v == nil {
// not a validator
valIndex = -1
}
valIndex, _ := cs.Validators.GetByAddress(addr)
chainID := cs.state.ChainID
for {
rs := cs.GetRoundState()
@@ -739,7 +735,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
}
cs.privValidator.SignHeartbeat(chainID, heartbeat)
cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
counter += 1
counter++
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
}
}
@@ -850,10 +846,10 @@ func (cs *ConsensusState) isProposalComplete() bool {
// make sure we have the prevotes from it too
if cs.Proposal.POLRound < 0 {
return true
} else {
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
}
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
}

// Create the next block to propose and return it.
@@ -1357,111 +1353,115 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
return added, ErrVoteHeightMismatch
}
added, err = cs.LastCommit.AddVote(vote)
if added {
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
if !added {
return added, err
}
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
return
}

// A prevote/precommit for this height?
if vote.Height == cs.Height {
height := cs.Height
added, err = cs.Votes.AddVote(vote, peerID)
if added {
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
switch vote.Type {
case types.VoteTypePrevote:
prevotes := cs.Votes.Prevotes(vote.Round)
cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
blockID, ok := prevotes.TwoThirdsMajority()
// First, unlock if prevotes is a valid POL.
// >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
// NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
// there.
if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
}
}
// Update ValidBlock
if ok && !blockID.IsZero() && !cs.ValidBlock.HashesTo(blockID.Hash) && vote.Round > cs.ValidRound {
// update valid value
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.ValidRound = vote.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
}
//TODO: We might want to update ValidBlock also in case we don't have that block yet,
// and obtain the required block using gossiping
}
if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
// Round-skip over to PrevoteWait or goto Precommit.
cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
if prevotes.HasTwoThirdsMajority() {
cs.enterPrecommit(height, vote.Round)
} else {
cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
cs.enterPrevoteWait(height, vote.Round)
}
} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
// If the proposal is now complete, enter prevote of cs.Round.
if cs.isProposalComplete() {
cs.enterPrevote(height, cs.Round)
}
}
case types.VoteTypePrecommit:
precommits := cs.Votes.Precommits(vote.Round)
cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
blockID, ok := precommits.TwoThirdsMajority()
if ok {
if len(blockID.Hash) == 0 {
cs.enterNewRound(height, vote.Round+1)
} else {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterCommit(height, vote.Round)
if cs.config.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
}
} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterPrecommitWait(height, vote.Round)
}
default:
cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
}
}
// Either duplicate, or error upon cs.Votes.AddByIndex()
return
} else {
err = ErrVoteHeightMismatch
}
// Height mismatch, bad peer?
cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
// Height mismatch is ignored.
// Not necessarily a bad peer, but not favourable behaviour.
if vote.Height != cs.Height {
err = ErrVoteHeightMismatch
cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
return
}
height := cs.Height
added, err = cs.Votes.AddVote(vote, peerID)
if !added {
// Either duplicate, or error upon cs.Votes.AddByIndex()
return
}
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
switch vote.Type {
case types.VoteTypePrevote:
prevotes := cs.Votes.Prevotes(vote.Round)
cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
blockID, ok := prevotes.TwoThirdsMajority()
// First, unlock if prevotes is a valid POL.
// >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
// NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
// there.
if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
}
}
// Update ValidBlock
if ok && !blockID.IsZero() && !cs.ValidBlock.HashesTo(blockID.Hash) && vote.Round > cs.ValidRound {
// update valid value
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.ValidRound = vote.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
}
//TODO: We might want to update ValidBlock also in case we don't have that block yet,
// and obtain the required block using gossiping
}
if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
// Round-skip over to PrevoteWait or goto Precommit.
cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
if prevotes.HasTwoThirdsMajority() {
cs.enterPrecommit(height, vote.Round)
} else {
cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
cs.enterPrevoteWait(height, vote.Round)
}
} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
// If the proposal is now complete, enter prevote of cs.Round.
if cs.isProposalComplete() {
cs.enterPrevote(height, cs.Round)
}
}
case types.VoteTypePrecommit:
precommits := cs.Votes.Precommits(vote.Round)
cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
blockID, ok := precommits.TwoThirdsMajority()
if ok {
if len(blockID.Hash) == 0 {
cs.enterNewRound(height, vote.Round+1)
} else {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterCommit(height, vote.Round)
if cs.config.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
}
} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterPrecommitWait(height, vote.Round)
}
default:
panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
}
return
}
@@ -1492,12 +1492,11 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
return vote
} else {
//if !cs.replayMode {
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
//}
return nil
}
//if !cs.replayMode {
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
//}
return nil
}

//---------------------------------------------------------


+ 1
- 1
consensus/wal_generator.go View File

@@ -53,7 +53,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
return nil, errors.Wrap(err, "failed to make genesis state")
}
blockStore := bc.NewBlockStore(blockStoreDB)
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState)
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
proxyApp.SetLogger(logger.With("module", "proxy"))
if err := proxyApp.Start(); err != nil {


+ 2
- 3
docs/conf.py View File

@@ -196,9 +196,8 @@ urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefuls
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')

urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking-and-monitoring.rst')
# the readme for below is included in tm-bench
# urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst')
urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/monitoring.rst')

#### abci spec #################################


+ 1
- 1
docs/examples/getting-started.md View File

@@ -12,7 +12,7 @@ and want to get started right away, continue. Otherwise, [review the documentati
On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vNLfY), like so:

```
curl -L https://git.io/vNLfY | bash
curl -L https://git.io/vxWlX | bash
source ~/.profile
```


+ 4
- 4
docs/examples/install_tendermint.sh View File

@@ -4,8 +4,8 @@
# and has only been tested on Digital Ocean

# get and unpack golang
curl -O https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
tar -xvf go1.9.2.linux-amd64.tar.gz
curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz
tar -xvf go1.10.linux-amd64.tar.gz

apt install make
@@ -26,7 +26,7 @@ go get $REPO
cd $GOPATH/src/$REPO

## build
git checkout v0.15.0
git checkout v0.17.0
make get_tools
make get_vendor_deps
make install

+ 2
- 1
docs/index.rst View File

@@ -44,7 +44,8 @@ Tendermint Tools
tools/docker.rst
tools/mintnet-kubernetes.rst
tools/terraform-digitalocean.rst
tools/benchmarking-and-monitoring.rst
tools/benchmarking.rst
tools/monitoring.rst

Tendermint 102
--------------


+ 3
- 3
docs/specification/byzantine-consensus-algorithm.rst View File

@@ -329,11 +329,11 @@ collateral on all other forks. Clients should verify the signatures on
the reorg-proposal, verify any evidence, and make a judgement or prompt
the end-user for a decision. For example, a phone wallet app may prompt
the user with a security warning, while a refrigerator may accept any
reorg-proposal signed by +½ of the original validators.
reorg-proposal signed by +1/2 of the original validators.

No non-synchronous Byzantine fault-tolerant algorithm can come to
consensus when ⅓+ of validators are dishonest, yet a fork assumes that
⅓+ of validators have already been dishonest by double-signing or
consensus when 1/3+ of validators are dishonest, yet a fork assumes that
1/3+ of validators have already been dishonest by double-signing or
lock-changing without justification. So, signing the reorg-proposal is a
coordination problem that cannot be solved by any non-synchronous
protocol (i.e. automatically, and without making assumptions about the


+ 4
- 0
docs/specification/configuration.rst View File

@@ -89,6 +89,7 @@ like the file below, however, double check by inspecting the
seeds = ""

# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = ""

# Path to address book
@@ -124,6 +125,9 @@ like the file below, however, double check by inspecting the
# Authenticated encryption
auth_enc = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""

##### mempool configuration options #####
[mempool]


+ 16
- 2
docs/specification/new-spec/bft-time.md View File

@@ -5,7 +5,7 @@ Time in Tendermint is defined with the Time field of the block header.

It satisfies the following properties:

- Time Monotonicity: Time is monotonically increasing, i.e., given
a header H1 for height h1 and a header H2 for height `h2 = h1 + 1`, `H1.Time < H2.Time`.
- Time Validity: Given a set of Commit votes that forms the `block.LastCommit` field, a range of
valid values for the Time field of the block header is defined only by

@@ -16,7 +16,21 @@ In the context of Tendermint, time is of type int64 and denotes UNIX time in mil
corresponds to the number of milliseconds since January 1, 1970. Before defining rules that need to be enforced by the
Tendermint consensus protocol, so the properties above holds, we introduce the following definition:

- median of a set of `Vote` messages is equal to the median of `Vote.Time` fields of the corresponding `Vote` messages
- median of a set of `Vote` messages is equal to the median of `Vote.Time` fields of the corresponding `Vote` messages,
where the value of `Vote.Time` is counted a number of times proportional to the process's voting power. As voting power in
Tendermint is not uniform (one process one vote), a vote message is actually an aggregator of the same votes whose
number is equal to the voting power of the process that has cast the corresponding vote message.

Let's consider the following example:

- we have four processes p1, p2, p3 and p4, with the following voting power distribution: (p1, 23), (p2, 27), (p3, 10)
and (p4, 10). The total voting power is 70 (`N = 3f+1`, where `N` is the total voting power and `f` is the maximum voting
power of the faulty processes), so we assume that the faulty processes have at most 23 of voting power.
Furthermore, we have the following vote messages in some LastCommit field (we ignore all fields except the Time field):
(p1, 100), (p2, 98), (p3, 1000), (p4, 500). We assume that p3 and p4 are faulty processes. Let's assume that the
`block.LastCommit` message contains votes of processes p2, p3 and p4. The median is then chosen the following way:
the value 98 is counted 27 times, the value 1000 is counted 10 times and the value 500 is counted also 10 times.
So the median value will be the value 98. No matter what set of messages with at least `2f+1` voting power we
choose, the median value will always be between the values sent by correct processes.
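To make the weighted median concrete, here is a minimal sketch in Go, under the simplifying assumption that votes carry plain int64 times and powers (the `voteTime` type and field names are illustrative, not Tendermint's actual types):

```go
package main

import (
	"fmt"
	"sort"
)

// voteTime is an illustrative stand-in for a Vote: its Time is counted
// a number of times proportional to the sender's voting power.
type voteTime struct {
	Time  int64 // Vote.Time
	Power int64 // voting power of the process that cast the vote
}

// weightedMedian returns the power-weighted median of the vote times.
func weightedMedian(votes []voteTime) int64 {
	sort.Slice(votes, func(i, j int) bool { return votes[i].Time < votes[j].Time })
	var total int64
	for _, v := range votes {
		total += v.Power
	}
	// walk the sorted times until the accumulated power passes half the total
	var acc int64
	for _, v := range votes {
		acc += v.Power
		if acc > total/2 {
			return v.Time
		}
	}
	return 0 // unreachable for a non-empty vote set
}

func main() {
	// the example above: LastCommit holds votes of p2 (power 27, time 98),
	// p3 (power 10, time 1000) and p4 (power 10, time 500)
	fmt.Println(weightedMedian([]voteTime{{98, 27}, {1000, 10}, {500, 10}})) // 98
}
```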
We ensure Time Monotonicity and Time Validity properties by the following rules:


+ 114
- 0
docs/specification/new-spec/light-client.md View File

@@ -0,0 +1,114 @@
# Light client
A light client is a process that connects to Tendermint Full Node(s) and then tries to verify Merkle proofs
about the blockchain application. In this document we describe mechanisms that ensure that the Tendermint light client
has the same level of security as a Full Node process (without itself being a Full Node).

To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
Validating a blockchain header in Tendermint consists of verifying that the header is committed (signed) by >2/3 of the
voting power of the corresponding validator set. As the validator set is dynamic (it changes over time), one of the
core functionalities of the light client is updating the current validator set, which is then used to verify the
blockchain header, and further the corresponding Merkle proofs.
For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
Tendermint RPC:
```golang
Header(height int64) (SignedHeader, error) // returns signed header for the given height
Validators(height int64) (ResultValidators, error) // returns validator set for the given height
LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number
type SignedHeader struct {
Header Header
Commit Commit
ValSetNumber int64
}
type ResultValidators struct {
BlockHeight int64
Validators []Validator
// time the current validator set is initialised, i.e., time of the last validator change before header BlockHeight
ValSetTime int64
}
```
We assume that Tendermint keeps track of the validator set changes and that each time the validator set is changed it is
assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers
the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time
as the validator set init time.
Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely,
given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next
validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator
set), and then starting from the block `H+2`, it will be signed by the next validator set.
Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function
names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more
clear and simple. Furthermore, note that in the case of the third function, the returned header has `ValSetNumber` equal to
`valSetNumber+1`.
Locally, the light client manages the following state:
```golang
valSet []Validator // current validator set (last known and verified validator set)
valSetNumber int64 // sequence number of the current validator set
valSetHash []byte // hash of the current validator set
valSetTime int64 // time when the current validator set is initialised
```
The light client is initialised with the trusted validator set, for example based on the known validator set hash,
validator set sequence number and the validator set init time.
The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid,
and 2) update the validator set (when the given header is valid and it is more recent than the seen headers).
```golang
VerifyAndUpdate(signedHeader SignedHeader):
assertThat signedHeader.valSetNumber >= valSetNumber
if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then
setValidatorSet(signedHeader)
return true
else
updateValidatorSet(signedHeader.ValSetNumber)
return VerifyAndUpdate(signedHeader)
isValid(signedHeader SignedHeader):
valSetOfTheHeader = Validators(signedHeader.Header.Height)
assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash
assertThat signedHeader is passing basic validation
if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true
else
return false
setValidatorSet(signedHeader SignedHeader):
nextValSet = Validators(signedHeader.Header.Height)
assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash
valSet = nextValSet.Validators
valSetHash = signedHeader.Header.ValidatorsHash
valSetNumber = signedHeader.ValSetNumber
valSetTime = nextValSet.ValSetTime
votingPower(commit Commit):
votingPower = 0
for each precommit in commit.Precommits do:
if precommit.ValidatorAddress is in valSet and signature of the precommit verifies then
votingPower += valSet[precommit.ValidatorAddress].VotingPower
return votingPower
votingPower(validatorSet []Validator):
for each validator in validatorSet do:
votingPower += validator.VotingPower
return votingPower
updateValidatorSet(valSetNumberOfTheHeader):
while valSetNumber != valSetNumberOfTheHeader do
signedHeader = LastHeader(valSetNumber)
if isValid(signedHeader) then
setValidatorSet(signedHeader)
else return error
return
```
Note that in the logic above we assume that the light client will always go upward with respect to header verifications,
i.e., that it will always be used to verify more recent headers. In case a light client needs to verify older
headers (go backward), the same mechanisms and similar logic can be used. In case a call to the Full Node or a subsequent
check fails, a light client needs to implement some recovery strategy, for example connecting to another Full Node.
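The central check in `isValid` is that the commit carries more than 2/3 of the validator set's voting power. Below is a hedged Go sketch of that check; all types here (`Validator`, `Precommit`, the `SignatureOK` field) are simplified stand-ins for illustration, not the real Tendermint API:

```go
package main

import "fmt"

type Validator struct {
	Address     string
	VotingPower int64
}

type Precommit struct {
	ValidatorAddress string
	SignatureOK      bool // stands in for actual signature verification
}

// totalPower sums the voting power of the whole validator set.
func totalPower(vals []Validator) int64 {
	var sum int64
	for _, v := range vals {
		sum += v.VotingPower
	}
	return sum
}

// commitPower sums the power of validators in valSet whose precommit verifies.
func commitPower(precommits []Precommit, valSet map[string]Validator) int64 {
	var sum int64
	for _, pc := range precommits {
		if v, ok := valSet[pc.ValidatorAddress]; ok && pc.SignatureOK {
			sum += v.VotingPower
		}
	}
	return sum
}

func main() {
	vals := []Validator{{"v1", 10}, {"v2", 10}, {"v3", 10}}
	valSet := map[string]Validator{"v1": vals[0], "v2": vals[1], "v3": vals[2]}
	precommits := []Precommit{{"v1", true}, {"v2", true}, {"v3", true}}
	// the header is accepted only if committed power exceeds 2/3 of the total
	fmt.Println(3*commitPower(precommits, valSet) > 2*totalPower(vals)) // true: 30 > 20
}
```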

+ 8
- 1
docs/specification/new-spec/reactors/pex/pex.md View File

@@ -57,10 +57,17 @@ a trust metric (see below), but it's best to start with something simple.

## Select Peers to Dial

When we need more peers, we pick them randomly from the addrbook with some
configurable bias for unvetted peers. The bias should be lower when we have fewer peers,
configurable bias for unvetted peers. The bias should be lower when we have fewer peers
and can increase as we obtain more, ensuring that our first peers are more trustworthy,
but always giving us the chance to discover new good peers.

We track the last time we dialed a peer and the number of unsuccessful attempts
we've made. If too many attempts are made, we mark the peer as bad.

Connection attempts are made with exponential backoff (plus jitter). Because
the selection process happens every `ensurePeersPeriod`, we might not end up
dialing a peer for much longer than the backoff duration.
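A minimal sketch of this dial policy, assuming an illustrative base interval and attempt counter (the constants and names here are not the actual PEX reactor's):

```go
package main

import (
	"math/rand"
	"time"
)

const baseInterval = 5 * time.Second // illustrative, not Tendermint's value

// dialDelay doubles the wait per failed attempt and adds up to 1s of jitter,
// so simultaneous redials by many nodes spread out in time.
func dialDelay(attempts int) time.Duration {
	backoff := baseInterval * time.Duration(1<<uint(attempts)) // 5s, 10s, 20s, ...
	jitter := time.Duration(rand.Int63n(int64(time.Second)))
	return backoff + jitter
}

func main() {
	lastDialed := time.Now().Add(-30 * time.Second)
	attempts := 2
	// only redial once the backoff window for this peer has passed
	if time.Since(lastDialed) > dialDelay(attempts) {
		// dial the peer ...
	}
}
```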
## Select Peers to Exchange

When we’re asked for peers, we select them as follows:


+ 1
- 0
docs/specification/rpc.rst View File

@@ -97,6 +97,7 @@ An HTTP Get request to the root RPC endpoint (e.g.
http://localhost:46657/genesis
http://localhost:46657/net_info
http://localhost:46657/num_unconfirmed_txs
http://localhost:46657/health
http://localhost:46657/status
http://localhost:46657/unconfirmed_txs
http://localhost:46657/unsafe_flush_mempool


+ 8
- 3
evidence/reactor.go View File

@@ -2,11 +2,12 @@ package evidence

import (
"fmt"
"github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log"
"reflect"
"time"

"github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log"

"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -14,7 +15,7 @@ import (
const (
EvidenceChannel = byte(0x38)

maxEvidenceMessageSize = 1048576 // 1MB TODO make it configurable
maxMsgSize = 1048576 // 1MB TODO make it configurable

broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often
)
@@ -146,6 +147,10 @@ func RegisterEvidenceMessages(cdc *amino.Codec) {

// DecodeMessage decodes a byte-array into a EvidenceMessage.
func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


+ 9
- 0
lite/proxy/block.go View File

@@ -11,11 +11,17 @@ import (
)

func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error {
if meta == nil {
return errors.New("expecting a non-nil BlockMeta")
}
// TODO: check the BlockID??
return ValidateHeader(meta.Header, check)
}

func ValidateBlock(meta *types.Block, check lite.Commit) error {
if meta == nil {
return errors.New("expecting a non-nil Block")
}
err := ValidateHeader(meta.Header, check)
if err != nil {
return err
@@ -27,6 +33,9 @@ func ValidateBlock(meta *types.Block, check lite.Commit) error {
}

func ValidateHeader(head *types.Header, check lite.Commit) error {
if head == nil {
return errors.New("expecting a non-nil Header")
}
// make sure they are for the same height (obvious fail)
if head.Height != check.Height() {
return certerr.ErrHeightMismatch(head.Height, check.Height())


+ 218
- 0
lite/proxy/validate_test.go View File

@@ -0,0 +1,218 @@
package proxy_test
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/lite"
"github.com/tendermint/tendermint/lite/proxy"
"github.com/tendermint/tendermint/types"
)
var (
deadBeefTxs = types.Txs{[]byte("DE"), []byte("AD"), []byte("BE"), []byte("EF")}
deadBeefHash = deadBeefTxs.Hash()
testTime1 = time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC)
testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC)
)
var hdrHeight11 = &types.Header{
Height: 11,
Time: testTime1,
ValidatorsHash: []byte("Tendermint"),
}
func TestValidateBlock(t *testing.T) {
tests := []struct {
block *types.Block
commit lite.Commit
wantErr string
}{
{
block: nil, wantErr: "non-nil Block",
},
{
block: &types.Block{}, wantErr: "nil Header",
},
{
block: &types.Block{Header: new(types.Header)},
},
// Start Header.Height mismatch test
{
block: &types.Block{Header: &types.Header{Height: 10}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "don't match - 10 vs 11",
},
{
block: &types.Block{Header: &types.Header{Height: 11}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
},
// End Header.Height mismatch test
// Start Header.Hash mismatch test
{
block: &types.Block{Header: hdrHeight11},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "Headers don't match",
},
{
block: &types.Block{Header: hdrHeight11},
commit: lite.Commit{Header: hdrHeight11},
},
// End Header.Hash mismatch test
// Start Header.Data hash mismatch test
{
block: &types.Block{
Header: &types.Header{Height: 11},
Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}},
},
commit: lite.Commit{
Header: &types.Header{Height: 11},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}},
},
wantErr: "Data hash doesn't match header",
},
{
block: &types.Block{
Header: &types.Header{Height: 11, DataHash: deadBeefHash},
Data: &types.Data{Txs: deadBeefTxs},
},
commit: lite.Commit{
Header: &types.Header{Height: 11},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
},
},
// End Header.Data hash mismatch test
}
for i, tt := range tests {
err := proxy.ValidateBlock(tt.block, tt.commit)
if tt.wantErr != "" {
if err == nil {
assert.FailNowf(t, "Unexpectedly passed", "#%d", i)
} else {
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
}
continue
}
assert.Nil(t, err, "#%d: expecting a nil error", i)
}
}
func TestValidateBlockMeta(t *testing.T) {
tests := []struct {
meta *types.BlockMeta
commit lite.Commit
wantErr string
}{
{
meta: nil, wantErr: "non-nil BlockMeta",
},
{
meta: &types.BlockMeta{}, wantErr: "non-nil Header",
},
{
meta: &types.BlockMeta{Header: new(types.Header)},
},
// Start Header.Height mismatch test
{
meta: &types.BlockMeta{Header: &types.Header{Height: 10}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "don't match - 10 vs 11",
},
{
meta: &types.BlockMeta{Header: &types.Header{Height: 11}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
},
// End Header.Height mismatch test
// Start Headers don't match test
{
meta: &types.BlockMeta{Header: hdrHeight11},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "Headers don't match",
},
{
meta: &types.BlockMeta{Header: hdrHeight11},
commit: lite.Commit{Header: hdrHeight11},
},
{
meta: &types.BlockMeta{
Header: &types.Header{
Height: 11,
ValidatorsHash: []byte("lite-test"),
// TODO: should be able to use empty time after Amino upgrade
Time: testTime1,
},
},
commit: lite.Commit{
Header: &types.Header{Height: 11, DataHash: deadBeefHash},
},
wantErr: "Headers don't match",
},
{
meta: &types.BlockMeta{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime1,
},
},
commit: lite.Commit{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime2,
},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
},
wantErr: "Headers don't match",
},
{
meta: &types.BlockMeta{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime2,
},
},
commit: lite.Commit{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint-x"),
Time: testTime2,
},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
},
wantErr: "Headers don't match",
},
// End Headers don't match test
}
for i, tt := range tests {
err := proxy.ValidateBlockMeta(tt.meta, tt.commit)
if tt.wantErr != "" {
if err == nil {
assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr)
} else {
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
}
continue
}
assert.Nil(t, err, "#%d: expecting a nil error", i)
}
}

+ 5
- 1
mempool/reactor.go View File

@@ -18,7 +18,7 @@ import (
const (
MempoolChannel = byte(0x30)

maxMempoolMessageSize = 1048576 // 1MB TODO make it configurable
maxMsgSize = 1048576 // 1MB TODO make it configurable

peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
)
@@ -167,6 +167,10 @@ func RegisterMempoolMessages(cdc *amino.Codec) {

// DecodeMessage decodes a byte-array into a MempoolMessage.
func DecodeMessage(bz []byte) (msg MempoolMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


+ 22
- 21
node/node.go View File

@@ -2,12 +2,10 @@ package node

import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"strings"

abci "github.com/tendermint/abci/types"
amino "github.com/tendermint/go-amino"
@@ -163,7 +161,7 @@ func NewNode(config *cfg.Config,
// and sync tendermint and the app by performing a handshake
// and replaying any necessary blocks
consensusLogger := logger.With("module", "consensus")
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc.AppState)
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
handshaker.SetLogger(consensusLogger)
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
proxyApp.SetLogger(logger.With("module", "proxy"))
@@ -278,12 +276,11 @@ func NewNode(config *cfg.Config,
trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig())
trustMetricStore.SetLogger(p2pLogger)

var seeds []string
if config.P2P.Seeds != "" {
seeds = strings.Split(config.P2P.Seeds, ",")
}
pexReactor := pex.NewPEXReactor(addrBook,
&pex.PEXReactorConfig{Seeds: seeds, SeedMode: config.P2P.SeedMode})
&pex.PEXReactorConfig{
Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "),
SeedMode: config.P2P.SeedMode,
PrivatePeerIDs: cmn.SplitAndTrim(config.P2P.PrivatePeerIDs, ",", " ")})
pexReactor.SetLogger(p2pLogger)
sw.AddReactor("PEX", pexReactor)
}
@@ -333,7 +330,7 @@ func NewNode(config *cfg.Config,
return nil, err
}
if config.TxIndex.IndexTags != "" {
txIndexer = kv.NewTxIndex(store, kv.IndexTags(strings.Split(config.TxIndex.IndexTags, ",")))
txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " ")))
} else if config.TxIndex.IndexAllTags {
txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
} else {
@@ -408,9 +405,14 @@ func (n *Node) OnStart() error {
}
n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile())

// Start the switch
n.sw.SetNodeInfo(n.makeNodeInfo(nodeKey.PubKey()))
nodeInfo := n.makeNodeInfo(nodeKey.PubKey())
n.sw.SetNodeInfo(nodeInfo)
n.sw.SetNodeKey(nodeKey)

// Add ourselves to addrbook to prevent dialing ourselves
n.addrBook.AddOurAddress(nodeInfo.NetAddress())

// Start the switch
err = n.sw.Start()
if err != nil {
return err
@@ -418,7 +420,7 @@ func (n *Node) OnStart() error {

// Always connect to persistent peers
if n.config.P2P.PersistentPeers != "" {
err = n.sw.DialPeersAsync(n.addrBook, strings.Split(n.config.P2P.PersistentPeers, ","), true)
err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true)
if err != nil {
return err
}
@@ -489,7 +491,7 @@ func (n *Node) ConfigureRPC() {
func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC()
listenAddrs := strings.Split(n.config.RPC.ListenAddress, ",")
listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")

coreCodec := amino.NewCodec()
ctypes.RegisterAmino(coreCodec)
@@ -639,19 +641,18 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
bytes := db.Get(genesisDocKey)
if len(bytes) == 0 {
return nil, errors.New("Genesis doc not found")
} else {
var genDoc *types.GenesisDoc
err := json.Unmarshal(bytes, &genDoc)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
}
return genDoc, nil
}
var genDoc *types.GenesisDoc
err := cdc.UnmarshalJSON(bytes, &genDoc)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
}
return genDoc, nil
}

// panics if failed to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
bytes, err := cdc.MarshalJSON(genDoc)
if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
}
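The `cmn.SplitAndTrim` calls above replace plain `strings.Split` so that config values like `"id1@host:port, id2@host:port"` survive stray spaces around the commas. The following is a hedged local sketch of the assumed behavior (a stand-in, not the tmlibs implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// splitAndTrim mirrors (by assumption) cmn.SplitAndTrim: split s by sep,
// then trim cutset characters from each element.
func splitAndTrim(s, sep, cutset string) []string {
	parts := strings.Split(s, sep)
	for i, p := range parts {
		parts[i] = strings.Trim(p, cutset)
	}
	return parts
}

func main() {
	fmt.Println(splitAndTrim("a@1.2.3.4:46656, b@5.6.7.8:46656", ",", " "))
	// [a@1.2.3.4:46656 b@5.6.7.8:46656]
}
```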


+ 12
- 0
node/wire.go View File

@@ -0,0 +1,12 @@
package node
import (
amino "github.com/tendermint/go-amino"
crypto "github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}

+ 4
- 4
p2p/base_reactor.go View File

@@ -47,7 +47,7 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor {
func (br *BaseReactor) SetSwitch(sw *Switch) {
br.Switch = sw
}
func (_ *BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
func (_ *BaseReactor) AddPeer(peer Peer) {}
func (_ *BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (_ *BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
func (*BaseReactor) AddPeer(peer Peer) {}
func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}

+ 1
- 2
p2p/conn/connection.go View File

@@ -432,9 +432,8 @@ func (c *MConnection) sendPacketMsg() bool {
// Nothing to send?
if leastChannel == nil {
return true
} else {
// c.Logger.Info("Found a PacketMsg to send")
}
// c.Logger.Info("Found a msgPacket to send")

// Make & send a PacketMsg from this channel
_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)


+ 3
- 5
p2p/conn/secret_connection.go View File

@@ -111,7 +111,7 @@ func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Write(data []byte) (n int, err error) {
for 0 < len(data) {
var frame []byte = make([]byte, totalFrameSize)
var frame = make([]byte, totalFrameSize)
var chunk []byte
if dataMaxSize < len(data) {
chunk = data[:dataMaxSize]
@@ -134,9 +134,8 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
_, err := sc.conn.Write(sealedFrame)
if err != nil {
return n, err
} else {
n += len(chunk)
}
n += len(chunk)
}
return
}
@@ -229,7 +228,6 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3
// Otherwise:
var _remEphPub = trs.FirstValue().([32]byte)
return &_remEphPub, nil
}

func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
@@ -342,7 +340,7 @@ func incr2Nonce(nonce *[24]byte) {
// increment nonce big-endian by 1 with wraparound.
func incrNonce(nonce *[24]byte) {
for i := 23; 0 <= i; i-- {
nonce[i] += 1
nonce[i]++
if nonce[i] != 0 {
return
}


+ 2
- 1
p2p/conn/secret_connection_test.go View File

@@ -73,6 +73,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
return nil, nil, false
},
)

require.Nil(tb, trs.FirstError())
require.True(tb, ok, "Unexpected task abortion")
@@ -181,7 +182,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
var readCount = 0
for _, readChunk := range reads {
read += readChunk
readCount += 1
readCount++
if len(write) <= len(read) {
break
}


+ 72
- 0
p2p/dummy/peer.go View File

@@ -0,0 +1,72 @@
package dummy
import (
p2p "github.com/tendermint/tendermint/p2p"
tmconn "github.com/tendermint/tendermint/p2p/conn"
cmn "github.com/tendermint/tmlibs/common"
)
type peer struct {
cmn.BaseService
kv map[string]interface{}
}
var _ p2p.Peer = (*peer)(nil)
// NewPeer creates new dummy peer.
func NewPeer() *peer {
p := &peer{
kv: make(map[string]interface{}),
}
p.BaseService = *cmn.NewBaseService(nil, "peer", p)
return p
}
// ID always returns dummy.
func (p *peer) ID() p2p.ID {
return p2p.ID("dummy")
}
// IsOutbound always returns false.
func (p *peer) IsOutbound() bool {
return false
}
// IsPersistent always returns false.
func (p *peer) IsPersistent() bool {
return false
}
// NodeInfo always returns empty node info.
func (p *peer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{}
}
// Status always returns empty connection status.
func (p *peer) Status() tmconn.ConnectionStatus {
return tmconn.ConnectionStatus{}
}
// Send does not do anything and just returns true.
func (p *peer) Send(byte, []byte) bool {
return true
}
// TrySend does not do anything and just returns true.
func (p *peer) TrySend(byte, []byte) bool {
return true
}
// Set records the value under the given key in the map.
func (p *peer) Set(key string, value interface{}) {
p.kv[key] = value
}
// Get returns a value associated with the key. Nil is returned if no value
// found.
func (p *peer) Get(key string) interface{} {
if value, ok := p.kv[key]; ok {
return value
}
return nil
}

+ 2
- 3
p2p/key.go View File

@@ -52,9 +52,8 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
return nil, err
}
return nodeKey, nil
} else {
return genNodeKey(filePath)
}
return genNodeKey(filePath)
}

func loadNodeKey(filePath string) (*NodeKey, error) {
@@ -65,7 +64,7 @@ func loadNodeKey(filePath string) (*NodeKey, error) {
nodeKey := new(NodeKey)
err = cdc.UnmarshalJSON(jsonBytes, nodeKey)
if err != nil {
return nil, fmt.Errorf("Error reading NodeKey from %v: %v\n", filePath, err)
return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err)
}
return nodeKey, nil
}


+ 1
- 1
p2p/listener.go View File

@@ -72,7 +72,7 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
// Determine internal address...
var intAddr *NetAddress
intAddr, err = NewNetAddressString(lAddr)
intAddr, err = NewNetAddressStringWithOptionalID(lAddr)
if err != nil {
panic(err)
}


+ 26
- 15
p2p/netaddress.go View File

@@ -48,33 +48,45 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress {
}
ip := tcpAddr.IP
port := uint16(tcpAddr.Port)
netAddr := NewNetAddressIPPort(ip, port)
netAddr.ID = id
return netAddr
na := NewNetAddressIPPort(ip, port)
na.ID = id
return na
}

// NewNetAddressString returns a new NetAddress using the provided
// address in the form of "ID@IP:Port", where the ID is optional.
// NewNetAddressString returns a new NetAddress using the provided address in
// the form of "ID@IP:Port".
// Also resolves the host if host is not an IP.
func NewNetAddressString(addr string) (*NetAddress, error) {
addr = removeProtocolIfDefined(addr)
spl := strings.Split(addr, "@")
if len(spl) < 2 {
return nil, fmt.Errorf("Address (%s) does not contain ID", addr)
}
return NewNetAddressStringWithOptionalID(addr)
}

// NewNetAddressStringWithOptionalID returns a new NetAddress using the
// provided address in the form of "ID@IP:Port", where the ID is optional.
// Also resolves the host if host is not an IP.
func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) {
addrWithoutProtocol := removeProtocolIfDefined(addr)

var id ID
spl := strings.Split(addr, "@")
spl := strings.Split(addrWithoutProtocol, "@")
if len(spl) == 2 {
idStr := spl[0]
idBytes, err := hex.DecodeString(idStr)
if err != nil {
return nil, cmn.ErrorWrap(err, fmt.Sprintf("Address (%s) contains invalid ID", addr))
return nil, cmn.ErrorWrap(err, fmt.Sprintf("Address (%s) contains invalid ID", addrWithoutProtocol))
}
if len(idBytes) != IDByteLength {
return nil, fmt.Errorf("Address (%s) contains ID of invalid length (%d). Should be %d hex-encoded bytes",
addr, len(idBytes), IDByteLength)
addrWithoutProtocol, len(idBytes), IDByteLength)
}
id, addr = ID(idStr), spl[1]
id, addrWithoutProtocol = ID(idStr), spl[1]
}

host, portStr, err := net.SplitHostPort(addr)
host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
if err != nil {
return nil, err
}
@@ -119,11 +131,10 @@ func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) {
// NewNetAddressIPPort returns a new NetAddress using the provided IP
// and port number.
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
na := &NetAddress{
return &NetAddress{
IP: ip,
Port: port,
}
return na
}

// Equals reports whether na and other are the same addresses,
@@ -293,7 +304,7 @@ func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
func removeProtocolIfDefined(addr string) string {
if strings.Contains(addr, "://") {
return strings.Split(addr, "://")[1]
} else {
return addr
}
return addr
}
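A short usage sketch of the two constructors changed above, using the functions introduced in this diff (the exact error text is an assumption; only the accept/reject behavior is taken from the change):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	// fails: NewNetAddressString now requires a node ID
	_, err := p2p.NewNetAddressString("127.0.0.1:46656")
	fmt.Println(err)

	// ok: the WithOptionalID variant keeps the old, ID-optional behavior
	addr, _ := p2p.NewNetAddressStringWithOptionalID("127.0.0.1:46656")
	fmt.Println(addr)

	// ok: a 20-byte hex-encoded ID is provided
	addr2, _ := p2p.NewNetAddressString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:46656")
	fmt.Println(addr2)
}
```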

+ 46
- 29
p2p/netaddress_test.go View File

@@ -9,20 +9,18 @@ import (
)

func TestNewNetAddress(t *testing.T) {
assert, require := assert.New(t), require.New(t)

tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
require.Nil(err)
require.Nil(t, err)
addr := NewNetAddress("", tcpAddr)
assert.Equal("127.0.0.1:8080", addr.String())
assert.Equal(t, "127.0.0.1:8080", addr.String())

assert.NotPanics(func() {
assert.NotPanics(t, func() {
NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000})
}, "Calling NewNetAddress with UDPAddr should not panic in testing")
}

func TestNewNetAddressString(t *testing.T) {
func TestNewNetAddressStringWithOptionalID(t *testing.T) {
testCases := []struct {
addr string
expected string
@@ -57,6 +55,28 @@ func TestNewNetAddressString(t *testing.T) {
{" @ ", "", false},
}

for _, tc := range testCases {
addr, err := NewNetAddressStringWithOptionalID(tc.addr)
if tc.correct {
if assert.Nil(t, err, tc.addr) {
assert.Equal(t, tc.expected, addr.String())
}
} else {
assert.NotNil(t, err, tc.addr)
}
}
}

func TestNewNetAddressString(t *testing.T) {
testCases := []struct {
addr string
expected string
correct bool
}{
{"127.0.0.1:8080", "127.0.0.1:8080", false},
{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
}

for _, tc := range testCases {
addr, err := NewNetAddressString(tc.addr)
if tc.correct {
@@ -70,23 +90,22 @@ func TestNewNetAddressString(t *testing.T) {
}

func TestNewNetAddressStrings(t *testing.T) {
addrs, errs := NewNetAddressStrings([]string{"127.0.0.1:8080", "127.0.0.2:8080"})
assert.Len(t, errs, 0)
addrs, errs := NewNetAddressStrings([]string{
"127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"})
assert.Len(t, errs, 1)
assert.Equal(t, 2, len(addrs))
}

func TestNewNetAddressIPPort(t *testing.T) {
assert := assert.New(t)
addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080)
assert.Equal("127.0.0.1:8080", addr.String())
assert.Equal(t, "127.0.0.1:8080", addr.String())
}

func TestNetAddressProperties(t *testing.T) {
assert, require := assert.New(t), require.New(t)

// TODO add more test cases
tests := []struct {
testCases := []struct {
addr string
valid bool
local bool
@@ -96,21 +115,19 @@ func TestNetAddressProperties(t *testing.T) {
{"ya.ru:80", true, false, true},
}

for _, t := range tests {
addr, err := NewNetAddressString(t.addr)
require.Nil(err)
for _, tc := range testCases {
addr, err := NewNetAddressStringWithOptionalID(tc.addr)
require.Nil(t, err)

assert.Equal(t.valid, addr.Valid())
assert.Equal(t.local, addr.Local())
assert.Equal(t.routable, addr.Routable())
assert.Equal(t, tc.valid, addr.Valid())
assert.Equal(t, tc.local, addr.Local())
assert.Equal(t, tc.routable, addr.Routable())
}
}

func TestNetAddressReachabilityTo(t *testing.T) {
assert, require := assert.New(t), require.New(t)

// TODO add more test cases
tests := []struct {
testCases := []struct {
addr string
other string
reachability int
@@ -119,13 +136,13 @@ func TestNetAddressReachabilityTo(t *testing.T) {
{"ya.ru:80", "127.0.0.1:8080", 1},
}

for _, t := range tests {
addr, err := NewNetAddressString(t.addr)
require.Nil(err)
for _, tc := range testCases {
addr, err := NewNetAddressStringWithOptionalID(tc.addr)
require.Nil(t, err)

other, err := NewNetAddressString(t.other)
require.Nil(err)
other, err := NewNetAddressStringWithOptionalID(tc.other)
require.Nil(t, err)

assert.Equal(t.reachability, addr.ReachabilityTo(other))
assert.Equal(t, tc.reachability, addr.ReachabilityTo(other))
}
}

+1 -0 p2p/node_info.go

@@ -107,6 +107,7 @@ OUTER_LOOP:
 	return nil
 }

+// ID returns the node's ID.
 func (info NodeInfo) ID() ID {
 	return PubKeyToID(info.PubKey)
 }


+3 -1 p2p/peer.go

@@ -351,7 +351,9 @@ func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, ch
 	onReceive := func(chID byte, msgBytes []byte) {
 		reactor := reactorsByCh[chID]
 		if reactor == nil {
-			onPeerError(p, fmt.Errorf("Unknown channel %X", chID))
+			// Note that it's ok to panic here as it's caught in the conn._recover,
+			// which does onPeerError.
+			panic(cmn.Fmt("Unknown channel %X", chID))
 		}
 		reactor.Receive(chID, p, msgBytes)
 	}
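The new panic relies on the connection's receive routine recovering and routing the failure through `onPeerError`; the diff only shows the panic side. A self-contained sketch of that panic/recover shape, with simplified names standing in for the real `MConnection` internals:

```go
package main

import "fmt"

// onReceive panics on an unknown channel; the caller's deferred recover
// converts the panic into a peer-level error instead of crashing the node.
func onReceive(reactors map[byte]func([]byte), chID byte, msg []byte) {
	r, ok := reactors[chID]
	if !ok {
		panic(fmt.Sprintf("Unknown channel %X", chID))
	}
	r(msg)
}

// recvRoutine models what conn._recover does around the receive path.
func recvRoutine(reactors map[byte]func([]byte), chID byte, msg []byte) (err error) {
	defer func() {
		if v := recover(); v != nil {
			// the point where the real code would call onPeerError
			err = fmt.Errorf("recovered from panic: %v", v)
		}
	}()
	onReceive(reactors, chID, msg)
	return nil
}

func main() {
	err := recvRoutine(map[byte]func([]byte){}, 0x01, nil)
	fmt.Println(err) // recovered from panic: Unknown channel 1
}
```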


+1 -2 p2p/peer_set.go

@@ -68,9 +68,8 @@ func (ps *PeerSet) Get(peerKey ID) Peer {
 	item, ok := ps.lookup[peerKey]
 	if ok {
 		return item.peer
-	} else {
-		return nil
 	}
+	return nil
 }

 // Remove discards peer by its Key, if the peer was previously memoized.


+1 -1 p2p/peer_set_test.go

@@ -115,7 +115,7 @@ func TestPeerSetAddDuplicate(t *testing.T) {
 	errsTally := make(map[error]int)
 	for i := 0; i < n; i++ {
 		err := <-errsChan
-		errsTally[err] += 1
+		errsTally[err]++
 	}

 	// Our next procedure is to ensure that only one addition


+9 -2 p2p/peer_test.go

@@ -140,6 +140,8 @@ func (p *remotePeer) Stop() {
 }

 func (p *remotePeer) accept(l net.Listener) {
+	conns := []net.Conn{}
+
 	for {
 		conn, err := l.Accept()
 		if err != nil {
@@ -160,10 +162,15 @@ func (p *remotePeer) accept(l net.Listener) {
 		if err != nil {
 			golog.Fatalf("Failed to perform handshake: %+v", err)
 		}
+
+		conns = append(conns, conn)
+
 		select {
 		case <-p.quit:
-			if err := conn.Close(); err != nil {
-				golog.Fatal(err)
+			for _, conn := range conns {
+				if err := conn.Close(); err != nil {
+					golog.Fatal(err)
+				}
 			}
 			return
 		default:


+140 -13 p2p/pex/addrbook.go

@@ -33,24 +33,33 @@ type AddrBook interface {
 	// Add our own addresses so we don't later add ourselves
 	AddOurAddress(*p2p.NetAddress)
+	// Check if it is our address
+	OurAddress(*p2p.NetAddress) bool

 	// Add and remove an address
 	AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error
-	RemoveAddress(addr *p2p.NetAddress)
+	RemoveAddress(*p2p.NetAddress)
+
+	// Check if the address is in the book
+	HasAddress(*p2p.NetAddress) bool

 	// Do we need more peers?
 	NeedMoreAddrs() bool

 	// Pick an address to dial
-	PickAddress(newBias int) *p2p.NetAddress
+	PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress

 	// Mark address
 	MarkGood(*p2p.NetAddress)
 	MarkAttempt(*p2p.NetAddress)
 	MarkBad(*p2p.NetAddress)

+	IsGood(*p2p.NetAddress) bool
+
 	// Send a selection of addresses to peers
 	GetSelection() []*p2p.NetAddress
+	// Send a selection of addresses with bias
+	GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress

 	// TODO: remove
 	ListOfKnownAddresses() []*knownAddress
@@ -74,7 +83,7 @@ type addrBook struct {
 	// accessed concurrently
 	mtx        sync.Mutex
 	rand       *rand.Rand
-	ourAddrs   map[string]*p2p.NetAddress
+	ourAddrs   map[string]struct{}
 	addrLookup map[p2p.ID]*knownAddress // new & old
 	bucketsOld []map[string]*knownAddress
 	bucketsNew []map[string]*knownAddress
@@ -89,7 +98,7 @@ type addrBook struct {
 func NewAddrBook(filePath string, routabilityStrict bool) *addrBook {
 	am := &addrBook{
 		rand:              rand.New(rand.NewSource(time.Now().UnixNano())), // TODO: seed from outside
-		ourAddrs:          make(map[string]*p2p.NetAddress),
+		ourAddrs:          make(map[string]struct{}),
 		addrLookup:        make(map[p2p.ID]*knownAddress),
 		filePath:          filePath,
 		routabilityStrict: routabilityStrict,
@@ -150,7 +159,15 @@ func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
 	a.Logger.Info("Add our address to book", "addr", addr)
-	a.ourAddrs[addr.String()] = addr
+	a.ourAddrs[addr.String()] = struct{}{}
+}
+
+// OurAddress returns true if it is our address.
+func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	_, ok := a.ourAddrs[addr.String()]
+	a.mtx.Unlock()
+	return ok
 }

 // AddAddress implements AddrBook - adds the given address as received from the given source.
@@ -173,6 +190,22 @@ func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
 	a.removeFromAllBuckets(ka)
 }

+// IsGood returns true if the peer was ever marked as good and hasn't
+// done anything wrong since then.
+func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	return a.addrLookup[addr.ID].isOld()
+}
+
+// HasAddress returns true if the address is in the book.
+func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	ka := a.addrLookup[addr.ID]
+	return ka != nil
+}
+
 // NeedMoreAddrs implements AddrBook - returns true if there are not enough addresses in the book.
 func (a *addrBook) NeedMoreAddrs() bool {
 	return a.Size() < needAddressThreshold
@@ -180,27 +213,27 @@ func (a *addrBook) NeedMoreAddrs() bool {
 // PickAddress implements AddrBook. It picks an address to connect to.
 // The address is picked randomly from an old or new bucket according
-// to the newBias argument, which must be between [0, 100] (or else is truncated to that range)
+// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
 // and determines how biased we are to pick an address from a new bucket.
 // PickAddress returns nil if the AddrBook is empty or if we try to pick
 // from an empty bucket.
-func (a *addrBook) PickAddress(newBias int) *p2p.NetAddress {
+func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()

 	if a.size() == 0 {
 		return nil
 	}
-	if newBias > 100 {
-		newBias = 100
+	if biasTowardsNewAddrs > 100 {
+		biasTowardsNewAddrs = 100
 	}
-	if newBias < 0 {
-		newBias = 0
+	if biasTowardsNewAddrs < 0 {
+		biasTowardsNewAddrs = 0
 	}

 	// Bias between new and old addresses.
-	oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
-	newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
+	oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs))
+	newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs)

 	// pick a random peer from a random bucket
 	var bucket map[string]*knownAddress
@@ -295,6 +328,100 @@ func (a *addrBook) GetSelection() []*p2p.NetAddress {
 	return allAddr[:numAddresses]
 }

+// GetSelectionWithBias implements AddrBook.
+// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
+//
+// Each address is picked randomly from an old or new bucket according to the
+// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to
+// that range) and determines how biased we are to pick an address from a new
+// bucket.
+func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+
+	if a.size() == 0 {
+		return nil
+	}
+
+	if biasTowardsNewAddrs > 100 {
+		biasTowardsNewAddrs = 100
+	}
+	if biasTowardsNewAddrs < 0 {
+		biasTowardsNewAddrs = 0
+	}
+
+	numAddresses := cmn.MaxInt(
+		cmn.MinInt(minGetSelection, a.size()),
+		a.size()*getSelectionPercent/100)
+	numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
+
+	selection := make([]*p2p.NetAddress, numAddresses)
+
+	oldBucketToAddrsMap := make(map[int]map[string]struct{})
+	var oldIndex int
+	newBucketToAddrsMap := make(map[int]map[string]struct{})
+	var newIndex int
+
+	selectionIndex := 0
+ADDRS_LOOP:
+	for selectionIndex < numAddresses {
+		pickFromOldBucket := int((float64(selectionIndex)/float64(numAddresses))*100) >= biasTowardsNewAddrs
+		pickFromOldBucket = (pickFromOldBucket && a.nOld > 0) || a.nNew == 0
+		bucket := make(map[string]*knownAddress)
+
+		// loop until we pick a random non-empty bucket
+		for len(bucket) == 0 {
+			if pickFromOldBucket {
+				oldIndex = a.rand.Intn(len(a.bucketsOld))
+				bucket = a.bucketsOld[oldIndex]
+			} else {
+				newIndex = a.rand.Intn(len(a.bucketsNew))
+				bucket = a.bucketsNew[newIndex]
+			}
+		}
+
+		// pick a random index
+		randIndex := a.rand.Intn(len(bucket))
+
+		// loop over the map to return that index
+		var selectedAddr *p2p.NetAddress
+		for _, ka := range bucket {
+			if randIndex == 0 {
+				selectedAddr = ka.Addr
+				break
+			}
+			randIndex--
+		}
+
+		// if we have selected the address before, restart the loop
+		// otherwise, record it and continue
+		if pickFromOldBucket {
+			if addrsMap, ok := oldBucketToAddrsMap[oldIndex]; ok {
+				if _, ok = addrsMap[selectedAddr.String()]; ok {
+					continue ADDRS_LOOP
+				}
+			} else {
+				oldBucketToAddrsMap[oldIndex] = make(map[string]struct{})
+			}
+			oldBucketToAddrsMap[oldIndex][selectedAddr.String()] = struct{}{}
+		} else {
+			if addrsMap, ok := newBucketToAddrsMap[newIndex]; ok {
+				if _, ok = addrsMap[selectedAddr.String()]; ok {
+					continue ADDRS_LOOP
+				}
+			} else {
+				newBucketToAddrsMap[newIndex] = make(map[string]struct{})
+			}
+			newBucketToAddrsMap[newIndex][selectedAddr.String()] = struct{}{}
+		}
+
+		selection[selectionIndex] = selectedAddr
+		selectionIndex++
+	}
+
+	return selection
+}
+
 // ListOfKnownAddresses returns the new and old addresses.
 func (a *addrBook) ListOfKnownAddresses() []*knownAddress {
 	a.mtx.Lock()
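To make the bias arithmetic in `GetSelectionWithBias` concrete: with `biasTowardsNewAddrs = 30` and 10 slots, the predicate `int((float64(selectionIndex)/float64(numAddresses))*100) >= 30` holds for slots 3 through 9, so roughly 70% of picks target old buckets and 30% new ones, subject to bucket availability. A standalone check of just that predicate (the `main` wrapper is illustrative):

```go
package main

import "fmt"

func main() {
	const bias, numAddresses = 30, 10
	old := 0
	for i := 0; i < numAddresses; i++ {
		// same per-slot predicate as GetSelectionWithBias
		if int((float64(i)/float64(numAddresses))*100) >= bias {
			old++
		}
	}
	fmt.Printf("old picks: %d/%d (bias to new: %d%%)\n", old, numAddresses, bias)
	// old picks: 7/10 (bias to new: 30%)
}
```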


+125 -0 p2p/pex/addrbook_test.go

@@ -157,6 +157,13 @@ func TestAddrBookPromoteToOld(t *testing.T) {
 		t.Errorf("selection could not be bigger than the book")
 	}

+	selection = book.GetSelectionWithBias(30)
+	t.Logf("selection: %v", selection)
+	if len(selection) > book.Size() {
+		t.Errorf("selection with bias could not be bigger than the book")
+	}
+
 	assert.Equal(t, book.Size(), 100, "expecting book size to be 100")
 }
@@ -229,3 +236,121 @@ func TestAddrBookRemoveAddress(t *testing.T) {
 	book.RemoveAddress(nonExistingAddr)
 	assert.Equal(t, 0, book.Size())
 }
+
+func TestAddrBookGetSelection(t *testing.T) {
+	fname := createTempFileName("addrbook_test")
+	defer deleteTempFile(fname)
+
+	book := NewAddrBook(fname, true)
+	book.SetLogger(log.TestingLogger())
+
+	// 1) empty book
+	assert.Empty(t, book.GetSelection())
+
+	// 2) add one address
+	addr := randIPv4Address(t)
+	book.AddAddress(addr, addr)
+
+	assert.Equal(t, 1, len(book.GetSelection()))
+	assert.Equal(t, addr, book.GetSelection()[0])
+
+	// 3) add a bunch of addresses
+	randAddrs := randNetAddressPairs(t, 100)
+	for _, addrSrc := range randAddrs {
+		book.AddAddress(addrSrc.addr, addrSrc.src)
+	}
+
+	// check there are no duplicates
+	addrs := make(map[string]*p2p.NetAddress)
+	selection := book.GetSelection()
+	for _, addr := range selection {
+		if dup, ok := addrs[addr.String()]; ok {
+			t.Fatalf("selection %v contains duplicates %v", selection, dup)
+		}
+		addrs[addr.String()] = addr
+	}
+
+	if len(selection) > book.Size() {
+		t.Errorf("selection %v could not be bigger than the book", selection)
+	}
+}
+
+func TestAddrBookGetSelectionWithBias(t *testing.T) {
+	const biasTowardsNewAddrs = 30
+
+	fname := createTempFileName("addrbook_test")
+	defer deleteTempFile(fname)
+
+	book := NewAddrBook(fname, true)
+	book.SetLogger(log.TestingLogger())
+
+	// 1) empty book
+	selection := book.GetSelectionWithBias(biasTowardsNewAddrs)
+	assert.Empty(t, selection)
+
+	// 2) add one address
+	addr := randIPv4Address(t)
+	book.AddAddress(addr, addr)
+
+	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
+	assert.Equal(t, 1, len(selection))
+	assert.Equal(t, addr, selection[0])
+
+	// 3) add a bunch of addresses
+	randAddrs := randNetAddressPairs(t, 100)
+	for _, addrSrc := range randAddrs {
+		book.AddAddress(addrSrc.addr, addrSrc.src)
+	}
+
+	// check there are no duplicates
+	addrs := make(map[string]*p2p.NetAddress)
+	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
+	for _, addr := range selection {
+		if dup, ok := addrs[addr.String()]; ok {
+			t.Fatalf("selection %v contains duplicates %v", selection, dup)
+		}
+		addrs[addr.String()] = addr
+	}
+
+	if len(selection) > book.Size() {
+		t.Fatalf("selection %v could not be bigger than the book", selection)
+	}
+
+	// 4) mark 80% of the addresses as good
+	randAddrsLen := len(randAddrs)
+	for i, addrSrc := range randAddrs {
+		if int((float64(i)/float64(randAddrsLen))*100) >= 20 {
+			book.MarkGood(addrSrc.addr)
+		}
+	}
+
+	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
+
+	// check that ~70% of addresses returned are good
+	good := 0
+	for _, addr := range selection {
+		if book.IsGood(addr) {
+			good++
+		}
+	}
+	got, expected := int((float64(good)/float64(len(selection)))*100), (100 - biasTowardsNewAddrs)
+	if got >= expected {
+		t.Fatalf("expected more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection))
+	}
+}
+
+func TestAddrBookHasAddress(t *testing.T) {
+	fname := createTempFileName("addrbook_test")
+	defer deleteTempFile(fname)
+	book := NewAddrBook(fname, true)
+	book.SetLogger(log.TestingLogger())
+	addr := randIPv4Address(t)
+	book.AddAddress(addr, addr)
+
+	assert.True(t, book.HasAddress(addr))
+
+	book.RemoveAddress(addr)
+
+	assert.False(t, book.HasAddress(addr))
+}

+1 -1 p2p/pex/known_address.go

@@ -56,7 +56,7 @@ func (ka *knownAddress) isNew() bool {
 func (ka *knownAddress) markAttempt() {
 	now := time.Now()
 	ka.LastAttempt = now
-	ka.Attempts += 1
+	ka.Attempts++
 }

 func (ka *knownAddress) markGood() {


+58 -31 p2p/pex/pex_reactor.go

@@ -21,24 +21,31 @@ const (
 	// PexChannel is a channel for PEX messages
 	PexChannel = byte(0x00)

-	maxPexMessageSize = 1048576 // 1MB
+	maxMsgSize = 1048576 // 1MB

 	// ensure we have enough peers
 	defaultEnsurePeersPeriod   = 30 * time.Second
 	defaultMinNumOutboundPeers = 10

 	// Seed/Crawler constants
-	// TODO:
-	// We want seeds to only advertise good peers.
-	// Peers are marked by external mechanisms.
-	// We need a config value that can be set to be
-	// on the order of how long it would take before a good
-	// peer is marked good.
-	defaultSeedDisconnectWaitPeriod = 2 * time.Minute  // disconnect after this
-	defaultCrawlPeerInterval        = 2 * time.Minute  // dont redial for this. TODO: back-off
-	defaultCrawlPeersPeriod         = 30 * time.Second // check some peers every this
+	// We want seeds to only advertise good peers. Therefore they should wait at
+	// least as long as we expect it to take for a peer to become good before
+	// disconnecting.
+	// see consensus/reactor.go: blocksToContributeToBecomeGoodPeer
+	// 10000 blocks assuming 1s blocks ~ 2.7 hours.
+	defaultSeedDisconnectWaitPeriod = 3 * time.Hour
+	defaultCrawlPeerInterval        = 2 * time.Minute  // don't redial for this. TODO: back-off. what for?
+	defaultCrawlPeersPeriod         = 30 * time.Second // check some peers every this

 	maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
+
+	// if a node connects to a seed, it does not have any trusted peers.
+	// Especially in the beginning, the node should have more trusted peers than
+	// untrusted.
+	biasToSelectNewPeers = 30 // 70 to select good peers
 )
@@ -72,6 +79,10 @@ type PEXReactorConfig struct {
 	// Seeds is a list of addresses reactor may use
 	// if it can't connect to peers in the addrbook.
 	Seeds []string
+
+	// PrivatePeerIDs is a list of peer IDs, which must not be gossiped to other
+	// peers.
+	PrivatePeerIDs []string
 }

 type _attemptsToDial struct {
@@ -150,7 +161,12 @@ func (r *PEXReactor) AddPeer(p Peer) {
 		// Let the ensurePeersRoutine handle asking for more
 		// peers when we need - we don't trust inbound peers as much.
 		addr := p.NodeInfo().NetAddress()
-		r.book.AddAddress(addr, addr)
+		if !isAddrPrivate(addr, r.config.PrivatePeerIDs) {
+			err := r.book.AddAddress(addr, addr)
+			if err != nil {
+				r.Logger.Error("Failed to add new address", "err", err)
+			}
+		}
 	}
 }
@@ -181,8 +197,7 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
 		// Seeds disconnect after sending a batch of addrs
 		if r.config.SeedMode {
-			// TODO: should we be more selective ?
-			r.SendAddrs(src, r.book.GetSelection())
+			r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
 			r.Switch.StopPeerGracefully(src)
 		} else {
 			r.SendAddrs(src, r.book.GetSelection())
@@ -250,8 +265,11 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
 	srcAddr := src.NodeInfo().NetAddress()
 	for _, netAddr := range addrs {
-		if netAddr != nil {
-			r.book.AddAddress(netAddr, srcAddr)
+		if netAddr != nil && !isAddrPrivate(netAddr, r.config.PrivatePeerIDs) {
+			err := r.book.AddAddress(netAddr, srcAddr)
+			if err != nil {
+				r.Logger.Error("Failed to add new address", "err", err)
+			}
 		}
 	}
 	return nil
@@ -401,11 +419,15 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
 		// TODO: detect more "bad peer" scenarios
 		if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok {
 			r.book.MarkBad(addr)
+			r.attemptsToDial.Delete(addr.DialString())
 		} else {
 			r.book.MarkAttempt(addr)
+			// FIXME: if the addr is going to be removed from the addrbook (hard to
+			// tell at this point), we need to Delete it from attemptsToDial, not
+			// record another attempt.
+			// record attempt
+			r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
 		}
-		// record attempt
-		r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
 	} else {
 		// cleanup any history
 		r.attemptsToDial.Delete(addr.DialString())
@@ -455,9 +477,8 @@ func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int {
 	lAttempts, attempted := r.attemptsToDial.Load(addr.DialString())
 	if attempted {
 		return lAttempts.(_attemptsToDial).number
-	} else {
-		return 0
 	}
+	return 0
 }

 //----------------------------------------------------------
@@ -551,24 +572,16 @@ func (r *PEXReactor) crawlPeers() {
 			r.book.MarkAttempt(pi.Addr)
 			continue
 		}
-	}
-	// Crawl the connected peers asking for more addresses
-	for _, pi := range peerInfos {
-		// We will wait a minimum period of time before crawling peers again
-		if now.Sub(pi.LastAttempt) >= defaultCrawlPeerInterval {
-			peer := r.Switch.Peers().Get(pi.Addr.ID)
-			if peer != nil {
-				r.RequestAddrs(peer)
-			}
-		}
+
+		// Ask for more addresses
+		peer := r.Switch.Peers().Get(pi.Addr.ID)
+		r.RequestAddrs(peer)
 	}
 }

 // attemptDisconnects checks if we've been with each peer long enough to disconnect
 func (r *PEXReactor) attemptDisconnects() {
 	for _, peer := range r.Switch.Peers().List() {
-		status := peer.Status()
-		if status.Duration < defaultSeedDisconnectWaitPeriod {
+		if peer.Status().Duration < defaultSeedDisconnectWaitPeriod {
 			continue
 		}
 		if peer.IsPersistent() {
@@ -578,6 +591,16 @@ func (r *PEXReactor) attemptDisconnects() {
 	}
 }

+// isAddrPrivate returns true if addr is private.
+func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool {
+	for _, id := range privatePeerIDs {
+		if string(addr.ID) == id {
+			return true
+		}
+	}
+	return false
+}
+
 //-----------------------------------------------------------------------------
 // Messages
@@ -593,6 +616,10 @@ func RegisterPexMessage(cdc *amino.Codec) {
 // DecodeMessage implements interface registered above.
 func DecodeMessage(bz []byte) (msg PexMessage, err error) {
+	if len(bz) > maxMsgSize {
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
+			len(bz), maxMsgSize)
+	}
 	err = cdc.UnmarshalBinary(bz, &msg)
 	return
 }
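Two guards added above work independently of the reactor plumbing: private peer IDs never reach the address book, and oversized PEX messages are rejected before amino decoding. A minimal standalone sketch of both checks; the `maxMsgSize` value and error text mirror the diff, while the string-based ID is a simplification of the real `p2p.ID` type:

```go
package main

import "fmt"

const maxMsgSize = 1048576 // 1MB, mirrors the constant in the diff

// isAddrPrivate mirrors the helper above: a linear scan over configured IDs.
func isAddrPrivate(id string, privatePeerIDs []string) bool {
	for _, p := range privatePeerIDs {
		if id == p {
			return true
		}
	}
	return false
}

// checkMsgSize mirrors the guard DecodeMessage gained in this diff.
func checkMsgSize(bz []byte) error {
	if len(bz) > maxMsgSize {
		return fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	return nil
}

func main() {
	private := []string{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"}
	fmt.Println(isAddrPrivate("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", private)) // true
	fmt.Println(checkMsgSize(make([]byte, maxMsgSize+1)))
	// Msg exceeds max size (1048577 > 1048576)
}
```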


+68 -19 p2p/pex/pex_reactor_test.go

@@ -62,35 +62,45 @@ func TestPEXReactorRunning(t *testing.T) {
 	N := 3
 	switches := make([]*p2p.Switch, N)

+	// directory to store address books
 	dir, err := ioutil.TempDir("", "pex_reactor")
 	require.Nil(t, err)
 	defer os.RemoveAll(dir) // nolint: errcheck
-	book := NewAddrBook(filepath.Join(dir, "addrbook.json"), false)
-	book.SetLogger(log.TestingLogger())
+
+	books := make([]*addrBook, N)
+	logger := log.TestingLogger()

 	// create switches
 	for i := 0; i < N; i++ {
 		switches[i] = p2p.MakeSwitch(config, i, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
-			sw.SetLogger(log.TestingLogger().With("switch", i))
+			books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
+			books[i].SetLogger(logger.With("pex", i))
+			sw.SetAddrBook(books[i])

-			r := NewPEXReactor(book, &PEXReactorConfig{})
-			r.SetLogger(log.TestingLogger())
+			sw.SetLogger(logger.With("pex", i))
+
+			r := NewPEXReactor(books[i], &PEXReactorConfig{})
+			r.SetLogger(logger.With("pex", i))
 			r.SetEnsurePeersPeriod(250 * time.Millisecond)
 			sw.AddReactor("pex", r)
 			return sw
 		})
 	}

-	// fill the address book and add listeners
-	for _, s := range switches {
-		addr := s.NodeInfo().NetAddress()
-		book.AddAddress(addr, addr)
-		s.AddListener(p2p.NewDefaultListener("tcp", s.NodeInfo().ListenAddr, true, log.TestingLogger()))
+	addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) {
+		addr := switches[otherSwitchIndex].NodeInfo().NetAddress()
+		books[switchIndex].AddAddress(addr, addr)
 	}

-	// start switches
-	for _, s := range switches {
-		err := s.Start() // start switch and reactors
+	addOtherNodeAddrToAddrBook(0, 1)
+	addOtherNodeAddrToAddrBook(1, 0)
+	addOtherNodeAddrToAddrBook(2, 1)
+
+	for i, sw := range switches {
+		sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, true, logger.With("pex", i)))
+
+		err := sw.Start() // start switch and reactors
 		require.Nil(t, err)
 	}
@@ -126,6 +136,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
 	defer teardownReactor(book)

 	sw := createSwitchAndAddReactors(r)
+	sw.SetAddrBook(book)

 	peer := newMockPeer()
 	p2p.AddPeerToSwitch(sw, peer)
@@ -155,6 +166,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
 	defer teardownReactor(book)

 	sw := createSwitchAndAddReactors(r)
+	sw.SetAddrBook(book)

 	peer := newMockPeer()
 	p2p.AddPeerToSwitch(sw, peer)
@@ -181,13 +193,11 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
 }

 func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
+	// directory to store address books
 	dir, err := ioutil.TempDir("", "pex_reactor")
 	require.Nil(t, err)
 	defer os.RemoveAll(dir) // nolint: errcheck
-	book := NewAddrBook(filepath.Join(dir, "addrbook.json"), false)
-	book.SetLogger(log.TestingLogger())

 	// 1. create seed
 	seed := p2p.MakeSwitch(
 		config,
@@ -195,6 +205,10 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
 		"127.0.0.1",
 		"123.123.123",
 		func(i int, sw *p2p.Switch) *p2p.Switch {
+			book := NewAddrBook(filepath.Join(dir, "addrbook0.json"), false)
+			book.SetLogger(log.TestingLogger())
+			sw.SetAddrBook(book)
+
 			sw.SetLogger(log.TestingLogger())

 			r := NewPEXReactor(book, &PEXReactorConfig{})
@@ -221,6 +235,10 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
 		"127.0.0.1",
 		"123.123.123",
 		func(i int, sw *p2p.Switch) *p2p.Switch {
+			book := NewAddrBook(filepath.Join(dir, "addrbook1.json"), false)
+			book.SetLogger(log.TestingLogger())
+			sw.SetAddrBook(book)
+
 			sw.SetLogger(log.TestingLogger())

 			r := NewPEXReactor(
@@ -246,7 +264,8 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
 	defer teardownReactor(book)

 	// Seed/Crawler mode uses data from the Switch
-	_ = createSwitchAndAddReactors(pexR)
+	sw := createSwitchAndAddReactors(pexR)
+	sw.SetAddrBook(book)

 	// Create a peer, add it to the peer set and the addrbook.
 	peer := p2p.CreateRandomPeer(false)
@@ -267,11 +286,31 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
 	// TODO: test
 }

+func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
+	peer := p2p.CreateRandomPeer(false)
+
+	pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID())}})
+	defer teardownReactor(book)
+
+	// we have to send a request to receive responses
+	pexR.RequestAddrs(peer)
+
+	size := book.Size()
+	addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
+	msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
+	pexR.Receive(PexChannel, peer, msg)
+	assert.Equal(t, size, book.Size())
+
+	pexR.AddPeer(peer)
+	assert.Equal(t, size, book.Size())
+}
+
 func TestPEXReactorDialPeer(t *testing.T) {
 	pexR, book := createReactor(&PEXReactorConfig{})
 	defer teardownReactor(book)

-	_ = createSwitchAndAddReactors(pexR)
+	sw := createSwitchAndAddReactors(pexR)
+	sw.SetAddrBook(book)

 	peer := newMockPeer()
 	addr := peer.NodeInfo().NetAddress()
@@ -288,6 +327,15 @@ func TestPEXReactorDialPeer(t *testing.T) {
 	// must be skipped because it is too early
 	assert.Equal(t, 1, pexR.AttemptsToDial(addr))
+
+	if !testing.Short() {
+		time.Sleep(3 * time.Second)
+
+		// 3rd attempt
+		pexR.dialPeer(addr)
+		assert.Equal(t, 2, pexR.AttemptsToDial(addr))
+	}
 }

 type mockPeer struct {
@@ -368,6 +416,7 @@ func assertPeersWithTimeout(
 }

 func createReactor(config *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
+	// directory to store address book
 	dir, err := ioutil.TempDir("", "pex_reactor")
 	if err != nil {
 		panic(err)
@@ -375,7 +424,7 @@ func createReactor(config *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
 	book = NewAddrBook(filepath.Join(dir, "addrbook.json"), true)
 	book.SetLogger(log.TestingLogger())

-	r = NewPEXReactor(book, &PEXReactorConfig{})
+	r = NewPEXReactor(book, config)
 	r.SetLogger(log.TestingLogger())
 	return
 }


+37 -9 p2p/switch.go

@@ -31,15 +31,21 @@ const (
 //-----------------------------------------------------------------------------

+// An AddrBook represents an address book from the pex package, which is used
+// to store peer addresses.
 type AddrBook interface {
 	AddAddress(addr *NetAddress, src *NetAddress) error
+	AddOurAddress(*NetAddress)
+	OurAddress(*NetAddress) bool
 	MarkGood(*NetAddress)
+	RemoveAddress(*NetAddress)
+	HasAddress(*NetAddress) bool
 	Save()
 }

 //-----------------------------------------------------------------------------

-// `Switch` handles peer connections and exposes an API to receive incoming messages
+// Switch handles peer connections and exposes an API to receive incoming messages
 // on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
 // or more `Channels`. So while sending outgoing messages is typically performed on the peer,
 // incoming messages are received on the reactor.
@@ -64,6 +70,7 @@ type Switch struct {
 	rng *rand.Rand // seed for randomizing dial times and orders
 }

+// NewSwitch creates a new Switch with the given config.
 func NewSwitch(config *cfg.P2PConfig) *Switch {
 	sw := &Switch{
 		config: config,
@@ -341,20 +348,21 @@ func (sw *Switch) IsDialing(id ID) bool {
 // DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent).
 func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error {
 	netAddrs, errs := NewNetAddressStrings(peers)
+	// only log errors, dial correct addresses
 	for _, err := range errs {
 		sw.Logger.Error("Error in peer's address", "err", err)
 	}

+	ourAddr := sw.nodeInfo.NetAddress()
+
+	// TODO: move this out of here ?
 	if addrBook != nil {
 		// add peers to `addrBook`
-		ourAddr := sw.nodeInfo.NetAddress()
 		for _, netAddr := range netAddrs {
 			// do not add our address or ID
-			if netAddr.Same(ourAddr) {
-				continue
+			if !netAddr.Same(ourAddr) {
+				addrBook.AddAddress(netAddr, ourAddr)
 			}
-			// TODO: move this out of here ?
-			addrBook.AddAddress(netAddr, ourAddr)
 		}
 		// Persist some peers to disk right away.
 		// NOTE: integration tests depend on this
@@ -365,8 +373,14 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
 	perm := sw.rng.Perm(len(netAddrs))
 	for i := 0; i < len(perm); i++ {
 		go func(i int) {
-			sw.randomSleep(0)
 			j := perm[i]
+
+			// do not dial ourselves
+			if netAddrs[j].Same(ourAddr) {
+				return
+			}
+
+			sw.randomSleep(0)
 			err := sw.DialPeerWithAddress(netAddrs[j], persistent)
 			if err != nil {
 				sw.Logger.Error("Error dialing peer", "err", err)
@@ -520,6 +534,15 @@ func (sw *Switch) addPeer(pc peerConn) error {
 	// Avoid self
 	if sw.nodeKey.ID() == peerID {
+		addr := peerNodeInfo.NetAddress()
+
+		// remove the given address from the address book if we added it earlier
+		sw.addrBook.RemoveAddress(addr)
+
+		// add the given address to the address book to avoid dialing ourselves
+		// again; this is our public address
+		sw.addrBook.AddOurAddress(addr)
+
 		return ErrSwitchConnectToSelf
 	}
@@ -545,7 +568,9 @@ func (sw *Switch) addPeer(pc peerConn) error {
 	// All good. Start peer
 	if sw.IsRunning() {
-		sw.startInitPeer(peer)
+		if err = sw.startInitPeer(peer); err != nil {
+			return err
+		}
 	}

 	// Add the peer to .peers.
@@ -559,14 +584,17 @@ func (sw *Switch) addPeer(pc peerConn) error {
 	return nil
 }

-func (sw *Switch) startInitPeer(peer *peer) {
+func (sw *Switch) startInitPeer(peer *peer) error {
 	err := peer.Start() // spawn send/recv routines
 	if err != nil {
 		// Should never happen
 		sw.Logger.Error("Error starting peer", "peer", peer, "err", err)
+		return err
 	}

 	for _, reactor := range sw.reactors {
 		reactor.AddPeer(peer)
 	}
+
+	return nil
 }
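Tracing the self-dial handling in `addPeer`: the dialed address is dropped from the book and re-recorded as one of our own, which is exactly what `TestSwitchFiltersOutItself` below asserts. A toy model of that bookkeeping, using plain string addresses instead of `*NetAddress`:

```go
package main

import "fmt"

// tiny in-memory stand-in for the pex address book used by the Switch
type book struct{ addrs, ours map[string]bool }

func (b *book) RemoveAddress(addr string)   { delete(b.addrs, addr) }
func (b *book) AddOurAddress(addr string)   { b.ours[addr] = true }
func (b *book) OurAddress(addr string) bool { return b.ours[addr] }
func (b *book) HasAddress(addr string) bool { return b.addrs[addr] }

// what addPeer does when the remote turns out to be ourselves
func onConnectToSelf(b *book, dialedAddr string) error {
	b.RemoveAddress(dialedAddr) // forget it as a peer candidate
	b.AddOurAddress(dialedAddr) // remember it as our public address
	return fmt.Errorf("ErrSwitchConnectToSelf")
}

func main() {
	b := &book{addrs: map[string]bool{"1.2.3.4:46656": true}, ours: map[string]bool{}}
	err := onConnectToSelf(b, "1.2.3.4:46656")
	fmt.Println(err, b.HasAddress("1.2.3.4:46656"), b.OurAddress("1.2.3.4:46656"))
	// ErrSwitchConnectToSelf false true
}
```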

+59 -12 p2p/switch_test.go

@@ -38,8 +38,6 @@ type TestReactor struct {
 	mtx          sync.Mutex
 	channels     []*conn.ChannelDescriptor
-	peersAdded   []Peer
-	peersRemoved []Peer
 	logMessages  bool
 	msgsCounter  int
 	msgsReceived map[byte][]PeerMessage
@@ -60,17 +58,9 @@ func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor {
 	return tr.channels
 }

-func (tr *TestReactor) AddPeer(peer Peer) {
-	tr.mtx.Lock()
-	defer tr.mtx.Unlock()
-	tr.peersAdded = append(tr.peersAdded, peer)
-}
+func (tr *TestReactor) AddPeer(peer Peer) {}

-func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {
-	tr.mtx.Lock()
-	defer tr.mtx.Unlock()
-	tr.peersRemoved = append(tr.peersRemoved, peer)
-}
+func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {}

 func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
 	if tr.logMessages {
@@ -99,6 +89,10 @@ func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switc
 }

 func initSwitchFunc(i int, sw *Switch) *Switch {
+	sw.SetAddrBook(&addrBookMock{
+		addrs:    make(map[string]struct{}),
+		ourAddrs: make(map[string]struct{})})
+
 	// Make two reactors of two channels each
 	sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
 		{ID: byte(0x00), Priority: 10},
@@ -108,6 +102,7 @@ func initSwitchFunc(i int, sw *Switch) *Switch {
 		{ID: byte(0x02), Priority: 10},
 		{ID: byte(0x03), Priority: 10},
 	}, true))
+
 	return sw
 }
@@ -184,6 +179,32 @@ func TestConnAddrFilter(t *testing.T) {
 	assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
 }

+func TestSwitchFiltersOutItself(t *testing.T) {
+	s1 := MakeSwitch(config, 1, "127.0.0.2", "123.123.123", initSwitchFunc)
+	// addr := s1.NodeInfo().NetAddress()
+
+	// // add ourselves like we do in node.go#427
+	// s1.addrBook.AddOurAddress(addr)
+
+	// simulate s1 having a public IP by creating a remote peer with the same ID
+	rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: DefaultPeerConfig()}
+	rp.Start()
+
+	// addr should be rejected in addPeer based on the same ID
+	err := s1.DialPeerWithAddress(rp.Addr(), false)
+	if assert.Error(t, err) {
+		assert.Equal(t, ErrSwitchConnectToSelf, err)
+	}
+
+	assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
+
+	assert.False(t, s1.addrBook.HasAddress(rp.Addr()))
+
+	rp.Stop()
+
+	assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
+}
+
 func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
 	time.Sleep(timeout)
 	if sw.Peers().Size() != 0 {
@@ -349,3 +370,29 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
 	b.Logf("success: %v, failure: %v", numSuccess, numFailure)
 }
+
+type addrBookMock struct {
+	addrs    map[string]struct{}
+	ourAddrs map[string]struct{}
+}
+
+var _ AddrBook = (*addrBookMock)(nil)
+
+func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
+	book.addrs[addr.String()] = struct{}{}
+	return nil
+}
+func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} }
+func (book *addrBookMock) OurAddress(addr *NetAddress) bool {
+	_, ok := book.ourAddrs[addr.String()]
+	return ok
+}
+func (book *addrBookMock) MarkGood(*NetAddress) {}
+func (book *addrBookMock) HasAddress(addr *NetAddress) bool {
+	_, ok := book.addrs[addr.String()]
+	return ok
+}
+func (book *addrBookMock) RemoveAddress(addr *NetAddress) {
+	delete(book.addrs, addr.String())
+}
+func (book *addrBookMock) Save() {}

+1 -1 p2p/test_util.go

@@ -143,7 +143,7 @@ func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
 		Version:    version,
 		ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
 	}
-	for ch, _ := range sw.reactorsByCh {
+	for ch := range sw.reactorsByCh {
 		ni.Channels = append(ni.Channels, ch)
 	}
 	sw.SetNodeInfo(ni)


+1 -1 p2p/upnp/upnp.go

@@ -103,7 +103,7 @@ func Discover() (nat NAT, err error) {
 			return
 		}
 	}
-	err = errors.New("UPnP port discovery failed.")
+	err = errors.New("UPnP port discovery failed")
 	return
 }


+9 -0 rpc/client/httpclient.go

@@ -126,6 +126,15 @@ func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
 	return result, nil
 }

+func (c *HTTP) Health() (*ctypes.ResultHealth, error) {
+	result := new(ctypes.ResultHealth)
+	_, err := c.rpc.Call("health", map[string]interface{}{}, result)
+	if err != nil {
+		return nil, errors.Wrap(err, "Health")
+	}
+	return result, nil
+}
+
 func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
 	result := new(ctypes.ResultBlockchainInfo)
 	_, err := c.rpc.Call("blockchain",


+1 -0 rpc/client/interface.go

@@ -83,6 +83,7 @@ type Client interface {
 type NetworkClient interface {
 	NetInfo() (*ctypes.ResultNetInfo, error)
 	DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
+	Health() (*ctypes.ResultHealth, error)
 }

 // EventsClient is reactive, you can subscribe to any message, given the proper
// EventsClient is reactive, you can subscribe to any message, given the proper // EventsClient is reactive, you can subscribe to any message, given the proper


+4 -0 rpc/client/localclient.go

@@ -84,6 +84,10 @@ func (Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
 	return core.DumpConsensusState()
 }

+func (Local) Health() (*ctypes.ResultHealth, error) {
+	return core.Health()
+}
+
 func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
 	return core.UnsafeDialSeeds(seeds)
 }


+20 -11 rpc/client/rpc_test.go

@@ -78,6 +78,15 @@ func TestDumpConsensusState(t *testing.T) {
 	}
 }

+func TestHealth(t *testing.T) {
+	for i, c := range GetClients() {
+		nc, ok := c.(client.NetworkClient)
+		require.True(t, ok, "%d", i)
+		_, err := nc.Health()
+		require.Nil(t, err, "%d: %+v", i, err)
+	}
+}
+
 func TestGenesisAndValidators(t *testing.T) {
 	for i, c := range GetClients() {
@@ -244,13 +253,11 @@ func TestBroadcastTxCommit(t *testing.T) {
 }

 func TestTx(t *testing.T) {
-	assert, require := assert.New(t), require.New(t)
-
 	// first we broadcast a tx
 	c := getHTTPClient()
 	_, _, tx := MakeTxKV()
 	bres, err := c.BroadcastTxCommit(tx)
-	require.Nil(err, "%+v", err)
+	require.Nil(t, err, "%+v", err)

 	txHeight := bres.Height
 	txHash := bres.Hash
@@ -280,18 +287,19 @@ func TestTx(t *testing.T) {
 			ptx, err := c.Tx(tc.hash, tc.prove)

 			if !tc.valid {
-				require.NotNil(err)
+				require.NotNil(t, err)
 			} else {
-				require.Nil(err, "%+v", err)
-				assert.EqualValues(txHeight, ptx.Height)
-				assert.EqualValues(tx, ptx.Tx)
-				assert.Zero(ptx.Index)
-				assert.True(ptx.TxResult.IsOK())
+				require.Nil(t, err, "%+v", err)
+				assert.EqualValues(t, txHeight, ptx.Height)
+				assert.EqualValues(t, tx, ptx.Tx)
+				assert.Zero(t, ptx.Index)
+				assert.True(t, ptx.TxResult.IsOK())
+				assert.EqualValues(t, txHash, ptx.Hash)

 				// time to verify the proof
 				proof := ptx.Proof
-				if tc.prove && assert.EqualValues(tx, proof.Data) {
-					assert.True(proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash))
+				if tc.prove && assert.EqualValues(t, tx, proof.Data) {
+					assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash))
 				}
 			}
@@ -324,6 +332,7 @@ func TestTxSearch(t *testing.T) {
 		assert.EqualValues(t, tx, ptx.Tx)
 		assert.Zero(t, ptx.Index)
 		assert.True(t, ptx.TxResult.IsOK())
+		assert.EqualValues(t, txHash, ptx.Hash)

 		// time to verify the proof
 		proof := ptx.Proof

+1 -0 rpc/core/doc.go

@@ -81,6 +81,7 @@ Available endpoints:
 /net_info
 /num_unconfirmed_txs
 /status
+/health
 /unconfirmed_txs
 /unsafe_flush_mempool
 /unsafe_stop_cpu_profiler


+31 -0 rpc/core/health.go

@@ -0,0 +1,31 @@
+package core
+
+import (
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+)
+
+// Get node health. Returns empty result (200 OK) on success, no response - in
+// case of an error.
+//
+// ```shell
+// curl 'localhost:46657/health'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
+// result, err := client.Health()
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+// 	"error": "",
+// 	"result": {},
+// 	"id": "",
+// 	"jsonrpc": "2.0"
+// }
+// ```
+func Health() (*ctypes.ResultHealth, error) {
+	return &ctypes.ResultHealth{}, nil
+}
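Since `/health` returns an empty result on success and an error otherwise, callers only need to inspect the error. A sketch of a simple liveness poller on top of the client method added above; the endpoint address matches the doc comment in the diff and is illustrative:

```go
package main

import (
	"fmt"
	"time"

	client "github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
	for i := 0; i < 3; i++ {
		// an empty ResultHealth with a nil error means the node is up
		if _, err := c.Health(); err != nil {
			fmt.Println("node unhealthy:", err)
		} else {
			fmt.Println("node healthy")
		}
		time.Sleep(5 * time.Second)
	}
}
```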

+2 -0 rpc/core/net.go

@@ -2,6 +2,7 @@ package core
 import (
 	"github.com/pkg/errors"
+
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 )
@@ -42,6 +43,7 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
 	for _, peer := range p2pSwitch.Peers().List() {
 		peers = append(peers, ctypes.Peer{
 			NodeInfo:         peer.NodeInfo(),
+			ID:               peer.ID(),
 			IsOutbound:       peer.IsOutbound(),
 			ConnectionStatus: peer.Status(),
 		})


+2 -2 rpc/core/pipe.go

@@ -3,10 +3,10 @@ package core
 import (
 	"time"

-	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/go-crypto"
 	"github.com/tendermint/tendermint/consensus"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
-	p2p "github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/state/txindex"


+1 -0 rpc/core/routes.go

@@ -13,6 +13,7 @@ var Routes = map[string]*rpc.RPCFunc{
 	"unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""),

 	// info API
+	"health":     rpc.NewRPCFunc(Health, ""),
 	"status":     rpc.NewRPCFunc(Status, ""),
 	"net_info":   rpc.NewRPCFunc(NetInfo, ""),
 	"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"),


+46 -3 rpc/core/status.go

@@ -1,9 +1,11 @@
 package core

 import (
+	"bytes"
 	"time"

 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 	cmn "github.com/tendermint/tmlibs/common"
 )
@@ -48,7 +50,10 @@
 //		"remote_addr": "",
 //		"network": "test-chain-qhVCa2",
 //		"moniker": "vagrant-ubuntu-trusty-64",
-//		"pub_key": "844981FE99ABB19F7816F2D5E94E8A74276AB1153760A7799E925C75401856C6"
+//		"pub_key": "844981FE99ABB19F7816F2D5E94E8A74276AB1153760A7799E925C75401856C6",
+//		"validator_status": {
+//			"voting_power": 10
+//		}
 //	}
 // },
 // "id": "",
@@ -72,12 +77,50 @@ func Status() (*ctypes.ResultStatus, error) {
 	latestBlockTime := time.Unix(0, latestBlockTimeNano)

-	return &ctypes.ResultStatus{
+	result := &ctypes.ResultStatus{
 		NodeInfo:          p2pSwitch.NodeInfo(),
 		PubKey:            pubKey,
 		LatestBlockHash:   latestBlockHash,
 		LatestAppHash:     latestAppHash,
 		LatestBlockHeight: latestHeight,
 		LatestBlockTime:   latestBlockTime,
-		Syncing:           consensusReactor.FastSync()}, nil
+		Syncing:           consensusReactor.FastSync(),
+	}
+
+	// add ValidatorStatus if node is a validator
+	if val := validatorAtHeight(latestHeight); val != nil {
+		result.ValidatorStatus = ctypes.ValidatorStatus{
+			VotingPower: val.VotingPower,
+		}
+	}
+
+	return result, nil
+}
+
+func validatorAtHeight(h int64) *types.Validator {
+	lastBlockHeight, vals := consensusState.GetValidators()
+
+	privValAddress := pubKey.Address()
+
+	// if we're still at height h, search in the current validator set
+	if lastBlockHeight == h {
+		for _, val := range vals {
+			if bytes.Equal(val.Address, privValAddress) {
+				return val
+			}
+		}
+	}
+
+	// if we've moved to the next height, retrieve the validator set from DB
+	if lastBlockHeight > h {
+		vals, err := sm.LoadValidators(stateDB, h)
+		if err != nil {
+			// should not happen
+			return nil
+		}
+		_, val := vals.GetByAddress(privValAddress)
+		return val
+	}
+
+	return nil
+}
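With `validator_status` populated, a client can tell from a single `/status` call whether the node it queries currently validates, without a separate validators query. A hedged sketch using the field added in this diff; the address is illustrative:

```go
package main

import (
	"fmt"

	client "github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
	status, err := c.Status()
	if err != nil {
		panic(err)
	}
	// zero voting power means the node is not in the current validator set
	if status.ValidatorStatus.VotingPower > 0 {
		fmt.Println("validator with power", status.ValidatorStatus.VotingPower)
	} else {
		fmt.Println("full node (not a validator)")
	}
}
```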

+10 -4 rpc/core/tx.go

@@ -44,7 +44,8 @@
 //		"code": 0
 //	},
 //	"index": 0,
-//	"height": 52
+//	"height": 52,
+//	"hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
 // },
 // "id": "",
 // "jsonrpc": "2.0"
@@ -67,11 +68,12 @@
 // - `tx_result`: the `abci.Result` object
 // - `index`: `int` - index of the transaction
 // - `height`: `int` - height of the block where this transaction was in
+// - `hash`: `[]byte` - hash of the transaction
 func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 	// if index is disabled, return error
 	if _, ok := txIndexer.(*null.TxIndex); ok {
-		return nil, fmt.Errorf("Transaction indexing is disabled.")
+		return nil, fmt.Errorf("Transaction indexing is disabled")
 	}

 	r, err := txIndexer.Get(hash)
@@ -93,6 +95,7 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 	}

 	return &ctypes.ResultTx{
+		Hash:     hash,
 		Height:   height,
 		Index:    uint32(index),
 		TxResult: r.Result,
@@ -137,7 +140,8 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 //		"tx": "mvZHHa7HhZ4aRT0xMDA=",
 //		"tx_result": {},
 //		"index": 31,
-//		"height": 12
+//		"height": 12,
+//		"hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
 //	}
 // ],
 // "id": "",
@@ -161,10 +165,11 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 // - `tx_result`: the `abci.Result` object
 // - `index`: `int` - index of the transaction
 // - `height`: `int` - height of the block where this transaction was in
+// - `hash`: `[]byte` - hash of the transaction
 func TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) {
 	// if index is disabled, return error
 	if _, ok := txIndexer.(*null.TxIndex); ok {
-		return nil, fmt.Errorf("Transaction indexing is disabled.")
+		return nil, fmt.Errorf("Transaction indexing is disabled")
 	}

 	q, err := tmquery.New(query)
@@ -191,6 +196,7 @@ func TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) {
 	}

 	apiResults[i] = &ctypes.ResultTx{
+		Hash:     r.Tx.Hash(),
 		Height:   height,
 		Index:    index,
 		TxResult: r.Result,

+ 16
- 7
rpc/core/types/responses.go View File

@ -54,14 +54,19 @@ func NewResultCommit(header *types.Header, commit *types.Commit,
} }
} }
type ValidatorStatus struct {
VotingPower int64 `json:"voting_power"`
}
type ResultStatus struct { type ResultStatus struct {
NodeInfo p2p.NodeInfo `json:"node_info"`
PubKey crypto.PubKey `json:"pub_key"`
LatestBlockHash cmn.HexBytes `json:"latest_block_hash"`
LatestAppHash cmn.HexBytes `json:"latest_app_hash"`
LatestBlockHeight int64 `json:"latest_block_height"`
LatestBlockTime time.Time `json:"latest_block_time"`
Syncing bool `json:"syncing"`
NodeInfo p2p.NodeInfo `json:"node_info"`
PubKey crypto.PubKey `json:"pub_key"`
LatestBlockHash cmn.HexBytes `json:"latest_block_hash"`
LatestAppHash cmn.HexBytes `json:"latest_app_hash"`
LatestBlockHeight int64 `json:"latest_block_height"`
LatestBlockTime time.Time `json:"latest_block_time"`
Syncing bool `json:"syncing"`
ValidatorStatus ValidatorStatus `json:"validator_status,omitempty"`
} }
func (s *ResultStatus) TxIndexEnabled() bool { func (s *ResultStatus) TxIndexEnabled() bool {
@ -93,6 +98,7 @@ type ResultDialPeers struct {
type Peer struct { type Peer struct {
p2p.NodeInfo `json:"node_info"` p2p.NodeInfo `json:"node_info"`
p2p.ID `json:"node_id"`
IsOutbound bool `json:"is_outbound"` IsOutbound bool `json:"is_outbound"`
ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
} }
@ -123,6 +129,7 @@ type ResultBroadcastTxCommit struct {
} }
type ResultTx struct { type ResultTx struct {
Hash cmn.HexBytes `json:"hash"`
Height int64 `json:"height"` Height int64 `json:"height"`
Index uint32 `json:"index"` Index uint32 `json:"index"`
TxResult abci.ResponseDeliverTx `json:"tx_result"` TxResult abci.ResponseDeliverTx `json:"tx_result"`
@ -155,3 +162,5 @@ type ResultEvent struct {
Query string `json:"query"` Query string `json:"query"`
Data types.TMEventData `json:"data"` Data types.TMEventData `json:"data"`
} }
type ResultHealth struct{}
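
ResultStatus now nests a ValidatorStatus reporting the node's voting power (presumably zero when the node is not in the validator set, hence the omitempty tag), and ResultTx and Peer gain hash and node_id fields. A small sketch of branching on the new field, with made-up values:

package main

import (
	"fmt"

	ctypes "github.com/tendermint/tendermint/rpc/core/types"
)

func main() {
	// Made-up status, as a caller might receive it from /status.
	status := &ctypes.ResultStatus{
		Syncing:         false,
		ValidatorStatus: ctypes.ValidatorStatus{VotingPower: 10},
	}

	if status.ValidatorStatus.VotingPower > 0 {
		fmt.Println("node is in the active validator set")
	} else {
		fmt.Println("node is not a validator (voting power 0)")
	}
}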

+ 13
- 13
rpc/lib/client/ws_client.go

@ -329,21 +329,21 @@ func (c *WSClient) reconnectRoutine() {
c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError) c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
c.Stop() c.Stop()
return return
} else {
// drain reconnectAfter
LOOP:
for {
select {
case <-c.reconnectAfter:
default:
break LOOP
}
}
err = c.processBacklog()
if err == nil {
c.startReadWriteRoutines()
}
// drain reconnectAfter
LOOP:
for {
select {
case <-c.reconnectAfter:
default:
break LOOP
} }
} }
err := c.processBacklog()
if err == nil {
c.startReadWriteRoutines()
}
case <-c.Quit(): case <-c.Quit():
return return
} }
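
The reconnect routine drops an else branch that golint flags (the if body returns), keeping the non-blocking drain of reconnectAfter. The drain idiom in isolation, as a runnable sketch:

package main

import "fmt"

func main() {
	reconnectAfter := make(chan struct{}, 3)
	reconnectAfter <- struct{}{} // stale signals queued while disconnected
	reconnectAfter <- struct{}{}

	// Non-blocking drain: the labeled break exits the outer for loop from
	// inside the select once the channel is empty.
DRAIN:
	for {
		select {
		case <-reconnectAfter:
		default:
			break DRAIN
		}
	}
	fmt.Println("queued signals left:", len(reconnectAfter)) // 0
}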


+ 32
- 13
rpc/lib/server/http_server.go

@ -18,32 +18,51 @@ import (
) )
func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger) (listener net.Listener, err error) { func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger) (listener net.Listener, err error) {
// listenAddr should be fully formed including tcp:// or unix:// prefix
var proto, addr string var proto, addr string
parts := strings.SplitN(listenAddr, "://", 2) parts := strings.SplitN(listenAddr, "://", 2)
if len(parts) != 2 { if len(parts) != 2 {
logger.Error("WARNING (tendermint/rpc/lib): Please use fully formed listening addresses, including the tcp:// or unix:// prefix")
// we used to allow addrs without tcp/unix prefix by checking for a colon
// TODO: Deprecate
proto = types.SocketType(listenAddr)
addr = listenAddr
// return nil, errors.Errorf("Invalid listener address %s", lisenAddr)
} else {
proto, addr = parts[0], parts[1]
return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
} }
proto, addr = parts[0], parts[1]
logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s socket %v", proto, addr))
logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listenAddr))
listener, err = net.Listen(proto, addr) listener, err = net.Listen(proto, addr)
if err != nil { if err != nil {
return nil, errors.Errorf("Failed to listen to %v: %v", listenAddr, err)
return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
} }
go func() { go func() {
res := http.Serve(
err := http.Serve(
listener, listener,
RecoverAndLogHandler(handler, logger), RecoverAndLogHandler(handler, logger),
) )
logger.Error("RPC HTTP server stopped", "result", res)
logger.Error("RPC HTTP server stopped", "err", err)
}()
return listener, nil
}
func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, keyFile string, logger log.Logger) (listener net.Listener, err error) {
var proto, addr string
parts := strings.SplitN(listenAddr, "://", 2)
if len(parts) != 2 {
return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
}
proto, addr = parts[0], parts[1]
logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listenAddr, certFile, keyFile))
listener, err = net.Listen(proto, addr)
if err != nil {
return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
}
go func() {
err := http.ServeTLS(
listener,
RecoverAndLogHandler(handler, logger),
certFile,
keyFile,
)
logger.Error("RPC HTTPS server stopped", "err", err)
}() }()
return listener, nil return listener, nil
} }
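
StartHTTPServer now rejects listen addresses without an explicit protocol prefix instead of guessing one, and the new StartHTTPAndTLSServer applies the same rule before handing the listener to http.ServeTLS. The validation in isolation (splitListenAddr is a hypothetical helper mirroring the parsing above):

package main

import (
	"fmt"
	"strings"
)

// splitListenAddr mirrors the check the server now performs: the address
// must carry an explicit tcp:// or unix:// prefix.
func splitListenAddr(listenAddr string) (proto, addr string, err error) {
	parts := strings.SplitN(listenAddr, "://", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf(
			"invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)",
			listenAddr)
	}
	return parts[0], parts[1], nil
}

func main() {
	for _, a := range []string{"tcp://0.0.0.0:46657", "localhost:46657"} {
		proto, addr, err := splitListenAddr(a)
		if err != nil {
			fmt.Println("rejected:", err) // bare host:port now errors out
			continue
		}
		fmt.Println("ok:", proto, addr)
	}
}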


+ 1
- 2
rpc/lib/types/types.go

@ -118,9 +118,8 @@ func NewRPCErrorResponse(id string, code int, msg string, data string) RPCRespon
func (resp RPCResponse) String() string { func (resp RPCResponse) String() string {
if resp.Error == nil { if resp.Error == nil {
return fmt.Sprintf("[%s %v]", resp.ID, resp.Result) return fmt.Sprintf("[%s %v]", resp.ID, resp.Result)
} else {
return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
} }
return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
} }
func RPCParseError(id string, err error) RPCResponse { func RPCParseError(id string, err error) RPCResponse {
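
The same golint cleanup as in ws_client.go: when the if body returns, the else is dropped and its body dedented. Behavior is unchanged, as a tiny sketch of the idiom shows:

package main

import "fmt"

func describe(err error) string {
	if err == nil {
		return "[ok]"
	}
	return fmt.Sprintf("[%s]", err) // early return replaces the else branch
}

func main() {
	fmt.Println(describe(nil), describe(fmt.Errorf("boom")))
}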


+ 0
- 35
scripts/dep_utils/checkout.sh

@ -1,35 +0,0 @@
#! /bin/bash
set -ex
set +u
if [[ "$DEP" == "" ]]; then
DEP=$GOPATH/src/github.com/tendermint/tendermint/Gopkg.lock
fi
set -u
set -u
function getVendoredVersion() {
grep -A100 "$LIB" "$DEP" | grep revision | head -n1 | grep -o '"[^"]\+"' | cut -d '"' -f 2
}
# fetch and checkout vendored dep
lib=$1
echo "----------------------------------"
echo "Getting $lib ..."
go get -t "github.com/tendermint/$lib/..."
VENDORED=$(getVendoredVersion "$lib")
cd "$GOPATH/src/github.com/tendermint/$lib" || exit
MASTER=$(git rev-parse origin/master)
if [[ "$VENDORED" != "$MASTER" ]]; then
echo "... VENDORED != MASTER ($VENDORED != $MASTER)"
echo "... Checking out commit $VENDORED"
git checkout "$VENDORED" &> /dev/null
fi

+ 1
- 0
scripts/wal2json/main.go

@ -4,6 +4,7 @@
Usage: Usage:
wal2json <path-to-wal> wal2json <path-to-wal>
*/ */
package main package main
import ( import (


+ 181
- 0
scripts/wire2amino.go

@ -0,0 +1,181 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/tendermint/go-amino"
crypto "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
priv_val "github.com/tendermint/tendermint/types/priv_validator"
)
type GenesisValidator struct {
PubKey Data `json:"pub_key"`
Power int64 `json:"power"`
Name string `json:"name"`
}
type Genesis struct {
GenesisTime time.Time `json:"genesis_time"`
ChainID string `json:"chain_id"`
ConsensusParams *types.ConsensusParams `json:"consensus_params,omitempty"`
Validators []GenesisValidator `json:"validators"`
AppHash cmn.HexBytes `json:"app_hash"`
AppStateJSON json.RawMessage `json:"app_state,omitempty"`
AppOptions json.RawMessage `json:"app_options,omitempty"` // DEPRECATED
}
type NodeKey struct {
PrivKey Data `json:"priv_key"`
}
type PrivVal struct {
Address cmn.HexBytes `json:"address"`
LastHeight int64 `json:"last_height"`
LastRound int `json:"last_round"`
LastStep int8 `json:"last_step"`
PubKey Data `json:"pub_key"`
PrivKey Data `json:"priv_key"`
}
type Data struct {
Type string `json:"type"`
Data cmn.HexBytes `json:"data"`
}
func convertNodeKey(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
var nodeKey NodeKey
err := json.Unmarshal(jsonBytes, &nodeKey)
if err != nil {
return nil, err
}
var privKey crypto.PrivKeyEd25519
copy(privKey[:], nodeKey.PrivKey.Data)
nodeKeyNew := p2p.NodeKey{privKey}
bz, err := cdc.MarshalJSON(nodeKeyNew)
if err != nil {
return nil, err
}
return bz, nil
}
func convertPrivVal(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
var privVal PrivVal
err := json.Unmarshal(jsonBytes, &privVal)
if err != nil {
return nil, err
}
var privKey crypto.PrivKeyEd25519
copy(privKey[:], privVal.PrivKey.Data)
var pubKey crypto.PubKeyEd25519
copy(pubKey[:], privVal.PubKey.Data)
privValNew := priv_val.FilePV{
Address: pubKey.Address(),
PubKey: pubKey,
LastHeight: privVal.LastHeight,
LastRound: privVal.LastRound,
LastStep: privVal.LastStep,
PrivKey: privKey,
}
bz, err := cdc.MarshalJSON(privValNew)
if err != nil {
return nil, err
}
return bz, nil
}
func convertGenesis(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
var genesis Genesis
err := json.Unmarshal(jsonBytes, &genesis)
if err != nil {
return nil, err
}
genesisNew := types.GenesisDoc{
GenesisTime: genesis.GenesisTime,
ChainID: genesis.ChainID,
ConsensusParams: genesis.ConsensusParams,
// Validators
AppHash: genesis.AppHash,
AppStateJSON: genesis.AppStateJSON,
}
if genesis.AppOptions != nil {
genesisNew.AppStateJSON = genesis.AppOptions
}
for _, v := range genesis.Validators {
var pubKey crypto.PubKeyEd25519
copy(pubKey[:], v.PubKey.Data)
genesisNew.Validators = append(
genesisNew.Validators,
types.GenesisValidator{
PubKey: pubKey,
Power: v.Power,
Name: v.Name,
},
)
}
bz, err := cdc.MarshalJSON(genesisNew)
if err != nil {
return nil, err
}
return bz, nil
}
func main() {
cdc := amino.NewCodec()
crypto.RegisterAmino(cdc)
args := os.Args[1:]
if len(args) != 1 {
fmt.Println("Please specify a file to convert")
os.Exit(1)
}
filePath := args[0]
fileName := filepath.Base(filePath)
fileBytes, err := ioutil.ReadFile(filePath)
if err != nil {
panic(err)
}
var bz []byte
switch fileName {
case "node_key.json":
bz, err = convertNodeKey(cdc, fileBytes)
case "priv_validator.json":
bz, err = convertPrivVal(cdc, fileBytes)
case "genesis.json":
bz, err = convertGenesis(cdc, fileBytes)
default:
fmt.Println("Expected file name to be in (node_key.json, priv_validator.json, genesis.json)")
os.Exit(1)
}
if err != nil {
panic(err)
}
fmt.Println(string(bz))
}
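
wire2amino.go is a one-shot migration helper: pointed at a go-wire era node_key.json, priv_validator.json, or genesis.json, it prints the equivalent amino-encoded JSON on stdout. The opaque "type" strings in the regenerated fixtures further down (e.g. AC26791624DE60 for ed25519 public keys) are amino's registered type identifiers, produced by the crypto.RegisterAmino call above. A minimal sketch of that envelope, assuming the go-amino and go-crypto versions pinned by this release:

package main

import (
	"fmt"

	"github.com/tendermint/go-amino"
	crypto "github.com/tendermint/go-crypto"
)

type keyEnvelope struct {
	PubKey crypto.PubKey `json:"pub_key"`
}

func main() {
	cdc := amino.NewCodec()
	crypto.RegisterAmino(cdc) // same registration the script performs

	// A zero ed25519 key, for illustration only: marshalling through the
	// registered PubKey interface yields the {"type","value"} envelope
	// seen in the regenerated fixtures.
	bz, err := cdc.MarshalJSON(keyEnvelope{PubKey: crypto.PubKeyEd25519{}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // {"pub_key":{"type":"AC26791624DE60","value":"..."}}
}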

+ 2
- 2
state/state_test.go

@ -219,7 +219,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
// use the next pubkey // use the next pubkey
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] {
changeIndex++ changeIndex++
power += 1
power++
} }
header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power)
state, err = updateState(state, blockID, header, responses) state, err = updateState(state, blockID, header, responses)
@ -237,7 +237,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
// use the next pubkey (note our counter starts at 0 this time) // use the next pubkey (note our counter starts at 0 this time)
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 {
changeIndex++ changeIndex++
power += 1
power++
} }
testCases[i-1] = power testCases[i-1] = power
} }


+ 90
- 51
state/txindex/kv/kv.go

@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -145,9 +146,8 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
res, err := txi.Get(hash) res, err := txi.Get(hash)
if res == nil { if res == nil {
return []*types.TxResult{}, nil return []*types.TxResult{}, nil
} else {
return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result")
} }
return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result")
} }
// conditions to skip because they're handled before "everything else" // conditions to skip because they're handled before "everything else"
@ -168,10 +168,10 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
for _, r := range ranges { for _, r := range ranges {
if !hashesInitialized { if !hashesInitialized {
hashes = txi.matchRange(r, startKeyForRange(r, height))
hashes = txi.matchRange(r, []byte(r.key))
hashesInitialized = true hashesInitialized = true
} else { } else {
hashes = intersect(hashes, txi.matchRange(r, startKeyForRange(r, height)))
hashes = intersect(hashes, txi.matchRange(r, []byte(r.key)))
} }
} }
} }
@ -200,6 +200,11 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
i++ i++
} }
// sort by height by default
sort.Slice(results, func(i, j int) bool {
return results[i].Height < results[j].Height
})
return results, nil return results, nil
} }
@ -234,6 +239,52 @@ type queryRange struct {
includeUpperBound bool includeUpperBound bool
} }
func (r queryRange) lowerBoundValue() interface{} {
if r.lowerBound == nil {
return nil
}
if r.includeLowerBound {
return r.lowerBound
} else {
switch t := r.lowerBound.(type) {
case int64:
return t + 1
case time.Time:
return t.Unix() + 1
default:
panic("not implemented")
}
}
}
func (r queryRange) AnyBound() interface{} {
if r.lowerBound != nil {
return r.lowerBound
} else {
return r.upperBound
}
}
func (r queryRange) upperBoundValue() interface{} {
if r.upperBound == nil {
return nil
}
if r.includeUpperBound {
return r.upperBound
} else {
switch t := r.upperBound.(type) {
case int64:
return t - 1
case time.Time:
return t.Unix() - 1
default:
panic("not implemented")
}
}
}
func lookForRanges(conditions []query.Condition) (ranges queryRanges, indexes []int) { func lookForRanges(conditions []query.Condition) (ranges queryRanges, indexes []int) {
ranges = make(queryRanges) ranges = make(queryRanges)
for i, c := range conditions { for i, c := range conditions {
@ -297,34 +348,49 @@ func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte)
return return
} }
func (txi *TxIndex) matchRange(r queryRange, startKey []byte) (hashes [][]byte) {
it := dbm.IteratePrefix(txi.store, startKey)
func (txi *TxIndex) matchRange(r queryRange, prefix []byte) (hashes [][]byte) {
// create a map to prevent duplicates
hashesMap := make(map[string][]byte)
lowerBound := r.lowerBoundValue()
upperBound := r.upperBoundValue()
it := dbm.IteratePrefix(txi.store, prefix)
defer it.Close() defer it.Close()
LOOP: LOOP:
for ; it.Valid(); it.Next() { for ; it.Valid(); it.Next() {
if !isTagKey(it.Key()) { if !isTagKey(it.Key()) {
continue continue
} }
if r.upperBound != nil {
// no other way to stop iterator other than checking for upperBound
switch (r.upperBound).(type) {
case int64:
v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
if err == nil && v == r.upperBound {
if r.includeUpperBound {
hashes = append(hashes, it.Value())
}
break LOOP
}
// XXX: passing time in a ABCI Tags is not yet implemented
// case time.Time:
// v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
// if v == r.upperBound {
// break
// }
switch r.AnyBound().(type) {
case int64:
v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
if err != nil {
continue LOOP
} }
include := true
if lowerBound != nil && v < lowerBound.(int64) {
include = false
}
if upperBound != nil && v > upperBound.(int64) {
include = false
}
if include {
hashesMap[fmt.Sprintf("%X", it.Value())] = it.Value()
}
// XXX: passing time in a ABCI Tags is not yet implemented
// case time.Time:
// v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
// if v == r.upperBound {
// break
// }
} }
hashes = append(hashes, it.Value())
}
hashes = make([][]byte, len(hashesMap))
i := 0
for _, h := range hashesMap {
hashes[i] = h
i++
} }
return return
} }
@ -342,33 +408,6 @@ func startKey(c query.Condition, height int64) []byte {
return []byte(key) return []byte(key)
} }
func startKeyForRange(r queryRange, height int64) []byte {
if r.lowerBound == nil {
return []byte(r.key)
}
var lowerBound interface{}
if r.includeLowerBound {
lowerBound = r.lowerBound
} else {
switch t := r.lowerBound.(type) {
case int64:
lowerBound = t + 1
case time.Time:
lowerBound = t.Unix() + 1
default:
panic("not implemented")
}
}
var key string
if height > 0 {
key = fmt.Sprintf("%s/%v/%d", r.key, lowerBound, height)
} else {
key = fmt.Sprintf("%s/%v", r.key, lowerBound)
}
return []byte(key)
}
func isTagKey(key []byte) bool { func isTagKey(key []byte) bool {
return strings.Count(string(key), tagKeySeparator) == 3 return strings.Count(string(key), tagKeySeparator) == 3
} }
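
The range matching changes in three ways: iteration starts at the bare tag prefix instead of a computed start key (startKeyForRange is deleted above), each candidate value is checked against both bounds via lowerBoundValue/upperBoundValue, and duplicate hashes are collapsed through a map before Search sorts results by height. The filtering core reduced to a runnable sketch, with made-up heights and hashes:

package main

import (
	"fmt"
	"sort"
)

type txRef struct {
	Height int64
	Hash   string
}

func main() {
	// Made-up iterator output: out of order, one duplicate, one out of range.
	candidates := []txRef{{2, "AA"}, {1, "BB"}, {2, "AA"}, {12, "CC"}}
	var lower, upper int64 = 1, 10 // inclusive after the +/-1 adjustment

	seen := make(map[string]txRef) // dedup on hash, like hashesMap above
	for _, c := range candidates {
		if c.Height >= lower && c.Height <= upper {
			seen[c.Hash] = c
		}
	}

	results := make([]txRef, 0, len(seen))
	for _, r := range seen {
		results = append(results, r)
	}
	// Search now sorts by height by default before returning.
	sort.Slice(results, func(i, j int) bool { return results[i].Height < results[j].Height })
	fmt.Println(results) // [{1 BB} {2 AA}]
}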


+ 54
- 3
state/txindex/kv/kv_test.go

@ -123,6 +123,35 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
assert.Equal(t, []*types.TxResult{txResult}, results) assert.Equal(t, []*types.TxResult{txResult}, results)
} }
func TestTxSearchMultipleTxs(t *testing.T) {
allowedTags := []string{"account.number"}
indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags))
// indexed first, but bigger height (to test the order of transactions)
txResult := txResultWithTags([]cmn.KVPair{
{Key: []byte("account.number"), Value: []byte("1")},
})
txResult.Tx = types.Tx("Bob's account")
txResult.Height = 2
err := indexer.Index(txResult)
require.NoError(t, err)
// indexed second, but smaller height (to test the order of transactions)
txResult2 := txResultWithTags([]cmn.KVPair{
{Key: []byte("account.number"), Value: []byte("2")},
})
txResult2.Tx = types.Tx("Alice's account")
txResult2.Height = 1
err = indexer.Index(txResult2)
require.NoError(t, err)
results, err := indexer.Search(query.MustParse("account.number >= 1"))
assert.NoError(t, err)
require.Len(t, results, 2)
assert.Equal(t, []*types.TxResult{txResult2, txResult}, results)
}
func TestIndexAllTags(t *testing.T) { func TestIndexAllTags(t *testing.T) {
indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) indexer := NewTxIndex(db.NewMemDB(), IndexAllTags())
@ -147,12 +176,34 @@ func TestIndexAllTags(t *testing.T) {
func txResultWithTags(tags []cmn.KVPair) *types.TxResult { func txResultWithTags(tags []cmn.KVPair) *types.TxResult {
tx := types.Tx("HELLO WORLD") tx := types.Tx("HELLO WORLD")
return &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: tags}}
return &types.TxResult{
Height: 1,
Index: 0,
Tx: tx,
Result: abci.ResponseDeliverTx{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",
Tags: tags,
Fee: cmn.KI64Pair{Key: nil, Value: 0},
},
}
} }
func benchmarkTxIndex(txsCount int, b *testing.B) { func benchmarkTxIndex(txsCount int, b *testing.B) {
tx := types.Tx("HELLO WORLD") tx := types.Tx("HELLO WORLD")
txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}}
txResult := &types.TxResult{
Height: 1,
Index: 0,
Tx: tx,
Result: abci.ResponseDeliverTx{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",
Tags: []cmn.KVPair{},
Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0},
},
}
dir, err := ioutil.TempDir("", "tx_index_db") dir, err := ioutil.TempDir("", "tx_index_db")
if err != nil { if err != nil {
@ -168,7 +219,7 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
if err := batch.Add(txResult); err != nil { if err := batch.Add(txResult); err != nil {
b.Fatal(err) b.Fatal(err)
} }
txResult.Index += 1
txResult.Index++
} }
b.ResetTimer() b.ResetTimer()


+ 0
- 4
test/README.md

@ -19,7 +19,3 @@ and run the following tests in docker containers:
- send a tx on each node and ensure the state root is updated on all of them - send a tx on each node and ensure the state root is updated on all of them
- crash and restart nodes one at a time and ensure they can sync back up (via fastsync) - crash and restart nodes one at a time and ensure they can sync back up (via fastsync)
- crash and restart all nodes at once and ensure they can sync back up - crash and restart all nodes at once and ensure they can sync back up
If on a `release-x.x.x` branch, we also run
- `go test` for all our dependency libs (test/test_libs.sh)

+ 0
- 9
test/p2p/data/app/init.sh

@ -1,9 +0,0 @@
#! /bin/bash
# This is a sample bash script for a ABCI application
cd app/
git clone https://github.com/tendermint/nomnomcoin.git
cd nomnomcoin
npm install .
node app.js --eyes="unix:///data/tendermint/data/data.sock"

+ 0
- 53
test/p2p/data/chain_config.json

@ -1,53 +0,0 @@
{
"id": "",
"val_set_id": "anon",
"validators": [
{
"validator": {
"id": "mach1",
"pub_key": {
"type": "ed25519",
"data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
}
},
"p2p_addr": "",
"rpc_addr": ""
},
{
"validator": {
"id": "mach2",
"pub_key": {
"type": "ed25519",
"data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
}
},
"p2p_addr": "",
"rpc_addr": "",
"index": 1
},
{
"validator": {
"id": "mach3",
"pub_key": {
"type": "ed25519",
"data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
}
},
"p2p_addr": "",
"rpc_addr": "",
"index": 2
},
{
"validator": {
"id": "mach4",
"pub_key": {
"type": "ed25519",
"data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
}
},
"p2p_addr": "",
"rpc_addr": "",
"index": 3
}
]
}

+ 37
- 37
test/p2p/data/mach1/core/config/genesis.json

@ -1,39 +1,39 @@
{ {
"app_hash": "",
"chain_id": "chain-9ujDWI",
"genesis_time": "2016-06-24T20:01:19.322Z",
"validators": [
{
"power": 1,
"name": "mach1",
"pub_key": {
"type": "ed25519",
"data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
}
},
{
"power": 1,
"name": "mach2",
"pub_key": {
"type": "ed25519",
"data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
}
},
{
"power": 1,
"name": "mach3",
"pub_key": {
"type": "ed25519",
"data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
}
},
{
"power": 1,
"name": "mach4",
"pub_key": {
"type": "ed25519",
"data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
}
}
]
"genesis_time": "2016-06-24T20:01:19.322Z",
"chain_id": "chain-9ujDWI",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
},
"power": 1,
"name": "mach1"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
},
"power": 1,
"name": "mach2"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
},
"power": 1,
"name": "mach3"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
},
"power": 1,
"name": "mach4"
}
],
"app_hash": ""
} }
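
This genesis fixture and its mach2-mach4 twins below were regenerated with scripts/wire2amino.go: each wire-era {"type":"ed25519","data":"<hex>"} object becomes an amino envelope whose "value" is the identical key, base64- rather than hex-encoded. A quick equivalence check, runnable as-is:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// mach1's pub key from the new fixture above...
	b64 := "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
	raw, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		panic(err)
	}
	// ...decodes to the same bytes the old fixture spelled in hex.
	old := "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
	fmt.Println(fmt.Sprintf("%X", raw) == old) // true
}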

+ 6
- 1
test/p2p/data/mach1/core/config/node_key.json

@ -1 +1,6 @@
{"priv_key":{"type":"ed25519","data":"06962D169F314ABB9D05AE5A04B46E48F0FBD8F1830149B47493910CBDCA7796096E5B94CD179F545AE3C281D9BF5C9E0E3D8FF719048B62F7849094CFFA8591"}}
{
"priv_key": {
"type": "954568A3288910",
"value": "BpYtFp8xSrudBa5aBLRuSPD72PGDAUm0dJORDL3Kd5YJbluUzRefVFrjwoHZv1yeDj2P9xkEi2L3hJCUz/qFkQ=="
}
}

+ 10
- 10
test/p2p/data/mach1/core/config/priv_validator.json

@ -1,14 +1,14 @@
{ {
"address": "0E6925C3EE4C599DFF1536A5071AF4A26DF33635",
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "ed25519",
"data": "547AA07C7A8CE16C5CB2A40C6C26D15B0A32960410A9F1EA6E50B636F1AB389ABE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
"address": "7E9D1FB08EDBAFCF116638D4C8FAFAEE2ABE1AAA",
"pub_key": {
"type": "AC26791624DE60",
"value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
}, },
"pub_key": {
"type": "ed25519",
"data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "954568A3288910",
"value": "VHqgfHqM4WxcsqQMbCbRWwoylgQQqfHqblC2NvGrOJq+iTPf8WAMAm40cY8XhaTN6rkMNWmLOU44tpR66R3hFg=="
} }
} }

+ 37
- 37
test/p2p/data/mach2/core/config/genesis.json

@ -1,39 +1,39 @@
{ {
"app_hash": "",
"chain_id": "chain-9ujDWI",
"genesis_time": "2016-06-24T20:01:19.322Z",
"validators": [
{
"power": 1,
"name": "mach1",
"pub_key": {
"type": "ed25519",
"data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
}
},
{
"power": 1,
"name": "mach2",
"pub_key": {
"type": "ed25519",
"data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
}
},
{
"power": 1,
"name": "mach3",
"pub_key": {
"type": "ed25519",
"data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
}
},
{
"power": 1,
"name": "mach4",
"pub_key": {
"type": "ed25519",
"data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
}
}
]
"genesis_time": "2016-06-24T20:01:19.322Z",
"chain_id": "chain-9ujDWI",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
},
"power": 1,
"name": "mach1"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
},
"power": 1,
"name": "mach2"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
},
"power": 1,
"name": "mach3"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
},
"power": 1,
"name": "mach4"
}
],
"app_hash": ""
} }

+ 6
- 1
test/p2p/data/mach2/core/config/node_key.json

@ -1 +1,6 @@
{"priv_key":{"type":"ed25519","data":"B8CE8B0D5138C10208526ABDADCE91C735FCCC4186E06E0972EC35E64973428A45EBC61F24CE1B91B3D26AFBAB11C2789EF04CBAC28183619C01116B66A9C528"}}
{
"priv_key": {
"type": "954568A3288910",
"value": "uM6LDVE4wQIIUmq9rc6RxzX8zEGG4G4Jcuw15klzQopF68YfJM4bkbPSavurEcJ4nvBMusKBg2GcARFrZqnFKA=="
}
}

+ 10
- 10
test/p2p/data/mach2/core/config/priv_validator.json

@ -1,14 +1,14 @@
{ {
"address": "99DBBD2AFC28FB5BAC5574AFAF0D9C806CED3B55",
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "ed25519",
"data": "D047889E60502FC3129D0AB7F334B1838ED9ED1ECD99CBB96B71AD5ABF5A81436DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
"address": "8893D14FE09F1157E39CD34B98036048D51B4985",
"pub_key": {
"type": "AC26791624DE60",
"value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
}, },
"pub_key": {
"type": "ed25519",
"data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "954568A3288910",
"value": "0EeInmBQL8MSnQq38zSxg47Z7R7Nmcu5a3GtWr9agUNtxTRGUyMSZYfSoqk7WdaJtxcHOx3paKJabvE9WVMYrQ=="
} }
} }

+ 37
- 37
test/p2p/data/mach3/core/config/genesis.json

@ -1,39 +1,39 @@
{ {
"app_hash": "",
"chain_id": "chain-9ujDWI",
"genesis_time": "2016-06-24T20:01:19.322Z",
"validators": [
{
"power": 1,
"name": "mach1",
"pub_key": {
"type": "ed25519",
"data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
}
},
{
"power": 1,
"name": "mach2",
"pub_key": {
"type": "ed25519",
"data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
}
},
{
"power": 1,
"name": "mach3",
"pub_key": {
"type": "ed25519",
"data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
}
},
{
"power": 1,
"name": "mach4",
"pub_key": {
"type": "ed25519",
"data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
}
}
]
"genesis_time": "2016-06-24T20:01:19.322Z",
"chain_id": "chain-9ujDWI",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
},
"power": 1,
"name": "mach1"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
},
"power": 1,
"name": "mach2"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
},
"power": 1,
"name": "mach3"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
},
"power": 1,
"name": "mach4"
}
],
"app_hash": ""
} }

+ 6
- 1
test/p2p/data/mach3/core/config/node_key.json

@ -1 +1,6 @@
{"priv_key":{"type":"ed25519","data":"913DE8AC6D18922A53F6B0196EF023B4693FECFBB565E084F0B4941768F3DAE892B35ADD954562FE071C465BC244B2AFAED4A270EC849269341473CE192DE682"}}
{
"priv_key": {
"type": "954568A3288910",
"value": "kT3orG0YkipT9rAZbvAjtGk/7Pu1ZeCE8LSUF2jz2uiSs1rdlUVi/gccRlvCRLKvrtSicOyEkmk0FHPOGS3mgg=="
}
}

+ 10
- 10
test/p2p/data/mach3/core/config/priv_validator.json

@ -1,14 +1,14 @@
{ {
"address": "4C5F061DAC28660853904A66705B12CA2B317572",
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "ed25519",
"data": "C1A4E47F349FC5F556F4A9A27BA776B94424C312BAA6CF6EE44B867348D7C3F2AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
"address": "7C747D7E002932B3864E3FBE9AC04287043F66A0",
"pub_key": {
"type": "AC26791624DE60",
"value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
}, },
"pub_key": {
"type": "ed25519",
"data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "954568A3288910",
"value": "waTkfzSfxfVW9Kmie6d2uUQkwxK6ps9u5EuGc0jXw/KuZ6xpfRNaoLRgHqV+qrP+v0uqTyKcRaWYwphbEvzRoQ=="
} }
} }

+ 37
- 37
test/p2p/data/mach4/core/config/genesis.json

@ -1,39 +1,39 @@
{ {
"app_hash": "",
"chain_id": "chain-9ujDWI",
"genesis_time": "2016-06-24T20:01:19.322Z",
"validators": [
{
"power": 1,
"name": "mach1",
"pub_key": {
"type": "ed25519",
"data": "BE8933DFF1600C026E34718F1785A4CDEAB90C35698B394E38B6947AE91DE116"
}
},
{
"power": 1,
"name": "mach2",
"pub_key": {
"type": "ed25519",
"data": "6DC534465323126587D2A2A93B59D689B717073B1DE968A25A6EF13D595318AD"
}
},
{
"power": 1,
"name": "mach3",
"pub_key": {
"type": "ed25519",
"data": "AE67AC697D135AA0B4601EA57EAAB3FEBF4BAA4F229C45A598C2985B12FCD1A1"
}
},
{
"power": 1,
"name": "mach4",
"pub_key": {
"type": "ed25519",
"data": "9EBC8F58CED4B46DCD5AB8ABA591DD253CD7CB5037273FDA32BC0B6461C4EFD9"
}
}
]
"genesis_time": "2016-06-24T20:01:19.322Z",
"chain_id": "chain-9ujDWI",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY="
},
"power": 1,
"name": "mach1"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0="
},
"power": 1,
"name": "mach2"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE="
},
"power": 1,
"name": "mach3"
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k="
},
"power": 1,
"name": "mach4"
}
],
"app_hash": ""
} }

Some files were not shown because too many files changed in this diff
