
Merge pull request #3276 from tendermint/release/v0.29.2

Release/v0.29.2
pull/3282/head v0.29.2
Ethan Buchman 5 years ago
committed by GitHub
commit a8dbc64319
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
140 changed files with 2632 additions and 865 deletions
  1. .circleci/config.yml (+24, -138)
  2. .golangci.yml (+61, -0)
  3. CHANGELOG.md (+44, -0)
  4. CHANGELOG_PENDING.md (+2, -12)
  5. Gopkg.lock (+12, -12)
  6. Gopkg.toml (+14, -2)
  7. Makefile (+21, -40)
  8. PHILOSOPHY.md (+158, -0)
  9. README.md (+1, -0)
  10. Vagrantfile (+1, -1)
  11. abci/types/messages_test.go (+1, -1)
  12. benchmarks/codec_test.go (+1, -1)
  13. blockchain/pool.go (+2, -1)
  14. blockchain/reactor_test.go (+2, -2)
  15. blockchain/store_test.go (+14, -9)
  16. blockchain/wire.go (+1, -1)
  17. cmd/tendermint/commands/root_test.go (+0, -4)
  18. cmd/tendermint/commands/wire.go (+1, -1)
  19. config/toml.go (+11, -2)
  20. consensus/byzantine_test.go (+1, -2)
  21. consensus/common_test.go (+0, -30)
  22. consensus/reactor.go (+4, -4)
  23. consensus/reactor_test.go (+1, -1)
  24. consensus/replay.go (+12, -2)
  25. consensus/replay_file.go (+6, -5)
  26. consensus/replay_test.go (+1, -1)
  27. consensus/state.go (+30, -4)
  28. consensus/state_test.go (+0, -4)
  29. consensus/types/round_state_test.go (+4, -4)
  30. consensus/wal.go (+16, -7)
  31. consensus/wal_test.go (+9, -3)
  32. consensus/wire.go (+1, -1)
  33. crypto/encoding/amino/encode_test.go (+2, -3)
  34. crypto/merkle/proof.go (+1, -1)
  35. crypto/merkle/proof_test.go (+2, -1)
  36. crypto/merkle/rfc6962_test.go (+1, -1)
  37. crypto/merkle/wire.go (+1, -1)
  38. crypto/random.go (+0, -104)
  39. crypto/secp256k1/secp256k1.go (+4, -26)
  40. crypto/secp256k1/secp256k1_cgo.go (+24, -0)
  41. crypto/secp256k1/secp256k1_nocgo.go (+71, -0)
  42. crypto/secp256k1/secp256k1_nocgo_test.go (+39, -0)
  43. crypto/secp256k1/secpk256k1_test.go (+1, -1)
  44. crypto/xsalsa20symmetric/symmetric.go (+0, -1)
  45. crypto/xsalsa20symmetric/symmetric_test.go (+0, -4)
  46. docs/.vuepress/config.js (+6, -5)
  47. docs/architecture/adr-033-pubsub.md (+78, -24)
  48. docs/tendermint-core/rpc.md (+1, -1)
  49. docs/tools/README.md (+5, -2)
  50. docs/tools/benchmarking.md (+1, -1)
  51. docs/tools/monitoring.md (+1, -1)
  52. docs/tools/remote-signer-validation.md (+146, -0)
  53. evidence/pool_test.go (+0, -2)
  54. evidence/reactor.go (+1, -1)
  55. evidence/wire.go (+1, -1)
  56. libs/autofile/group.go (+12, -0)
  57. libs/clist/clist_test.go (+12, -10)
  58. libs/common/colors.go (+1, -1)
  59. libs/common/net.go (+17, -0)
  60. libs/events/events.go (+1, -1)
  61. libs/fail/fail.go (+3, -2)
  62. libs/flowrate/io_test.go (+8, -8)
  63. libs/pubsub/pubsub.go (+85, -43)
  64. libs/pubsub/pubsub_test.go (+19, -0)
  65. libs/pubsub/query/query_test.go (+3, -3)
  66. libs/test.sh (+1, -1)
  67. lite/helpers.go (+3, -3)
  68. lite/proxy/query_test.go (+2, -0)
  69. mempool/mempool.go (+5, -8)
  70. mempool/wire.go (+1, -1)
  71. node/node.go (+42, -33)
  72. node/node_test.go (+9, -10)
  73. p2p/conn/connection.go (+52, -32)
  74. p2p/conn/connection_test.go (+1, -1)
  75. p2p/conn/secret_connection.go (+52, -0)
  76. p2p/conn/secret_connection_test.go (+26, -9)
  77. p2p/conn/wire.go (+1, -1)
  78. p2p/pex/addrbook.go (+25, -2)
  79. p2p/pex/addrbook_test.go (+235, -24)
  80. p2p/pex/pex_reactor.go (+1, -2)
  81. p2p/pex/pex_reactor_test.go (+75, -0)
  82. p2p/pex/wire.go (+1, -1)
  83. p2p/switch.go (+3, -5)
  84. p2p/test_util.go (+7, -17)
  85. p2p/transport_test.go (+4, -4)
  86. p2p/trust/metric_test.go (+1, -0)
  87. p2p/wire.go (+1, -1)
  88. privval/client.go (+13, -11)
  89. privval/client_test.go (+2, -2)
  90. privval/file.go (+3, -3)
  91. privval/remote_signer.go (+1, -1)
  92. privval/socket_test.go (+17, -24)
  93. privval/wire.go (+1, -1)
  94. rpc/client/rpc_test.go (+1, -1)
  95. rpc/core/types/wire.go (+1, -1)
  96. rpc/grpc/grpc_test.go (+2, -2)
  97. rpc/lib/client/args_test.go (+1, -2)
  98. rpc/lib/client/http_client.go (+1, -1)
  99. rpc/lib/client/ws_client.go (+1, -1)
  100. rpc/lib/types/types_test.go (+1, -1)

.circleci/config.yml (+24, -138)

@ -48,10 +48,10 @@ jobs:
key: v3-pkg-cache
paths:
- /go/pkg
# - save_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# paths:
# - /go/src/github.com/tendermint/tendermint
- save_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
paths:
- /go/src/github.com/tendermint/tendermint
build_slate:
<<: *defaults
@ -60,23 +60,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: slate docs
command: |
@ -91,29 +76,14 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
make get_dev_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: metalinter
command: |
set -ex
export PATH="$GOBIN:$PATH"
make metalinter
make lint
- run:
name: check_dep
command: |
@ -128,22 +98,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run abci apps tests
command: |
@ -159,22 +115,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run abci-cli tests
command: |
@ -188,22 +130,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
- run:
name: Run tests
@ -217,22 +145,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run: mkdir -p /tmp/logs
- run:
name: Run tests
@ -256,22 +170,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh
@ -292,9 +192,7 @@ jobs:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost
@ -317,22 +215,10 @@ jobs:
steps:
- attach_workspace:
at: /tmp/workspace
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: gather
command: |


.golangci.yml (+61, -0)

@ -0,0 +1,61 @@
run:
deadline: 1m
linters:
enable-all: true
disable:
- gocyclo
- golint
- maligned
- errcheck
- staticcheck
- dupl
- ineffassign
- interfacer
- unconvert
- goconst
- unparam
- nakedret
- lll
- gochecknoglobals
- govet
- gocritic
- gosec
- gochecknoinits
- scopelint
- stylecheck
# linters-settings:
# govet:
# check-shadowing: true
# golint:
# min-confidence: 0
# gocyclo:
# min-complexity: 10
# maligned:
# suggest-new: true
# dupl:
# threshold: 100
# goconst:
# min-len: 2
# min-occurrences: 2
# depguard:
# list-type: blacklist
# packages:
# # logging is allowed only by logutils.Log, logrus
# # is allowed to use only in logutils package
# - github.com/sirupsen/logrus
# misspell:
# locale: US
# lll:
# line-length: 140
# goimports:
# local-prefixes: github.com/golangci/golangci-lint
# gocritic:
# enabled-tags:
# - performance
# - style
# - experimental
# disabled-checks:
# - wrapperFunc
# - commentFormatting # https://github.com/go-critic/go-critic/issues/755

CHANGELOG.md (+44, -0)

@ -1,5 +1,49 @@
# Changelog
## v0.29.2
*February 7th, 2019*
Special thanks to external contributors on this release:
@ackratos, @rickyyangz
**Note**: This release contains security sensitive patches in the `p2p` and
`crypto` packages:
- p2p:
- Partial fix for MITM attacks on the p2p connection. MITM conditions may
still exist. See \#3010.
- crypto:
- Eliminate our fork of `btcd` and use the `btcd/btcec` library directly for
native secp256k1 signing. Note we still modify the signature encoding to
prevent malleability.
- Support the libsecp256k1 library via CGo through the `go-ethereum/crypto/secp256k1` package.
- Eliminate MixEntropy functions
### BREAKING CHANGES:
* Go API
- [crypto] [\#3278](https://github.com/tendermint/tendermint/issues/3278) Remove
MixEntropy functions
- [types] [\#3245](https://github.com/tendermint/tendermint/issues/3245) Commit uses `type CommitSig Vote` instead of `Vote` directly.
In preparation for removing redundant fields from the commit [\#1648](https://github.com/tendermint/tendermint/issues/1648)
### IMPROVEMENTS:
- [consensus] [\#3246](https://github.com/tendermint/tendermint/issues/3246) Better logging and notes on recovery for corrupted WAL file
- [crypto] [\#3163](https://github.com/tendermint/tendermint/issues/3163) Use ethereum's libsecp256k1 go-wrapper for signatures when cgo is available
- [crypto] [\#3162](https://github.com/tendermint/tendermint/issues/3162) Wrap btcd instead of forking it to keep up with fixes (used if cgo is not available)
- [makefile] [\#3233](https://github.com/tendermint/tendermint/issues/3233) Use golangci-lint instead of go-metalinter
- [tools] [\#3218](https://github.com/tendermint/tendermint/issues/3218) Add go-deadlock tool to help detect deadlocks
- [tools] [\#3106](https://github.com/tendermint/tendermint/issues/3106) Add tm-signer-harness test harness for remote signers
- [tests] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fixed a bunch of non-deterministic test failures
### BUG FIXES:
- [node] [\#3186](https://github.com/tendermint/tendermint/issues/3186) EventBus and indexerService should be started before first block (for replay last block on handshake) execution (@ackratos)
- [p2p] [\#3232](https://github.com/tendermint/tendermint/issues/3232) Fix infinite loop leading to addrbook deadlock for seed nodes
- [p2p] [\#3247](https://github.com/tendermint/tendermint/issues/3247) Fix panic in SeedMode when calling FlushStop and OnStop
concurrently
- [p2p] [\#3040](https://github.com/tendermint/tendermint/issues/3040) Fix MITM on secret connection by checking low-order points
- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket
## v0.29.1
*January 24, 2019*


CHANGELOG_PENDING.md (+2, -12)

@ -1,21 +1,11 @@
## v0.30.0
## v0.30
*TBD*
**
Special thanks to external contributors on this release:
### BREAKING CHANGES:
* CLI/RPC/Config
* Apps
* Go API
* Blockchain Protocol
* P2P Protocol
### FEATURES:
### IMPROVEMENTS:


Gopkg.lock (+12, -12)

@ -10,12 +10,11 @@
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
branch = "master"
digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
digest = "1:093bf93a65962e8191e3e8cd8fc6c363f83d43caca9739c906531ba7210a9904"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"
revision = "ed77733ec07dfc8a513741138419b8d9d3de9d2d"
[[projects]]
digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
@ -35,6 +34,14 @@
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:b42be5a3601f833e0b9f2d6625d887ec1309764bfcac3d518f3db425dcd4ec5c"
name = "github.com/ethereum/go-ethereum"
packages = ["crypto/secp256k1"]
pruneopts = "T"
revision = "9dc5d1a915ac0e0bd8429d6ac41df50eec91de5f"
version = "v1.8.21"
[[projects]]
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
name = "github.com/fortytw2/leaktest"
@ -360,14 +367,6 @@
pruneopts = "UT"
revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"
[[projects]]
digest = "1:83f5e189eea2baad419a6a410984514266ff690075759c87e9ede596809bd0b8"
name = "github.com/tendermint/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "80daadac05d1cd29571fccf27002d79667a88b58"
version = "v0.1.1"
[[projects]]
digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a"
name = "github.com/tendermint/go-amino"
@ -504,8 +503,10 @@
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/btcsuite/btcd/btcec",
"github.com/btcsuite/btcutil/base58",
"github.com/btcsuite/btcutil/bech32",
"github.com/ethereum/go-ethereum/crypto/secp256k1",
"github.com/fortytw2/leaktest",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",
@ -535,7 +536,6 @@
"github.com/syndtr/goleveldb/leveldb/errors",
"github.com/syndtr/goleveldb/leveldb/iterator",
"github.com/syndtr/goleveldb/leveldb/opt",
"github.com/tendermint/btcd/btcec",
"github.com/tendermint/go-amino",
"golang.org/x/crypto/bcrypt",
"golang.org/x/crypto/chacha20poly1305",


Gopkg.toml (+14, -2)

@ -75,14 +75,26 @@
name = "github.com/prometheus/client_golang"
version = "^0.9.1"
# we use the secp256k1 implementation:
[[constraint]]
name = "github.com/tendermint/btcd"
version = "v0.1.1"
name = "github.com/ethereum/go-ethereum"
version = "^v1.8.21"
# Prevent dep from pruning build scripts and codegen templates
# note: this leaves the whole go-ethereum package in vendor
# can be removed when https://github.com/golang/dep/issues/1847 is resolved
[[prune.project]]
name = "github.com/ethereum/go-ethereum"
unused-packages = false
###################################
## Some repos dont have releases.
## Pin to revision
[[constraint]]
name = "github.com/btcsuite/btcd"
revision = "ed77733ec07dfc8a513741138419b8d9d3de9d2d"
[[constraint]]
name = "golang.org/x/crypto"
revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"


Makefile (+21, -40)

@ -1,7 +1,7 @@
GOTOOLS = \
github.com/mitchellh/gox \
github.com/golang/dep/cmd/dep \
github.com/alecthomas/gometalinter \
github.com/golangci/golangci-lint/cmd/golangci-lint \
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/square/certstrap
GOBIN?=${GOPATH}/bin
@ -11,8 +11,6 @@ INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protob
BUILD_TAGS?='tendermint'
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s
all: check build test install
check: check_tools get_vendor_deps
@ -82,10 +80,6 @@ get_tools:
@echo "--> Installing tools"
./scripts/get_tools.sh
get_dev_tools:
@echo "--> Downloading linters (this may take awhile)"
$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)
update_tools:
@echo "--> Updating tools"
./scripts/get_tools.sh
@ -226,6 +220,22 @@ test_race:
@echo "--> Running go test --race"
@GOCACHE=off go test -p 1 -v -race $(PACKAGES)
# uses https://github.com/sasha-s/go-deadlock/ to detect potential deadlocks
test_with_deadlock:
make set_with_deadlock
make test
make cleanup_after_test_with_deadlock
set_with_deadlock:
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.RWMutex/deadlock.RWMutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.Mutex/deadlock.Mutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w
# cleans up after you run test_with_deadlock
cleanup_after_test_with_deadlock:
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.RWMutex/sync.RWMutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.Mutex/sync.Mutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w
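For context on what the sed rewrite above produces, here is a minimal sketch, assuming the drop-in `deadlock.Mutex`/`deadlock.RWMutex` types from github.com/sasha-s/go-deadlock; the `Counter` type is hypothetical and not part of this change.
```go
package example

import (
	deadlock "github.com/sasha-s/go-deadlock"
)

// Counter shows what a struct looks like after `make set_with_deadlock`
// has rewritten sync.Mutex to deadlock.Mutex and goimports added the import.
type Counter struct {
	mtx deadlock.Mutex // was sync.Mutex before the rewrite
	n   int64
}

// Incr locks exactly as before; go-deadlock additionally reports potential
// deadlocks such as inconsistent lock ordering or locks held too long.
func (c *Counter) Incr() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.n++
}
```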
########################################
### Formatting, linting, and vetting
@ -233,38 +243,9 @@ test_race:
fmt:
@go fmt ./...
metalinter:
lint:
@echo "--> Running linter"
@gometalinter $(LINT_FLAGS) --disable-all \
--enable=deadcode \
--enable=gosimple \
--enable=misspell \
--enable=safesql \
./...
#--enable=gas \
#--enable=maligned \
#--enable=dupl \
#--enable=errcheck \
#--enable=goconst \
#--enable=gocyclo \
#--enable=goimports \
#--enable=golint \ <== comments on anything exported
#--enable=gotype \
#--enable=ineffassign \
#--enable=interfacer \
#--enable=megacheck \
#--enable=staticcheck \
#--enable=structcheck \
#--enable=unconvert \
#--enable=unparam \
#--enable=unused \
#--enable=varcheck \
#--enable=vet \
#--enable=vetshadow \
metalinter_all:
@echo "--> Running linter (all)"
gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...
@golangci-lint run
DESTINATION = ./index.html.md
@ -288,7 +269,7 @@ build-docker:
### Local testnet using docker
# Build linux binary on other platforms
build-linux:
build-linux: get_tools get_vendor_deps
GOOS=linux GOARCH=amd64 $(MAKE) build
build-docker-localnode:
@ -330,4 +311,4 @@ build-slate:
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools get_dev_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint

PHILOSOPHY.md (+158, -0)

@ -0,0 +1,158 @@
## Design goals
The design goals for Tendermint (and the SDK and related libraries) are:
* Simplicity and Legibility
* Parallel performance, namely ability to utilize multicore architecture
* Ability to evolve the codebase bug-free
* Debuggability
* Complete correctness that considers all edge cases, esp in concurrency
* Future-proof modular architecture, message protocol, APIs, and encapsulation
### Justification
Legibility is key to maintaining bug-free software as it evolves toward more
optimizations, more ease of debugging, and additional features.
It is too easy to introduce bugs over time by replacing lines of code with
lines that may panic, which is why locks should ideally be released by defer
statements.
For example,
```go
func (obj *MyObj) something() {
mtx.Lock()
obj.something = other
mtx.Unlock()
}
```
It is too easy to refactor the codebase in the future to replace `other` with
`other.String()` for example, and this may introduce a bug that causes a
deadlock. So as much as reasonably possible, we need to be using defer
statements, even though it introduces additional overhead.
If it is necessary to optimize the unlocking of mutex locks, the solution is
more modularity via smaller functions, so that defer'd unlocks are scoped
within a smaller function.
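For contrast, a minimal sketch of the defer-based form the paragraph above argues for, using the same hypothetical `MyObj` and package-level `mtx` as in the example:
```go
func (obj *MyObj) something() {
	mtx.Lock()
	defer mtx.Unlock() // still unlocks if the body below later changes to something that panics
	obj.something = other
}
```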
Similarly, idiomatic for-loops should always be preferred over those that use
custom counters, because it is too easy to evolve the body of a for-loop to
become more complicated over time, and it becomes more and more difficult to
assess the correctness of such a for-loop by visual inspection.
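A minimal sketch of the distinction, with hypothetical `txs` and `process` placeholders:
```go
// Idiomatic: the range clause owns the iteration state.
for i, tx := range txs {
	process(i, tx)
}

// Harder to evolve safely: a hand-rolled counter that every future edit
// of the loop body must keep consistent.
i := 0
for i < len(txs) {
	process(i, txs[i])
	i++
}
```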
### On performance
It doesn't matter whether there are alternative implementations that are 2x or
3x more performant if the software doesn't work, deadlocks, or cannot be
debugged. By taking advantage of multicore concurrency, the Tendermint
implementation will be at least within an order of magnitude of what is
theoretically possible. The design philosophy of Tendermint, and the choice of
Go as the implementation language, is meant to make the Tendermint
implementation the standard specification for concurrent BFT software.
By focusing on the message protocols (e.g. ABCI, p2p messages) and on
encapsulation (e.g. the IAVL module, (relatively) independent reactors), we are
both building a reference implementation that can serve as the specification
for future implementations in more optimizable languages like Rust, Java, and
C++, and creating sufficiently performant software. Tendermint Core will
never be as fast as future implementations of the Tendermint Spec, because Go
isn't designed to be as fast as possible. The advantage of using Go is that we
can develop the whole stack of modular components **faster** than in other
languages.
Furthermore, the real bottleneck is in the application layer, and it isn't
necessary to support more than a sufficiently decentralized set of validators
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).
Instead of optimizing Tendermint performance down to the metal, let's focus on
other matters, namely the ability to ship feature-complete software
that works well enough, can be debugged and maintained, and can serve as a spec
for future implementations.
### On encapsulation
In order to create maintainable, forward-optimizable software, it is critical
to develop well-encapsulated objects that have well understood properties, and
to re-use these easy-to-use-correctly components as building blocks for further
encapsulated meta-objects.
For example, mutexes are cheap enough for Tendermint's design goals when there
isn't goroutine contention, so it is encouraged to create concurrency safe
structures with struct-level mutexes. If they are used in the context of
non-concurrent logic, then the performance is good enough. If they are used in
the context of concurrent logic, then it will still perform correctly.
Examples of this design principle can be seen in the types.ValidatorSet struct,
and the cmn.Rand struct. It's one single struct declaration that can be used
in both concurrent and non-concurrent logic, and due to its good encapsulation,
it's easy to get the usage of the mutex right.
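A minimal sketch of that pattern with a hypothetical type (not from the Tendermint codebase): a struct-level mutex makes the value safe to share, and costs little when it is only ever used from one goroutine.
```go
package example

import "sync"

// SafeSet is safe for concurrent use because it carries its own mutex;
// in single-goroutine code the uncontended locks are cheap.
type SafeSet struct {
	mtx   sync.Mutex
	items map[string]struct{}
}

func NewSafeSet() *SafeSet {
	return &SafeSet{items: make(map[string]struct{})}
}

func (s *SafeSet) Add(key string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.items[key] = struct{}{}
}

func (s *SafeSet) Has(key string) bool {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	_, ok := s.items[key]
	return ok
}
```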
#### example: cmn.Rand:
`The default Source is safe for concurrent use by multiple goroutines, but
Sources created by NewSource are not`. The reason why the default
package-level source is safe for concurrent use is because it is protected (see
`lockedSource` in https://golang.org/src/math/rand/rand.go).
But we shouldn't rely on the global source; we should create our own
Rand/Source instances and use them, especially for determinism in testing.
So it is reasonable to have cmn.Rand be protected by a mutex. Whether we want
our own implementation of Rand is another question, but the answer there is
also in the affirmative. Sometimes you want to know where Rand is being used
in your code, so it becomes a simple matter of dropping in a log statement to
inject inspectability into Rand usage. Also, it is nice to be able to extend
the functionality of Rand with custom methods. For these reasons, and for the
reasons outlined in this design philosophy document, we should
continue to use the cmn.Rand object, with mutex protection.
Another key aspect of good encapsulation is the choice of exposed vs unexposed
methods. It should be clear to the reader of the code, which methods are
intended to be used in what context, and what safe usage is. Part of this is
solved by hiding methods via unexported methods. Another part of this is
naming conventions on the methods (e.g. underscores) with good documentation,
and code organization. If there are too many exposed methods and it isn't
clear which methods have which side effects, then something is wrong with the
design of the abstractions and it should be revisited.
### On concurrency
In order for Tendermint to remain relevant in the years to come, it is vital
for Tendermint to take advantage of multicore architectures. Due to the nature
of the problem, namely consensus across a concurrent p2p gossip network, and to
handle RPC requests for a large number of consuming subscribers, it is
unavoidable for Tendermint development to require expertise in concurrency
design, especially when it comes to the reactor design, and also for RPC
request handling.
## Guidelines
Here are some guidelines for designing for (sufficient) performance and concurrency:
* Mutex locks are cheap enough when there isn't contention.
* Do not optimize code without analytical or observed proof that it is in a hot path.
* Don't over-use channels when mutex locks w/ encapsulation are sufficient (see the sketch after this list).
* The need to drain channels is often a hint of unconsidered edge cases.
* The creation of O(N) one-off goroutines is generally technical debt that
needs to be addressed sooner rather than later. Avoid creating too many
goroutines as a patch around incomplete concurrency design, or at least be
aware of the debt and do not invest in the debt. On the other hand, Tendermint
is designed to have a limited number of peers (e.g. 10 or 20), so the creation
of O(C) goroutines per O(P) peers is still O(C\*P=constant).
* Use defer statements to unlock as much as possible. If you want to unlock sooner,
try to create more modular functions that do make use of defer statements.
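As an illustration of the channel guideline above, a minimal sketch comparing the two styles for a hypothetical counter (not from the codebase):
```go
package example

import "sync"

// Mutex style: the state and its lock live together; cheap when uncontended.
type counter struct {
	mtx sync.Mutex
	n   int
}

func (c *counter) incr() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.n++
}

// Channel style: the same job needs a dedicated goroutine plus a shutdown
// and drain story, which is more machinery than this problem requires.
func startCounter() (incr func(), stop func()) {
	ch := make(chan struct{})
	done := make(chan struct{})
	go func() {
		n := 0
		for range ch {
			n++
		}
		_ = n
		close(done)
	}()
	incr = func() { ch <- struct{}{} }
	stop = func() { close(ch); <-done }
	return incr, stop
}
```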
## Mantras
* Premature optimization kills
* Readability is paramount
* Beautiful is better than fast.
* In the face of ambiguity, refuse the temptation to guess.
* In the face of bugs, refuse the temptation to cover the bug.
* There should be one-- and preferably only one --obvious way to do it.

README.md (+1, -0)

@ -96,6 +96,7 @@ include the in-process Go APIs.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:
- crypto
- types
- rpc/client
- config


Vagrantfile (+1, -1)

@ -53,6 +53,6 @@ Vagrant.configure("2") do |config|
# get all deps and tools, ready to install/test
su - vagrant -c 'source /home/vagrant/.bash_profile'
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_dev_tools && make get_vendor_deps'
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps'
SHELL
end

abci/types/messages_test.go (+1, -1)

@ -83,7 +83,7 @@ func TestWriteReadMessage2(t *testing.T) {
Log: phrase,
GasWanted: 10,
Tags: []cmn.KVPair{
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
{Key: []byte("abc"), Value: []byte("def")},
},
},
// TODO: add the rest


benchmarks/codec_test.go (+1, -1)

@ -4,7 +4,7 @@ import (
"testing"
"time"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/crypto/ed25519"


blockchain/pool.go (+2, -1)

@ -363,7 +363,8 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
pool.errorsCh <- peerError{err, peerID}
}
// unused by tendermint; left for debugging purposes
// for debugging purposes
//nolint:unused
func (pool *BlockPool) debug() string {
pool.mtx.Lock()
defer pool.mtx.Unlock()


blockchain/reactor_test.go (+2, -2)

@ -100,8 +100,8 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
lastCommit = &types.Commit{Precommits: []*types.Vote{vote}, BlockID: lastBlockMeta.BlockID}
vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig()
lastCommit = &types.Commit{Precommits: []*types.CommitSig{vote}, BlockID: lastBlockMeta.BlockID}
}
thisBlock := makeBlock(blockHeight, state, lastCommit)


blockchain/store_test.go (+14, -9)

@ -6,6 +6,7 @@ import (
"runtime/debug"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -20,6 +21,15 @@ import (
tmtime "github.com/tendermint/tendermint/types/time"
)
// make a Commit with a single vote containing just the height and a timestamp
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
return &types.Commit{
Precommits: []*types.CommitSig{
{Height: height, Timestamp: timestamp},
},
}
}
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
@ -86,8 +96,7 @@ var (
partSet = block.MakePartSet(2)
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
seenCommit1 = makeTestCommit(10, tmtime.Now())
)
// TODO: This test should be simplified ...
@ -107,8 +116,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
// save a block
block := makeBlock(bs.Height()+1, state, new(types.Commit))
validPartSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
seenCommit := makeTestCommit(10, tmtime.Now())
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
@ -127,8 +135,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
// End of setup, test data
commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
commitAtH10 := makeTestCommit(10, tmtime.Now())
tuples := []struct {
block *types.Block
parts *types.PartSet
@ -351,9 +358,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
block := makeBlock(bs.Height()+1, state, new(types.Commit))
partSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
seenCommit := makeTestCommit(10, tmtime.Now())
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")


blockchain/wire.go (+1, -1)

@ -1,7 +1,7 @@
package blockchain
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
)


cmd/tendermint/commands/root_test.go (+0, -4)

@ -22,10 +22,6 @@ var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)
const (
rootName = "root"
)
// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
if err := os.Unsetenv("TMHOME"); err != nil {


cmd/tendermint/commands/wire.go (+1, -1)

@ -1,7 +1,7 @@
package commands
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)


config/toml.go (+11, -2)

@ -2,6 +2,7 @@ package config
import (
"bytes"
"fmt"
"os"
"path/filepath"
"text/template"
@ -317,6 +318,10 @@ namespace = "{{ .Instrumentation.Namespace }}"
/****** these are for test settings ***********/
func ResetTestRoot(testName string) *Config {
return ResetTestRootWithChainID(testName, "")
}
func ResetTestRootWithChainID(testName string, chainID string) *Config {
rootDir := os.ExpandEnv("$HOME/.tendermint_test")
rootDir = filepath.Join(rootDir, testName)
// Remove ~/.tendermint_test_bak
@ -353,6 +358,10 @@ func ResetTestRoot(testName string) *Config {
writeDefaultConfigFile(configFilePath)
}
if !cmn.FileExists(genesisFilePath) {
if chainID == "" {
chainID = "tendermint_test"
}
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
}
// we always overwrite the priv val
@ -363,9 +372,9 @@ func ResetTestRoot(testName string) *Config {
return config
}
var testGenesis = `{
var testGenesisFmt = `{
"genesis_time": "2018-10-10T08:20:13.695936996Z",
"chain_id": "tendermint_test",
"chain_id": "%s",
"validators": [
{
"pub_key": {


consensus/byzantine_test.go (+1, -2)

@ -76,8 +76,7 @@ func TestByzantine(t *testing.T) {
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)
var conRI p2p.Reactor // nolint: gotype, gosimple
conRI = conR
var conRI p2p.Reactor = conR
// make first val byzantine
if i == 0 {


consensus/common_test.go (+0, -30)

@ -378,36 +378,6 @@ func ensureNewEvent(
}
}
func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) {
ensureNewEvent(
stepCh,
height,
round,
ensureTimeout,
"Timeout expired while waiting for NewStep event")
}
func ensureNewVote(voteCh <-chan interface{}, height int64, round int) {
select {
case <-time.After(ensureTimeout):
break
case v := <-voteCh:
edv, ok := v.(types.EventDataVote)
if !ok {
panic(fmt.Sprintf("expected a *types.Vote, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(v)))
}
vote := edv.Vote
if vote.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
}
if vote.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
}
}
}
func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
select {
case <-time.After(ensureTimeout):


consensus/reactor.go (+4, -4)

@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cstypes "github.com/tendermint/tendermint/consensus/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmevents "github.com/tendermint/tendermint/libs/events"
@ -438,9 +438,9 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
nrsMsg = &NewRoundStepMessage{
Height: rs.Height,
Round: rs.Round,
Step: rs.Step,
Height: rs.Height,
Round: rs.Round,
Step: rs.Step,
SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.Round(),
}


consensus/reactor_test.go (+1, -1)

@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/client"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
bc "github.com/tendermint/tendermint/blockchain"


consensus/replay.go (+12, -2)

@ -6,6 +6,7 @@ import (
"hash/crc32"
"io"
"reflect"
//"strconv"
//"strings"
"time"
@ -143,8 +144,8 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
if err == io.EOF {
break
} else if IsDataCorruptionError(err) {
cs.Logger.Debug("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight)
panic(fmt.Sprintf("data has been corrupted (%v) in last height %d of consensus WAL", err, csHeight))
cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight)
return err
} else if err != nil {
return err
}
@ -196,6 +197,7 @@ type Handshaker struct {
stateDB dbm.DB
initialState sm.State
store sm.BlockStore
eventBus types.BlockEventPublisher
genDoc *types.GenesisDoc
logger log.Logger
@ -209,6 +211,7 @@ func NewHandshaker(stateDB dbm.DB, state sm.State,
stateDB: stateDB,
initialState: state,
store: store,
eventBus: types.NopEventBus{},
genDoc: genDoc,
logger: log.NewNopLogger(),
nBlocks: 0,
@ -219,6 +222,12 @@ func (h *Handshaker) SetLogger(l log.Logger) {
h.logger = l
}
// SetEventBus - sets the event bus for publishing block related events.
// If not called, it defaults to types.NopEventBus.
func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
h.eventBus = eventBus
}
func (h *Handshaker) NBlocks() int {
return h.nBlocks
}
@ -432,6 +441,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
meta := h.store.LoadBlockMeta(height)
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{})
blockExec.SetEventBus(h.eventBus)
var err error
state, err = blockExec.ApplyBlock(state, meta.BlockID, block)


consensus/replay_file.go (+6, -5)

@ -326,17 +326,18 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
}
eventBus := types.NewEventBus()
if err := eventBus.Start(); err != nil {
cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
}
handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
handshaker.SetEventBus(eventBus)
err = handshaker.Handshake(proxyApp)
if err != nil {
cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
}
eventBus := types.NewEventBus()
if err := eventBus.Start(); err != nil {
cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
}
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)


consensus/replay_test.go (+1, -1)

@ -539,7 +539,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
if p.Type == types.PrecommitType {
thisBlockCommit = &types.Commit{
BlockID: p.BlockID,
Precommits: []*types.Vote{p},
Precommits: []*types.CommitSig{p.CommitSig()},
}
}
}


consensus/state.go (+30, -4)

@ -94,7 +94,7 @@ type ConsensusState struct {
// internal state
mtx sync.RWMutex
cstypes.RoundState
state sm.State // State until height-1.
state sm.State // State until height-1.
// state changes may be triggered by: msgs from peers,
// msgs from ourself, or by timeouts
@ -307,6 +307,23 @@ func (cs *ConsensusState) OnStart() error {
// reload from consensus log to catchup
if cs.doWALCatchup {
if err := cs.catchupReplay(cs.Height); err != nil {
// don't try to recover from data corruption error
if IsDataCorruptionError(err) {
cs.Logger.Error("Encountered corrupt WAL file", "err", err.Error())
cs.Logger.Error("Please repair the WAL file before restarting")
fmt.Println(`You can attempt to repair the WAL as follows:
----
WALFILE=~/.tendermint/data/cs.wal/wal
cp $WALFILE ${WALFILE}.bak # backup the file
go run scripts/wal2json/main.go $WALFILE > wal.json # this will panic, but can be ignored
rm $WALFILE # remove the corrupt file
go run scripts/json2wal/main.go wal.json $WALFILE # rebuild the file without corruption
----`)
return err
}
cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "err", err.Error())
// NOTE: if we ever do return an error here,
// make sure to stop the timeoutTicker
@ -437,7 +454,7 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
//cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(tmtime.Now()) // nolint: gotype, gosimple
sleepDuration := rs.StartTime.Sub(tmtime.Now())
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}
@ -472,7 +489,7 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
if precommit == nil {
continue
}
added, err := lastPrecommits.AddVote(precommit)
added, err := lastPrecommits.AddVote(seenCommit.ToVote(precommit))
if !added || err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
}
@ -624,6 +641,15 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
cs.handleMsg(mi)
case mi = <-cs.internalMsgQueue:
cs.wal.WriteSync(mi) // NOTE: fsync
if _, ok := mi.Msg.(*VoteMessage); ok {
// we actually want to simulate failing during
// the previous WriteSync, but this isn't easy to do.
// Equivalent would be to fail here and manually remove
// some bytes from the end of the wal.
fail.Fail() // XXX
}
// handles proposals, block parts, votes
cs.handleMsg(mi)
case ti := <-cs.timeoutTicker.Chan(): // tockChan:
@ -1330,7 +1356,7 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
missingValidators := 0
missingValidatorsPower := int64(0)
for i, val := range cs.Validators.Validators {
var vote *types.Vote
var vote *types.CommitSig
if i < len(block.LastCommit.Precommits) {
vote = block.LastCommit.Precommits[i]
}


consensus/state_test.go (+0, -4)

@ -22,10 +22,6 @@ func init() {
config = ResetConfig("consensus_state_test")
}
func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration {
return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond
}
/*
ProposeSuite


consensus/types/round_state_test.go (+4, -4)

@ -3,7 +3,7 @@ package types
import (
"testing"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
@ -16,7 +16,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
// Random validators
nval, ntxs := 100, 100
vset, _ := types.RandValidatorSet(nval, 1)
precommits := make([]*types.Vote, nval)
precommits := make([]*types.CommitSig, nval)
blockID := types.BlockID{
Hash: cmn.RandBytes(20),
PartsHeader: types.PartSetHeader{
@ -25,12 +25,12 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
}
sig := make([]byte, ed25519.SignatureSize)
for i := 0; i < nval; i++ {
precommits[i] = &types.Vote{
precommits[i] = (&types.Vote{
ValidatorAddress: types.Address(cmn.RandBytes(20)),
Timestamp: tmtime.Now(),
BlockID: blockID,
Signature: sig,
}
}).CommitSig()
}
txs := make([]types.Tx, ntxs)
for i := 0; i < ntxs; i++ {


consensus/wal.go (+16, -7)

@ -112,11 +112,20 @@ func (wal *baseWAL) OnStart() error {
return err
}
// Stop the underlying autofile group.
// Use Wait() to ensure it's finished shutting down
// before cleaning up files.
func (wal *baseWAL) OnStop() {
wal.group.Stop()
wal.group.Close()
}
// Wait for the underlying autofile group to finish shutting down
// so it's safe to cleanup files.
func (wal *baseWAL) Wait() {
wal.group.Wait()
}
// Write is called in newStep and for each receive on the
// peerMsgQueue and the timeoutTicker.
// NOTE: does not call fsync()
@ -163,7 +172,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
// NOTE: starting from the last file in the group because we're usually
// searching for the last height. See replay.go
min, max := wal.group.MinIndex(), wal.group.MaxIndex()
wal.Logger.Debug("Searching for height", "height", height, "min", min, "max", max)
wal.Logger.Info("Searching for height", "height", height, "min", min, "max", max)
for index := max; index >= min; index-- {
gr, err = wal.group.NewReader(index)
if err != nil {
@ -183,7 +192,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
break
}
if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
wal.Logger.Debug("Corrupted entry. Skipping...", "err", err)
wal.Logger.Error("Corrupted entry. Skipping...", "err", err)
// do nothing
continue
} else if err != nil {
@ -194,7 +203,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
if m, ok := msg.Msg.(EndHeightMessage); ok {
lastHeightFound = m.Height
if m.Height == height { // found
wal.Logger.Debug("Found", "height", height, "index", index)
wal.Logger.Info("Found", "height", height, "index", index)
return gr, true, nil
}
}
@ -281,25 +290,25 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
return nil, err
}
if err != nil {
return nil, fmt.Errorf("failed to read checksum: %v", err)
return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)}
}
crc := binary.BigEndian.Uint32(b)
b = make([]byte, 4)
_, err = dec.rd.Read(b)
if err != nil {
return nil, fmt.Errorf("failed to read length: %v", err)
return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)}
}
length := binary.BigEndian.Uint32(b)
if length > maxMsgSizeBytes {
return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
return nil, DataCorruptionError{fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)}
}
data := make([]byte, length)
_, err = dec.rd.Read(data)
if err != nil {
return nil, fmt.Errorf("failed to read data: %v", err)
return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v", err)}
}
// check checksum before decoding data


consensus/wal_test.go (+9, -3)

@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
// "sync"
"testing"
"time"
@ -38,7 +39,12 @@ func TestWALTruncate(t *testing.T) {
wal.SetLogger(log.TestingLogger())
err = wal.Start()
require.NoError(t, err)
defer wal.Stop()
defer func() {
wal.Stop()
// wait for the wal to finish shutting down so we
// can safely remove the directory
wal.Wait()
}()
//60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file.
//at this time, RotateFile is called, truncate content exist in each file.
@ -67,8 +73,8 @@ func TestWALTruncate(t *testing.T) {
func TestWALEncoderDecoder(t *testing.T) {
now := tmtime.Now()
msgs := []TimedWALMessage{
TimedWALMessage{Time: now, Msg: EndHeightMessage{0}},
TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
{Time: now, Msg: EndHeightMessage{0}},
{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
}
b := new(bytes.Buffer)


consensus/wire.go (+1, -1)

@ -1,7 +1,7 @@
package consensus
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
)


crypto/encoding/amino/encode_test.go (+2, -3)

@ -25,9 +25,8 @@ func checkAminoBinary(t *testing.T, src, dst interface{}, size int) {
assert.Equal(t, byterSrc.Bytes(), bz, "Amino binary vs Bytes() mismatch")
}
// Make sure we have the expected length.
if size != -1 {
assert.Equal(t, size, len(bz), "Amino binary size mismatch")
}
assert.Equal(t, size, len(bz), "Amino binary size mismatch")
// Unmarshal.
err = cdc.UnmarshalBinaryBare(bz, dst)
require.Nil(t, err, "%+v", err)


crypto/merkle/proof.go (+1, -1)

@ -98,7 +98,7 @@ func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
}
func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
var poz ProofOperators
poz := make(ProofOperators, 0, len(proof.Ops))
for _, pop := range proof.Ops {
operator, err := prt.Decode(pop)
if err != nil {


crypto/merkle/proof_test.go (+2, -1)

@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tendermint/libs/common"
)
@ -26,6 +26,7 @@ func NewDominoOp(key, input, output string) DominoOp {
}
}
//nolint:unused
func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
if pop.Type != ProofOpDomino {
panic("unexpected proof op type")


crypto/merkle/rfc6962_test.go (+1, -1)

@ -26,7 +26,7 @@ import (
func TestRFC6962Hasher(t *testing.T) {
_, leafHashTrail := trailsFromByteSlices([][]byte{[]byte("L123456")})
leafHash := leafHashTrail.Hash
_, leafHashTrail = trailsFromByteSlices([][]byte{[]byte{}})
_, leafHashTrail = trailsFromByteSlices([][]byte{{}})
emptyLeafHash := leafHashTrail.Hash
for _, tc := range []struct {
desc string


crypto/merkle/wire.go (+1, -1)

@ -1,7 +1,7 @@
package merkle
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
)
var cdc *amino.Codec


crypto/random.go (+0, -104)

@ -1,42 +1,11 @@
package crypto
import (
"crypto/cipher"
crand "crypto/rand"
"crypto/sha256"
"encoding/hex"
"io"
"sync"
"golang.org/x/crypto/chacha20poly1305"
)
// NOTE: This is ignored for now until we have time
// to properly review the MixEntropy function - https://github.com/tendermint/tendermint/issues/2099.
//
// The randomness here is derived from xoring a chacha20 keystream with
// output from crypto/rand's OS Entropy Reader. (Due to fears of the OS'
// entropy being backdoored)
//
// For forward secrecy of produced randomness, the internal chacha key is hashed
// and thereby rotated after each call.
var gRandInfo *randInfo
func init() {
gRandInfo = &randInfo{}
// TODO: uncomment after reviewing MixEntropy -
// https://github.com/tendermint/tendermint/issues/2099
// gRandInfo.MixEntropy(randBytes(32)) // Init
}
// WARNING: This function needs review - https://github.com/tendermint/tendermint/issues/2099.
// Mix additional bytes of randomness, e.g. from hardware, user-input, etc.
// It is OK to call it multiple times. It does not diminish security.
func MixEntropy(seedBytes []byte) {
gRandInfo.MixEntropy(seedBytes)
}
// This only uses the OS's randomness
func randBytes(numBytes int) []byte {
b := make([]byte, numBytes)
@ -52,19 +21,6 @@ func CRandBytes(numBytes int) []byte {
return randBytes(numBytes)
}
/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099
// This uses the OS and the Seed(s).
func CRandBytes(numBytes int) []byte {
return randBytes(numBytes)
b := make([]byte, numBytes)
_, err := gRandInfo.Read(b)
if err != nil {
panic(err)
}
return b
}
*/
// CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long.
//
// Note: CRandHex(24) gives 96 bits of randomness that
@ -77,63 +33,3 @@ func CRandHex(numDigits int) string {
func CReader() io.Reader {
return crand.Reader
}
/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099
// Returns a crand.Reader mixed with user-supplied entropy
func CReader() io.Reader {
return gRandInfo
}
*/
//--------------------------------------------------------------------------------
type randInfo struct {
mtx sync.Mutex
seedBytes [chacha20poly1305.KeySize]byte
chacha cipher.AEAD
reader io.Reader
}
// You can call this as many times as you'd like.
// XXX/TODO: review - https://github.com/tendermint/tendermint/issues/2099
func (ri *randInfo) MixEntropy(seedBytes []byte) {
ri.mtx.Lock()
defer ri.mtx.Unlock()
// Make new ri.seedBytes using passed seedBytes and current ri.seedBytes:
// ri.seedBytes = sha256( seedBytes || ri.seedBytes )
h := sha256.New()
h.Write(seedBytes)
h.Write(ri.seedBytes[:])
hashBytes := h.Sum(nil)
copy(ri.seedBytes[:], hashBytes)
chacha, err := chacha20poly1305.New(ri.seedBytes[:])
if err != nil {
panic("Initializing chacha20 failed")
}
ri.chacha = chacha
// Create new reader
ri.reader = &cipher.StreamReader{S: ri, R: crand.Reader}
}
func (ri *randInfo) XORKeyStream(dst, src []byte) {
// nonce being 0 is safe due to never re-using a key.
emptyNonce := make([]byte, 12)
tmpDst := ri.chacha.Seal([]byte{}, emptyNonce, src, []byte{0})
// this removes the poly1305 tag as well, since chacha is a stream cipher
// and we truncate at input length.
copy(dst, tmpDst[:len(src)])
// hash seedBytes for forward secrecy, and initialize new chacha instance
newSeed := sha256.Sum256(ri.seedBytes[:])
chacha, err := chacha20poly1305.New(newSeed[:])
if err != nil {
panic("Initializing chacha20 failed")
}
ri.chacha = chacha
}
func (ri *randInfo) Read(b []byte) (n int, err error) {
ri.mtx.Lock()
n, err = ri.reader.Read(b)
ri.mtx.Unlock()
return
}

crypto/secp256k1/secp256k1.go (+4, -26)

@ -7,10 +7,12 @@ import (
"fmt"
"io"
secp256k1 "github.com/tendermint/btcd/btcec"
amino "github.com/tendermint/go-amino"
"golang.org/x/crypto/ripemd160"
secp256k1 "github.com/btcsuite/btcd/btcec"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
)
@ -44,16 +46,6 @@ func (privKey PrivKeySecp256k1) Bytes() []byte {
return cdc.MustMarshalBinaryBare(privKey)
}
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) {
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
sig, err := priv.Sign(crypto.Sha256(msg))
if err != nil {
return nil, err
}
return sig.Serialize(), nil
}
// PubKey performs the point-scalar multiplication from the privKey on the
// generator point to get the pubkey.
func (privKey PrivKeySecp256k1) PubKey() crypto.PubKey {
@ -137,20 +129,6 @@ func (pubKey PubKeySecp256k1) Bytes() []byte {
return bz
}
func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool {
pub, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256())
if err != nil {
return false
}
parsedSig, err := secp256k1.ParseSignature(sig[:], secp256k1.S256())
if err != nil {
return false
}
// Underlying library ensures that this signature is in canonical form, to
// prevent Secp256k1 malleability from altering the sign of the s term.
return parsedSig.Verify(crypto.Sha256(msg), pub)
}
func (pubKey PubKeySecp256k1) String() string {
return fmt.Sprintf("PubKeySecp256k1{%X}", pubKey[:])
}


crypto/secp256k1/secp256k1_cgo.go (+24, -0)

@ -0,0 +1,24 @@
// +build libsecp256k1
package secp256k1
import (
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/tendermint/tendermint/crypto"
)
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) {
rsv, err := secp256k1.Sign(crypto.Sha256(msg), privKey[:])
if err != nil {
return nil, err
}
// we do not need v in r||s||v:
rs := rsv[:len(rsv)-1]
return rs, nil
}
func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool {
return secp256k1.VerifySignature(pubKey[:], crypto.Sha256(msg), sig)
}

+ 71
- 0
crypto/secp256k1/secp256k1_nocgo.go View File

@ -0,0 +1,71 @@
// +build !libsecp256k1
package secp256k1
import (
"math/big"
secp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/tendermint/tendermint/crypto"
)
// used to reject malleable signatures
// see:
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
var secp256k1N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
var secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
// The returned signature will be of the form R || S (in lower-S form).
func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) {
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
sig, err := priv.Sign(crypto.Sha256(msg))
if err != nil {
return nil, err
}
sigBytes := serializeSig(sig)
return sigBytes, nil
}
// VerifyBytes verifies a signature of the form R || S.
// It rejects signatures which are not in lower-S form.
func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sigStr []byte) bool {
if len(sigStr) != 64 {
return false
}
pub, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256())
if err != nil {
return false
}
// parse the signature:
signature := signatureFromBytes(sigStr)
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
if signature.S.Cmp(secp256k1halfN) > 0 {
return false
}
return signature.Verify(crypto.Sha256(msg), pub)
}
// Read Signature struct from R || S. Caller needs to ensure
// that len(sigStr) == 64.
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
return &secp256k1.Signature{
new(big.Int).SetBytes(sigStr[:32]),
new(big.Int).SetBytes(sigStr[32:64]),
}
}
// Serialize signature to R || S.
// R, S are padded to 32 bytes respectively.
func serializeSig(sig *secp256k1.Signature) []byte {
rBytes := sig.R.Bytes()
sBytes := sig.S.Bytes()
sigBytes := make([]byte, 64)
// 0 pad the byte arrays from the left if they aren't big enough.
copy(sigBytes[32-len(rBytes):32], rBytes)
copy(sigBytes[64-len(sBytes):64], sBytes)
return sigBytes
}
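A short, hypothetical sanity check (assuming the package context above, i.e. `GenPrivKey` and `secp256k1halfN` as they appear in this diff) that exercises the lower-S property the comments describe:

```go
// +build !libsecp256k1

package secp256k1

import (
	"math/big"
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical sketch: Sign keeps S in the lower half of the curve order,
// so VerifyBytes accepts the signature as canonical.
func TestLowerSSketch(t *testing.T) {
	priv := GenPrivKey()
	msg := []byte("lower-S check")

	sig, err := priv.Sign(msg)
	require.NoError(t, err)
	require.Len(t, sig, 64) // R || S, 32 bytes each

	s := new(big.Int).SetBytes(sig[32:64])
	require.True(t, s.Cmp(secp256k1halfN) <= 0, "S should be in lower-S form")
	require.True(t, priv.PubKey().VerifyBytes(msg, sig))
}
```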

+ 39
- 0
crypto/secp256k1/secp256k1_nocgo_test.go View File

@ -0,0 +1,39 @@
// +build !libsecp256k1
package secp256k1
import (
"testing"
secp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/stretchr/testify/require"
)
// Ensure that signature verification works, and that
// non-canonical signatures fail.
// Note: run with CGO_ENABLED=0 or go test -tags !cgo.
func TestSignatureVerificationAndRejectUpperS(t *testing.T) {
msg := []byte("We have lingered long enough on the shores of the cosmic ocean.")
for i := 0; i < 500; i++ {
priv := GenPrivKey()
sigStr, err := priv.Sign(msg)
require.NoError(t, err)
sig := signatureFromBytes(sigStr)
require.False(t, sig.S.Cmp(secp256k1halfN) > 0)
pub := priv.PubKey()
require.True(t, pub.VerifyBytes(msg, sigStr))
// malleate:
sig.S.Sub(secp256k1.S256().CurveParams.N, sig.S)
require.True(t, sig.S.Cmp(secp256k1halfN) > 0)
malSigStr := serializeSig(sig)
require.False(t, pub.VerifyBytes(msg, malSigStr),
"VerifyBytes incorrect with malleated & invalid S. sig=%v, key=%v",
sig,
priv,
)
}
}

+ 1
- 1
crypto/secp256k1/secpk256k1_test.go View File

@ -11,7 +11,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/secp256k1"
underlyingSecp256k1 "github.com/tendermint/btcd/btcec"
underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
)
type keyData struct {


+ 0
- 1
crypto/xsalsa20symmetric/symmetric.go View File

@ -17,7 +17,6 @@ const secretLen = 32
// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase))
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
// NOTE: call crypto.MixEntropy() first.
func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) {
if len(secret) != secretLen {
cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))


+ 0
- 4
crypto/xsalsa20symmetric/symmetric_test.go View File

@ -13,8 +13,6 @@ import (
func TestSimple(t *testing.T) {
crypto.MixEntropy([]byte("someentropy"))
plaintext := []byte("sometext")
secret := []byte("somesecretoflengththirtytwo===32")
ciphertext := EncryptSymmetric(plaintext, secret)
@ -26,8 +24,6 @@ func TestSimple(t *testing.T) {
func TestSimpleWithKDF(t *testing.T) {
crypto.MixEntropy([]byte("someentropy"))
plaintext := []byte("sometext")
secretPass := []byte("somesecret")
secret, err := bcrypt.GenerateFromPassword(secretPass, 12)


+ 6
- 5
docs/.vuepress/config.js View File

@ -21,7 +21,7 @@ module.exports = {
},
nav: [
{ text: "Back to Tendermint", link: "https://tendermint.com" },
{ text: "RPC", link: "../rpc/" }
{ text: "RPC", link: "https://tendermint.com/rpc/" }
],
sidebar: [
{
@ -79,10 +79,11 @@ module.exports = {
title: "Tools",
collapsable: false,
children: [
"/tools/",
"/tools/benchmarking",
"/tools/monitoring"
]
"/tools/",
"/tools/benchmarking",
"/tools/monitoring",
"/tools/remote-signer-validation"
]
},
{
title: "Tendermint Spec",


+ 78
- 24
docs/architecture/adr-033-pubsub.md View File

@ -5,13 +5,17 @@ Author: Anton Kaliaev (@melekes)
## Changelog
02-10-2018: Initial draft
16-01-2019: Second version based on our conversation with Jae
17-01-2019: Third version explaining how new design solves current issues
25-01-2019: Fourth version to treat buffered and unbuffered channels differently
## Context
Since the initial version of the pubsub, there's been a number of issues
raised: #951, #1879, #1880. Some of them are high-level issues questioning the
raised: [#951], [#1879], [#1880]. Some of them are high-level issues questioning the
core design choices made. Others are minor and mostly about the interface of
`Subscribe()` / `Publish()` functions.
@ -51,7 +55,10 @@ channels to distribute msg to these goroutines).
### Non-blocking send
There is also a question whether we should have a non-blocking send:
There is also a question whether we should have a non-blocking send.
Currently, sends are blocking, so publishing to one client can block on
publishing to another. This means a slow or unresponsive client can halt the
system. Instead, we can use a non-blocking send:
```go
for each subscriber {
@ -87,11 +94,26 @@ Go channels are de-facto standard for carrying data between goroutines.
### Why `Subscribe()` accepts an `out` channel?
Because in our tests, we create buffered channels (cap: 1). Alternatively, we
can make capacity an argument.
can make capacity an argument and return a channel.
## Decision
Change Subscribe() function to return a `Subscription` struct:
### MsgAndTags
Use a `MsgAndTags` struct on the subscription channel to indicate what tags the
msg matched.
```go
type MsgAndTags struct {
Msg interface{}
Tags TagMap
}
```
### Subscription Struct
Change `Subscribe()` function to return a `Subscription` struct:
```go
type Subscription struct {
@ -103,18 +125,18 @@ func (s *Subscription) Cancelled() <-chan struct{}
func (s *Subscription) Err() error
```
Out returns a channel onto which messages and tags are published.
Unsubscribe/UnsubscribeAll does not close the channel to avoid clients from
`Out()` returns a channel onto which messages and tags are published.
`Unsubscribe`/`UnsubscribeAll` does not close the channel, to prevent clients from
receiving a nil message.
Cancelled returns a channel that's closed when the subscription is terminated
`Cancelled()` returns a channel that's closed when the subscription is terminated
and supposed to be used in a select statement.
If Cancelled is not closed yet, Err() returns nil.
If Cancelled is closed, Err returns a non-nil error explaining why:
Unsubscribed if the subscriber choose to unsubscribe,
OutOfCapacity if the subscriber is not pulling messages fast enough and the Out channel become full.
After Err returns a non-nil error, successive calls to Err() return the same error.
If the channel returned by `Cancelled()` is not closed yet, `Err()` returns nil.
If the channel is closed, `Err()` returns a non-nil error explaining why:
`ErrUnsubscribed` if the subscriber chooses to unsubscribe,
`ErrOutOfCapacity` if the subscriber is not pulling messages fast enough and the channel returned by `Out()` becomes full.
After `Err()` returns a non-nil error, successive calls to `Err()` return the same error.
```go
subscription, err := pubsub.Subscribe(...)
@ -130,42 +152,68 @@ select {
}
```
Make Out() channel buffered (cap: 1) by default. In most cases, we want to
### Capacity and Subscriptions
Make the `Out()` channel buffered (with capacity 1) by default. In most cases, we want to
terminate the slow subscriber. Only in rare cases do we want to block the pubsub
(e.g. when debugging consensus). This should lower the chances of the pubsub
being frozen.
```go
// outCap can be used to set capacity of Out channel (1 by default). Set to 0
for unbuffered channel (WARNING: it may block the pubsub).
// outCap can be used to set capacity of Out channel
// (1 by default, must be greater than 0).
Subscribe(ctx context.Context, clientID string, query Query, outCap... int) (Subscription, error) {
```
Also, Out() channel should return tags along with a message:
Use a different function for an unbuffered channel:
```go
type MsgAndTags struct {
Msg interface{}
Tags TagMap
}
// Subscription uses an unbuffered channel. Publishing will block.
SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (Subscription, error) {
```
to inform clients of which Tags were used with Msg.
SubscribeUnbuffered should not be exposed to users.
### Blocking/Nonblocking
The publisher should treat these kinds of channels separately.
It should block on unbuffered channels (for use with internal consensus events
in the consensus tests) and not block on the buffered ones. If a client is too
slow to keep up with its messages, its subscription is terminated:
for each subscription {
out := subscription.outChan
if cap(out) == 0 {
// block on unbuffered channel
out <- msg
} else {
// don't block on buffered channels
select {
case out <- msg:
default:
// set the error, notify on the cancel chan
subscription.err = fmt.Errorf("client is too slow for msg")
close(subscription.cancelChan)
// ... unsubscribe and close out
}
}
}
### How does this new design solve the current issues?
https://github.com/tendermint/tendermint/issues/951 (https://github.com/tendermint/tendermint/issues/1880)
[#951] ([#1880]):
Because of the non-blocking send, a situation where we deadlock is no longer
possible. If the client stops reading messages, it will be removed.
https://github.com/tendermint/tendermint/issues/1879
[#1879]:
MsgAndTags is used now instead of a plain message.
### Future problems and their possible solutions
https://github.com/tendermint/tendermint/issues/2826
[#2826]
One question I am still pondering: how to prevent the pubsub from slowing
down consensus. We can increase the pubsub queue size (which is 0 now). Also,
@ -191,3 +239,9 @@ In review
- (since v1) no concurrency when it comes to publishing messages
### Neutral
[#951]: https://github.com/tendermint/tendermint/issues/951
[#1879]: https://github.com/tendermint/tendermint/issues/1879
[#1880]: https://github.com/tendermint/tendermint/issues/1880
[#2826]: https://github.com/tendermint/tendermint/issues/2826

+ 1
- 1
docs/tendermint-core/rpc.md View File

@ -2,6 +2,6 @@
The RPC documentation is hosted here:
- https://tendermint.com/rpc/
- [https://tendermint.com/rpc/](https://tendermint.com/rpc/)
To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core).

+ 5
- 2
docs/tools/README.md View File

@ -1,4 +1,7 @@
# Overview
Tendermint comes with some tools for [benchmarking](./benchmarking.md)
and [monitoring](./monitoring.md).
Tendermint comes with some tools for:
* [Benchmarking](./benchmarking.md)
* [Monitoring](./monitoring.md)
* [Validation of remote signers](./remote-signer-validation.md)

+ 1
- 1
docs/tools/benchmarking.md View File

@ -2,7 +2,7 @@
Tendermint blockchain benchmarking tool:
- https://github.com/tendermint/tools/tree/master/tm-bench
- [https://github.com/tendermint/tendermint/tree/master/tools/tm-bench](https://github.com/tendermint/tendermint/tree/master/tools/tm-bench)
For example, the following:


+ 1
- 1
docs/tools/monitoring.md View File

@ -3,7 +3,7 @@
Tendermint blockchain monitoring tool; watches over one or more nodes,
collecting and providing various statistics to the user:
- https://github.com/tendermint/tendermint/tree/master/tools/tm-monitor
- [https://github.com/tendermint/tendermint/tree/master/tools/tm-monitor](https://github.com/tendermint/tendermint/tree/master/tools/tm-monitor)
## Quick Start


+ 146
- 0
docs/tools/remote-signer-validation.md View File

@ -0,0 +1,146 @@
# tm-signer-harness
Located under the `tools/tm-signer-harness` folder in the [Tendermint
repository](https://github.com/tendermint/tendermint).
The Tendermint remote signer test harness facilitates integration testing
between Tendermint and remote signers such as
[KMS](https://github.com/tendermint/kms). Such remote signers allow for signing
of important Tendermint messages using
[HSMs](https://en.wikipedia.org/wiki/Hardware_security_module), providing
additional security.
When executed, `tm-signer-harness`:
1. Runs a listener (either TCP or Unix sockets).
2. Waits for a connection from the remote signer.
3. Upon connection from the remote signer, executes a number of automated tests
to ensure compatibility.
4. Upon successful validation, the harness process exits with a 0 exit code.
Upon validation failure, it exits with a particular exit code related to the
error.
## Prerequisites
Requires the same prerequisites as for building
[Tendermint](https://github.com/tendermint/tendermint).
## Building
From the `tools/tm-signer-harness` directory in your Tendermint source
repository, simply run:
```bash
make
# To have global access to this executable
make install
```
## Docker Image
To build a Docker image containing the `tm-signer-harness`, also from the
`tools/tm-signer-harness` directory of your Tendermint source repo, simply run:
```bash
make docker-image
```
## Running against KMS
As an example of how to use `tm-signer-harness`, the following instructions show
you how to execute its tests against [KMS](https://github.com/tendermint/kms).
For this example, we will make use of the **software signing module in KMS**, as
the hardware signing module requires a physical
[YubiHSM](https://www.yubico.com/products/yubihsm/) device.
### Step 1: Install KMS on your local machine
See the [KMS repo](https://github.com/tendermint/kms) for details on how to set
KMS up on your local machine.
If you have [Rust](https://www.rust-lang.org/) installed on your local machine,
you can simply install KMS by:
```bash
cargo install tmkms
```
### Step 2: Make keys for KMS
The KMS software signing module needs a key with which to sign messages. In our
example, we will simply export a signing key from our local Tendermint instance.
```bash
# Will generate all necessary Tendermint configuration files, including:
# - ~/.tendermint/config/priv_validator_key.json
# - ~/.tendermint/data/priv_validator_state.json
tendermint init
# Extract the signing key from our local Tendermint instance
tm-signer-harness extract_key \ # Use the "extract_key" command
-tmhome ~/.tendermint \ # Where to find the Tendermint home directory
-output ./signing.key # Where to write the key
```
Also, because we want KMS to connect to `tm-signer-harness`, we will need to
provide a secret connection key from KMS' side:
```bash
tmkms keygen secret_connection.key
```
### Step 3: Configure and run KMS
KMS needs some configuration to tell it to use the software signing module as well
as the `signing.key` file we just generated. Save the following to a file called
`tmkms.toml`:
```toml
[[validator]]
addr = "tcp://127.0.0.1:61219" # This is where we will find tm-signer-harness.
chain_id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (found in ~/.tendermint/config/genesis.json).
reconnect = true # true is the default
secret_key = "./secret_connection.key" # Where to find our secret connection key.
[[providers.softsign]]
id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (same as validator.chain_id above).
path = "./signing.key" # The signing key we extracted earlier.
```
Then run KMS with this configuration:
```bash
tmkms start -c tmkms.toml
```
This will start KMS, which will repeatedly try to connect to
`tcp://127.0.0.1:61219` until it is successful.
### Step 4: Run tm-signer-harness
Now we get to run the signer test harness:
```bash
tm-signer-harness run \ # The "run" command executes the tests
-addr tcp://127.0.0.1:61219 \ # The address we promised KMS earlier
-tmhome ~/.tendermint # Where to find our Tendermint configuration/data files.
```
If the current version of Tendermint and KMS are compatible, `tm-signer-harness`
should now exit with a 0 exit code. If they are somehow not compatible, it
should exit with a meaningful non-zero exit code (see the exit codes below).
### Step 5: Shut down KMS
Simply hit Ctrl+Break on your KMS instance (or use the `kill` command in Linux)
to terminate it gracefully.
## Exit Code Meanings
The following list shows the various exit codes from `tm-signer-harness` and
their meanings:
| Exit Code | Description |
| --- | --- |
| 0 | Success! |
| 1 | Invalid command line parameters supplied to `tm-signer-harness` |
| 2 | Maximum number of accept retries reached (the `-accept-retries` parameter) |
| 3 | Failed to load `${TMHOME}/config/genesis.json` |
| 4 | Failed to create listener specified by `-addr` parameter |
| 5 | Failed to start listener |
| 6 | Interrupted by `SIGINT` (e.g. when hitting Ctrl+Break or Ctrl+C) |
| 7 | Other unknown error |
| 8 | Test 1 failed: public key mismatch |
| 9 | Test 2 failed: signing of proposals failed |
| 10 | Test 3 failed: signing of votes failed |

+ 0
- 2
evidence/pool_test.go View File

@ -13,8 +13,6 @@ import (
tmtime "github.com/tendermint/tendermint/types/time"
)
var mockState = sm.State{}
func TestMain(m *testing.M) {
types.RegisterMockEvidences(cdc)


+ 1
- 1
evidence/reactor.go View File

@ -48,7 +48,7 @@ func (evR *EvidenceReactor) SetLogger(l log.Logger) {
// It returns the list of channels for this reactor.
func (evR *EvidenceReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{
{
ID: EvidenceChannel,
Priority: 5,
},


+ 1
- 1
evidence/wire.go View File

@ -1,7 +1,7 @@
package evidence
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
"github.com/tendermint/tendermint/types"
)


+ 12
- 0
libs/autofile/group.go View File

@ -67,6 +67,11 @@ type Group struct {
minIndex int // Includes head
maxIndex int // Includes head, where Head will move to
// close this when the processTicks routine is done.
// this ensures we can cleanup the dir after calling Stop
// and the routine won't be trying to access it anymore
doneProcessTicks chan struct{}
// TODO: When we start deleting files, we need to start tracking GroupReaders
// and their dependencies.
}
@ -90,6 +95,7 @@ func OpenGroup(headPath string, groupOptions ...func(*Group)) (g *Group, err err
groupCheckDuration: defaultGroupCheckDuration,
minIndex: 0,
maxIndex: 0,
doneProcessTicks: make(chan struct{}),
}
for _, option := range groupOptions {
@ -140,6 +146,11 @@ func (g *Group) OnStop() {
g.Flush() // flush any uncommitted data
}
func (g *Group) Wait() {
// wait for processTicks routine to finish
<-g.doneProcessTicks
}
// Close closes the head file. The group must be stopped by this moment.
func (g *Group) Close() {
g.Flush() // flush any uncommitted data
@ -211,6 +222,7 @@ func (g *Group) Flush() error {
}
func (g *Group) processTicks() {
defer close(g.doneProcessTicks)
for {
select {
case <-g.ticker.C:
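The `doneProcessTicks` additions follow the usual wait-for-goroutine pattern: the routine closes a done channel when it returns, so `Wait()` blocks until cleanup is safe. A minimal, self-contained sketch with hypothetical names (not the actual `Group` internals):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	stop := make(chan struct{})

	go func() {
		defer close(done) // signal that the routine has fully exited
		for {
			select {
			case <-time.After(10 * time.Millisecond):
				// process a tick...
			case <-stop:
				return
			}
		}
	}()

	close(stop) // Stop()
	<-done      // Wait(): safe to clean up the directory now
	fmt.Println("routine finished")
}
```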


+ 12
- 10
libs/clist/clist_test.go View File

@ -65,12 +65,13 @@ func TestSmall(t *testing.T) {
}
/*
This test is quite hacky because it relies on SetFinalizer
which isn't guaranteed to run at all.
*/
// nolint: megacheck
// This test is quite hacky because it relies on SetFinalizer
// which isn't guaranteed to run at all.
//nolint:unused,deadcode
func _TestGCFifo(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skipf("Skipping on non-amd64 machine")
}
const numElements = 1000000
l := New()
@ -113,12 +114,13 @@ func _TestGCFifo(t *testing.T) {
}
}
/*
This test is quite hacky because it relies on SetFinalizer
which isn't guaranteed to run at all.
*/
// nolint: megacheck
// This test is quite hacky because it relies on SetFinalizer
// which isn't guaranteed to run at all.
//nolint:unused,deadcode
func _TestGCRandom(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skipf("Skipping on non-amd64 machine")
}
const numElements = 1000000
l := New()


+ 1
- 1
libs/common/colors.go View File

@ -43,7 +43,7 @@ func treat(s string, color string) string {
}
func treatAll(color string, args ...interface{}) string {
var parts []string
parts := make([]string, 0, len(args))
for _, arg := range args {
parts = append(parts, treat(fmt.Sprintf("%v", arg), color))
}


+ 17
- 0
libs/common/net.go View File

@ -24,3 +24,20 @@ func ProtocolAndAddress(listenAddr string) (string, string) {
}
return protocol, address
}
// GetFreePort gets a free port from the operating system.
// Ripped from https://github.com/phayes/freeport.
// BSD-licensed.
func GetFreePort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
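A small usage sketch (hypothetical; assumes this package is imported as `cmn`, per the repo convention): grab a free port before starting a test listener. The port is only guaranteed free at the time of the call, so treat it as best-effort.

```go
package main

import (
	"fmt"
	"log"
	"net"

	cmn "github.com/tendermint/tendermint/libs/common"
)

func main() {
	port, err := cmn.GetFreePort()
	if err != nil {
		log.Fatal(err)
	}
	addr := fmt.Sprintf("localhost:%d", port)

	// Bind quickly; another process could still grab the port in between.
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}
```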

+ 1
- 1
libs/events/events.go View File

@ -188,7 +188,7 @@ func (cell *eventCell) RemoveListener(listenerID string) int {
func (cell *eventCell) FireEvent(data EventData) {
cell.mtx.RLock()
var eventCallbacks []EventCallback
eventCallbacks := make([]EventCallback, 0, len(cell.listeners))
for _, cb := range cell.listeners {
eventCallbacks = append(eventCallbacks, cb)
}


+ 3
- 2
libs/fail/fail.go View File

@ -72,7 +72,8 @@ func FailRand(n int) {
func Exit() {
fmt.Printf("*** fail-test %d ***\n", callIndex)
proc, _ := os.FindProcess(os.Getpid())
proc.Signal(os.Interrupt)
os.Exit(1)
// proc, _ := os.FindProcess(os.Getpid())
// proc.Signal(os.Interrupt)
// panic(fmt.Sprintf("*** fail-test %d ***", callIndex))
}

+ 8
- 8
libs/flowrate/io_test.go View File

@ -81,12 +81,12 @@ func TestReader(t *testing.T) {
// Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress
want := []Status{
Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0},
Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0},
Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0},
Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0},
{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0},
{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0},
{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
}
for i, s := range status {
if !statusesAreEqual(&s, &want[i]) {
@ -139,8 +139,8 @@ func TestWriter(t *testing.T) {
// Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress
want := []Status{
Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000},
Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000},
{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000},
{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000},
}
for i, s := range status {
if !statusesAreEqual(&s, &want[i]) {


+ 85
- 43
libs/pubsub/pubsub.go View File

@ -101,7 +101,7 @@ type Server struct {
cmdsCap int
mtx sync.RWMutex
subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query
subscriptions map[string]map[string]struct{} // subscriber -> query (string) -> empty struct
}
// Option sets a parameter for the server.
@ -143,7 +143,7 @@ func (ts tagMap) Len() int {
// provided, the resulting server's queue is unbuffered.
func NewServer(options ...Option) *Server {
s := &Server{
subscriptions: make(map[string]map[string]Query),
subscriptions: make(map[string]map[string]struct{}),
}
s.BaseService = *cmn.NewBaseService(nil, "PubSub", s)
@ -193,11 +193,9 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou
case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}:
s.mtx.Lock()
if _, ok = s.subscriptions[clientID]; !ok {
s.subscriptions[clientID] = make(map[string]Query)
s.subscriptions[clientID] = make(map[string]struct{})
}
// preserve original query
// see Unsubscribe
s.subscriptions[clientID][query.String()] = query
s.subscriptions[clientID][query.String()] = struct{}{}
s.mtx.Unlock()
return nil
case <-ctx.Done():
@ -211,22 +209,23 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou
// returned to the caller if the context is canceled or if subscription does
// not exist.
func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error {
var origQuery Query
s.mtx.RLock()
clientSubscriptions, ok := s.subscriptions[clientID]
if ok {
origQuery, ok = clientSubscriptions[query.String()]
_, ok = clientSubscriptions[query.String()]
}
s.mtx.RUnlock()
if !ok {
return ErrSubscriptionNotFound
}
// original query is used here because we're using pointers as map keys
select {
case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}:
case s.cmds <- cmd{op: unsub, clientID: clientID, query: query}:
s.mtx.Lock()
delete(clientSubscriptions, query.String())
if len(clientSubscriptions) == 0 {
delete(s.subscriptions, clientID)
}
s.mtx.Unlock()
return nil
case <-ctx.Done():
@ -286,17 +285,27 @@ func (s *Server) OnStop() {
// NOTE: not goroutine safe
type state struct {
// query -> client -> ch
queries map[Query]map[string]chan<- interface{}
// client -> query -> struct{}
clients map[string]map[Query]struct{}
// query string -> client -> ch
queryToChanMap map[string]map[string]chan<- interface{}
// client -> query string -> struct{}
clientToQueryMap map[string]map[string]struct{}
// query string -> queryPlusRefCount
queries map[string]*queryPlusRefCount
}
// queryPlusRefCount holds a pointer to a query and reference counter. When
// refCount is zero, query will be removed.
type queryPlusRefCount struct {
q Query
refCount int
}
// OnStart implements Service.OnStart by starting the server.
func (s *Server) OnStart() error {
go s.loop(state{
queries: make(map[Query]map[string]chan<- interface{}),
clients: make(map[string]map[Query]struct{}),
queryToChanMap: make(map[string]map[string]chan<- interface{}),
clientToQueryMap: make(map[string]map[string]struct{}),
queries: make(map[string]*queryPlusRefCount),
})
return nil
}
@ -317,7 +326,7 @@ loop:
state.removeAll(cmd.clientID)
}
case shutdown:
for clientID := range state.clients {
for clientID := range state.clientToQueryMap {
state.removeAll(clientID)
}
break loop
@ -330,66 +339,99 @@ loop:
}
func (state *state) add(clientID string, q Query, ch chan<- interface{}) {
qStr := q.String()
// initialize clientToChannelMap per query if needed
if _, ok := state.queries[q]; !ok {
state.queries[q] = make(map[string]chan<- interface{})
if _, ok := state.queryToChanMap[qStr]; !ok {
state.queryToChanMap[qStr] = make(map[string]chan<- interface{})
}
// create subscription
state.queries[q][clientID] = ch
state.queryToChanMap[qStr][clientID] = ch
// initialize queries if needed
if _, ok := state.queries[qStr]; !ok {
state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0}
}
// increment reference counter
state.queries[qStr].refCount++
// add client if needed
if _, ok := state.clients[clientID]; !ok {
state.clients[clientID] = make(map[Query]struct{})
if _, ok := state.clientToQueryMap[clientID]; !ok {
state.clientToQueryMap[clientID] = make(map[string]struct{})
}
state.clients[clientID][q] = struct{}{}
state.clientToQueryMap[clientID][qStr] = struct{}{}
}
func (state *state) remove(clientID string, q Query) {
clientToChannelMap, ok := state.queries[q]
qStr := q.String()
clientToChannelMap, ok := state.queryToChanMap[qStr]
if !ok {
return
}
ch, ok := clientToChannelMap[clientID]
if ok {
close(ch)
if !ok {
return
}
delete(state.clients[clientID], q)
close(ch)
// if it not subscribed to anything else, remove the client
if len(state.clients[clientID]) == 0 {
delete(state.clients, clientID)
}
// remove the query from client map.
// if client is not subscribed to anything else, remove it.
delete(state.clientToQueryMap[clientID], qStr)
if len(state.clientToQueryMap[clientID]) == 0 {
delete(state.clientToQueryMap, clientID)
}
delete(state.queries[q], clientID)
if len(state.queries[q]) == 0 {
delete(state.queries, q)
}
// remove the client from query map.
// if query has no other clients subscribed, remove it.
delete(state.queryToChanMap[qStr], clientID)
if len(state.queryToChanMap[qStr]) == 0 {
delete(state.queryToChanMap, qStr)
}
// decrease ref counter in queries
state.queries[qStr].refCount--
// remove the query if nobody else is using it
if state.queries[qStr].refCount == 0 {
delete(state.queries, qStr)
}
}
func (state *state) removeAll(clientID string) {
queryMap, ok := state.clients[clientID]
queryMap, ok := state.clientToQueryMap[clientID]
if !ok {
return
}
for q := range queryMap {
ch := state.queries[q][clientID]
for qStr := range queryMap {
ch := state.queryToChanMap[qStr][clientID]
close(ch)
delete(state.queries[q], clientID)
if len(state.queries[q]) == 0 {
delete(state.queries, q)
// remove the client from query map.
// if query has no other clients subscribed, remove it.
delete(state.queryToChanMap[qStr], clientID)
if len(state.queryToChanMap[qStr]) == 0 {
delete(state.queryToChanMap, qStr)
}
// decrease ref counter in queries
state.queries[qStr].refCount--
// remove the query if nobody else is using it
if state.queries[qStr].refCount == 0 {
delete(state.queries, qStr)
}
}
delete(state.clients, clientID)
// remove the client.
delete(state.clientToQueryMap, clientID)
}
func (state *state) send(msg interface{}, tags TagMap) {
for q, clientToChannelMap := range state.queries {
for qStr, clientToChannelMap := range state.queryToChanMap {
q := state.queries[qStr].q
if q.Matches(tags) {
for _, ch := range clientToChannelMap {
ch <- msg


+ 19
- 0
libs/pubsub/pubsub_test.go View File

@ -115,6 +115,25 @@ func TestUnsubscribe(t *testing.T) {
assert.False(t, ok)
}
func TestClientUnsubscribesTwice(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
ch := make(chan interface{})
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch)
require.NoError(t, err)
err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
require.NoError(t, err)
err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
assert.Equal(t, pubsub.ErrSubscriptionNotFound, err)
err = s.UnsubscribeAll(ctx, clientID)
assert.Equal(t, pubsub.ErrSubscriptionNotFound, err)
}
func TestResubscribe(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())


+ 3
- 3
libs/pubsub/query/query_test.go View File

@ -73,9 +73,9 @@ func TestConditions(t *testing.T) {
s string
conditions []query.Condition
}{
{s: "tm.events.type='NewBlock'", conditions: []query.Condition{query.Condition{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}},
{s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{query.Condition{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, query.Condition{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}},
{s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{query.Condition{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}},
{s: "tm.events.type='NewBlock'", conditions: []query.Condition{{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}},
{s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, {Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}},
{s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}},
}
for _, tc := range testCases {


+ 1
- 1
libs/test.sh View File

@ -2,7 +2,7 @@
set -e
# run the linter
# make metalinter_test
# make lint
# setup certs
make gen_certs


+ 3
- 3
lite/helpers.go View File

@ -70,7 +70,7 @@ func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet {
// signHeader properly signs the header with all keys from first to last exclusive.
func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit {
votes := make([]*types.Vote, len(pkz))
commitSigs := make([]*types.CommitSig, len(pkz))
// We need this list to keep the ordering.
vset := pkz.ToValidators(1, 0)
@ -78,12 +78,12 @@ func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Com
// Fill in the votes we want.
for i := first; i < last && i < len(pkz); i++ {
vote := makeVote(header, vset, pkz[i])
votes[vote.ValidatorIndex] = vote
commitSigs[vote.ValidatorIndex] = vote.CommitSig()
}
res := &types.Commit{
BlockID: types.BlockID{Hash: header.Hash()},
Precommits: votes,
Precommits: commitSigs,
}
return res
}


+ 2
- 0
lite/proxy/query_test.go View File

@ -21,6 +21,7 @@ import (
var node *nm.Node
var chainID = "tendermint_test" // TODO use from config.
//nolint:unused
var waitForEventTimeout = 5 * time.Second
// TODO fix tests!!
@ -42,6 +43,7 @@ func kvstoreTx(k, v []byte) []byte {
// TODO: enable it after general proof format has been adapted
// in abci/examples/kvstore.go
//nolint:unused,deadcode
func _TestAppProofs(t *testing.T) {
assert, require := assert.New(t), require.New(t)


+ 5
- 8
mempool/mempool.go View File

@ -408,14 +408,11 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
case *abci.Response_CheckTx:
tx := req.GetCheckTx().Tx
memTx := mem.recheckCursor.Value.(*mempoolTx)
if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) {
cmn.PanicSanity(
fmt.Sprintf(
"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
r.CheckTx.Data,
memTx.tx,
),
)
if !bytes.Equal(tx, memTx.tx) {
panic(fmt.Sprintf(
"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
memTx.tx,
tx))
}
var postCheckErr error
if mem.postCheck != nil {


+ 1
- 1
mempool/wire.go View File

@ -1,7 +1,7 @@
package mempool
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
)
var cdc = amino.NewCodec()


+ 42
- 33
node/node.go View File

@ -34,7 +34,7 @@ import (
rpccore "github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
grpccore "github.com/tendermint/tendermint/rpc/grpc"
"github.com/tendermint/tendermint/rpc/lib/server"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
@ -217,11 +217,51 @@ func NewNode(config *cfg.Config,
return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
}
// EventBus and IndexerService must be started before the handshake because
// we might need to index the txs of the replayed block as this might not have happened
// when the node stopped last time (i.e. the node stopped after it saved the block
// but before it indexed the txs, or, endblocker panicked)
eventBus := types.NewEventBus()
eventBus.SetLogger(logger.With("module", "events"))
err = eventBus.Start()
if err != nil {
return nil, err
}
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.TxIndex.Indexer {
case "kv":
store, err := dbProvider(&DBContext{"tx_index", config})
if err != nil {
return nil, err
}
if config.TxIndex.IndexTags != "" {
txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
} else if config.TxIndex.IndexAllTags {
txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
} else {
txIndexer = kv.NewTxIndex(store)
}
default:
txIndexer = &null.TxIndex{}
}
indexerService := txindex.NewIndexerService(txIndexer, eventBus)
indexerService.SetLogger(logger.With("module", "txindex"))
err = indexerService.Start()
if err != nil {
return nil, err
}
// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
// and replays any blocks as necessary to sync tendermint with the app.
consensusLogger := logger.With("module", "consensus")
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
handshaker.SetLogger(consensusLogger)
handshaker.SetEventBus(eventBus)
if err := handshaker.Handshake(proxyApp); err != nil {
return nil, fmt.Errorf("Error during handshake: %v", err)
}
@ -343,35 +383,10 @@ func NewNode(config *cfg.Config,
consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
consensusReactor.SetLogger(consensusLogger)
eventBus := types.NewEventBus()
eventBus.SetLogger(logger.With("module", "events"))
// services which will be publishing and/or subscribing for messages (events)
// consensusReactor will set it on consensusState and blockExecutor
consensusReactor.SetEventBus(eventBus)
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.TxIndex.Indexer {
case "kv":
store, err := dbProvider(&DBContext{"tx_index", config})
if err != nil {
return nil, err
}
if config.TxIndex.IndexTags != "" {
txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
} else if config.TxIndex.IndexAllTags {
txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
} else {
txIndexer = kv.NewTxIndex(store)
}
default:
txIndexer = &null.TxIndex{}
}
indexerService := txindex.NewIndexerService(txIndexer, eventBus)
indexerService.SetLogger(logger.With("module", "txindex"))
p2pLogger := logger.With("module", "p2p")
nodeInfo, err := makeNodeInfo(
config,
@ -534,11 +549,6 @@ func (n *Node) OnStart() error {
time.Sleep(genTime.Sub(now))
}
err := n.eventBus.Start()
if err != nil {
return err
}
// Add private IDs to addrbook to block those peers being added
n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))
@ -582,8 +592,7 @@ func (n *Node) OnStart() error {
}
}
// start tx indexer
return n.indexerService.Start()
return nil
}
// OnStop stops the Node. It implements cmn.Service.


+ 9
- 10
node/node_test.go View File

@ -88,13 +88,13 @@ func TestSplitAndTrimEmpty(t *testing.T) {
}
}
func TestNodeDelayedStop(t *testing.T) {
config := cfg.ResetTestRoot("node_delayed_node_test")
func TestNodeDelayedStart(t *testing.T) {
config := cfg.ResetTestRoot("node_delayed_start_test")
now := tmtime.Now()
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
n.GenesisDoc().GenesisTime = now.Add(5 * time.Second)
n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)
require.NoError(t, err)
n.Start()
@ -133,6 +133,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
types.NewMockPV(),
dialer,
)
privval.RemoteSignerConnDeadline(100 * time.Millisecond)(pvsc)
go func() {
err := pvsc.Start()
@ -172,20 +173,18 @@ func TestNodeSetPrivValIPC(t *testing.T) {
types.NewMockPV(),
dialer,
)
privval.RemoteSignerConnDeadline(100 * time.Millisecond)(pvsc)
done := make(chan struct{})
go func() {
defer close(done)
n, err := DefaultNewNode(config, log.TestingLogger())
err := pvsc.Start()
require.NoError(t, err)
assert.IsType(t, &privval.SocketVal{}, n.PrivValidator())
}()
defer pvsc.Stop()
err := pvsc.Start()
n, err := DefaultNewNode(config, log.TestingLogger())
require.NoError(t, err)
defer pvsc.Stop()
assert.IsType(t, &privval.SocketVal{}, n.PrivValidator())
<-done
}
// testFreeAddr claims a free port so we don't block on listener being ready.


+ 52
- 32
p2p/conn/connection.go View File

@ -8,6 +8,7 @@ import (
"math"
"net"
"reflect"
"sync"
"sync/atomic"
"time"
@ -84,11 +85,15 @@ type MConnection struct {
errored uint32
config MConnConfig
// Closing quitSendRoutine will cause
// doneSendRoutine to close.
// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
// doneSendRoutine is closed when the sendRoutine actually quits.
quitSendRoutine chan struct{}
doneSendRoutine chan struct{}
// used to ensure FlushStop and OnStop
// are safe to call concurrently.
stopMtx sync.Mutex
flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
pingTimer *cmn.RepeatTimer // send pings periodically
@ -195,46 +200,69 @@ func (c *MConnection) OnStart() error {
if err := c.BaseService.OnStart(); err != nil {
return err
}
c.quitSendRoutine = make(chan struct{})
c.doneSendRoutine = make(chan struct{})
c.flushTimer = cmn.NewThrottleTimer("flush", c.config.FlushThrottle)
c.pingTimer = cmn.NewRepeatTimer("ping", c.config.PingInterval)
c.pongTimeoutCh = make(chan bool, 1)
c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateStats)
c.quitSendRoutine = make(chan struct{})
c.doneSendRoutine = make(chan struct{})
go c.sendRoutine()
go c.recvRoutine()
return nil
}
// stopServices stops the BaseService and timers and closes the quitSendRoutine.
// if the quitSendRoutine was already closed, it returns true, otherwise it returns false.
// It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
func (c *MConnection) stopServices() (alreadyStopped bool) {
c.stopMtx.Lock()
defer c.stopMtx.Unlock()
select {
case <-c.quitSendRoutine:
// already quit via FlushStop or OnStop
return true
default:
}
c.BaseService.OnStop()
c.flushTimer.Stop()
c.pingTimer.Stop()
c.chStatsTimer.Stop()
close(c.quitSendRoutine)
return false
}
// FlushStop replicates the logic of OnStop.
// It additionally ensures that all successful
// .Send() calls will get flushed before closing
// the connection.
// NOTE: it is not safe to call this method more than once.
func (c *MConnection) FlushStop() {
c.BaseService.OnStop()
c.flushTimer.Stop()
c.pingTimer.Stop()
c.chStatsTimer.Stop()
if c.quitSendRoutine != nil {
close(c.quitSendRoutine)
if c.stopServices() {
return
}
// this block is unique to FlushStop
{
// wait until the sendRoutine exits
// so we dont race on calling sendSomePacketMsgs
<-c.doneSendRoutine
}
// Send and flush all pending msgs.
// By now, IsRunning == false,
// so any concurrent attempts to send will fail.
// Since sendRoutine has exited, we can call this
// safely
eof := c.sendSomePacketMsgs()
for !eof {
eof = c.sendSomePacketMsgs()
// Send and flush all pending msgs.
// By now, IsRunning == false,
// so any concurrent attempts to send will fail.
// Since sendRoutine has exited, we can call this
// safely
eof := c.sendSomePacketMsgs()
for !eof {
eof = c.sendSomePacketMsgs()
}
c.flush()
// Now we can close the connection
}
c.flush()
// Now we can close the connection
c.conn.Close() // nolint: errcheck
// We can't close pong safely here because
@ -247,18 +275,10 @@ func (c *MConnection) FlushStop() {
// OnStop implements BaseService
func (c *MConnection) OnStop() {
select {
case <-c.quitSendRoutine:
// already quit via FlushStop
if c.stopServices() {
return
default:
}
c.BaseService.OnStop()
c.flushTimer.Stop()
c.pingTimer.Stop()
c.chStatsTimer.Stop()
close(c.quitSendRoutine)
c.conn.Close() // nolint: errcheck
// We can't close pong safely here because
@ -416,7 +436,6 @@ FOR_LOOP:
c.sendMonitor.Update(int(_n))
c.flush()
case <-c.quitSendRoutine:
close(c.doneSendRoutine)
break FOR_LOOP
case <-c.send:
// Send some PacketMsgs
@ -442,6 +461,7 @@ FOR_LOOP:
// Cleanup
c.stopPongTimer()
close(c.doneSendRoutine)
}
// Returns true if messages from channels were exhausted.
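The `stopServices` helper above implements a stop-once guard: a mutex plus a select on the quit channel lets `FlushStop` and `OnStop` race safely. A simplified, hypothetical sketch of the same pattern outside `MConnection`:

```go
package main

import (
	"fmt"
	"sync"
)

type stopper struct {
	mtx  sync.Mutex
	quit chan struct{}
}

func newStopper() *stopper {
	return &stopper{quit: make(chan struct{})}
}

// stop closes quit exactly once; callers learn whether someone beat them to it.
func (s *stopper) stop() (alreadyStopped bool) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	select {
	case <-s.quit:
		return true // already stopped by a concurrent caller
	default:
	}
	close(s.quit)
	return false
}

func main() {
	s := newStopper()
	fmt.Println(s.stop()) // false: we stopped it
	fmt.Println(s.stop()) // true: already stopped
}
```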


+ 1
- 1
p2p/conn/connection_test.go View File

@ -30,7 +30,7 @@ func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msg
cfg := DefaultMConnConfig()
cfg.PingInterval = 90 * time.Millisecond
cfg.PongTimeout = 45 * time.Millisecond
chDescs := []*ChannelDescriptor{&ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
c := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg)
c.SetLogger(log.TestingLogger())
return c


+ 52
- 0
p2p/conn/secret_connection.go View File

@ -4,6 +4,7 @@ import (
"bytes"
crand "crypto/rand"
"crypto/sha256"
"crypto/subtle"
"encoding/binary"
"errors"
"io"
@ -28,6 +29,8 @@ const aeadSizeOverhead = 16 // overhead of poly 1305 authentication tag
const aeadKeySize = chacha20poly1305.KeySize
const aeadNonceSize = chacha20poly1305.NonceSize
var ErrSmallOrderRemotePubKey = errors.New("detected low order point from remote peer")
// SecretConnection implements net.Conn.
// It is an implementation of the STS protocol.
// See https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf for
@ -251,6 +254,9 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3
if err2 != nil {
return nil, err2, true // abort
}
if hasSmallOrder(_remEphPub) {
return nil, ErrSmallOrderRemotePubKey, true
}
return _remEphPub, nil, false
},
)
@ -266,6 +272,52 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3
return &_remEphPub, nil
}
// use the same blacklist as libsodium (see https://eprint.iacr.org/2017/806.pdf for reference):
// https://github.com/jedisct1/libsodium/blob/536ed00d2c5e0c65ac01e29141d69a30455f2038/src/libsodium/crypto_scalarmult/curve25519/ref10/x25519_ref10.c#L11-L17
var blacklist = [][32]byte{
// 0 (order 4)
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
// 1 (order 1)
{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
// 325606250916557431795983626356110631294008115727848805560023387167927233504
// (order 8)
{0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, 0x16, 0x56, 0xe3,
0xfa, 0xf1, 0x9f, 0xc4, 0x6a, 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32,
0xb1, 0xfd, 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00},
// 39382357235489614581723060781553021112529911719440698176882885853963445705823
// (order 8)
{0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, 0xb1, 0xd0, 0xb1,
0x55, 0x9c, 0x83, 0xef, 0x5b, 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c,
0x8e, 0x86, 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57},
// p-1 (order 2)
{0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
// p (=0, order 4)
{0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
// p+1 (=1, order 1)
{0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
}
func hasSmallOrder(pubKey [32]byte) bool {
isSmallOrderPoint := false
for _, bl := range blacklist {
if subtle.ConstantTimeCompare(pubKey[:], bl[:]) == 1 {
isSmallOrderPoint = true
break
}
}
return isSmallOrderPoint
}
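For illustration, a hypothetical sanity check (assuming the package context above, including `genEphKeys` as used in the tests below): every blacklisted point is reported as small order, while a fresh ephemeral key is not.

```go
package conn

import "testing"

// Hypothetical sketch, not part of the actual test suite.
func TestHasSmallOrderSketch(t *testing.T) {
	for _, bl := range blacklist {
		if !hasSmallOrder(bl) {
			t.Fatalf("expected blacklisted point %X to be small order", bl)
		}
	}
	locEphPub, _ := genEphKeys()
	if hasSmallOrder(*locEphPub) {
		t.Fatalf("fresh ephemeral key unexpectedly flagged as small order")
	}
}
```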
func deriveSecretAndChallenge(dhSecret *[32]byte, locIsLeast bool) (recvSecret, sendSecret *[aeadKeySize]byte, challenge *[32]byte) {
hash := sha256.New
hkdf := hkdf.New(hash, dhSecret[:], nil, []byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN"))


+ 26
- 9
p2p/conn/secret_connection_test.go View File

@ -100,6 +100,32 @@ func TestSecretConnectionHandshake(t *testing.T) {
}
}
func TestShareLowOrderPubkey(t *testing.T) {
var fooConn, barConn = makeKVStoreConnPair()
locEphPub, _ := genEphKeys()
// all blacklisted low order points:
for _, remLowOrderPubKey := range blacklist {
_, _ = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) {
_, err = shareEphPubKey(fooConn, locEphPub)
require.Error(t, err)
require.Equal(t, err, ErrSmallOrderRemotePubKey)
return nil, nil, false
},
func(_ int) (val interface{}, err error, abort bool) {
readRemKey, err := shareEphPubKey(barConn, &remLowOrderPubKey)
require.NoError(t, err)
require.Equal(t, locEphPub, readRemKey)
return nil, nil, false
})
}
}
func TestConcurrentWrite(t *testing.T) {
fooSecConn, barSecConn := makeSecretConnPair(t)
fooWriteText := cmn.RandStr(dataMaxSize)
@ -372,12 +398,3 @@ func BenchmarkSecretConnection(b *testing.B) {
}
//barSecConn.Close() race condition
}
func fingerprint(bz []byte) []byte {
const fbsize = 40
if len(bz) < fbsize {
return bz
} else {
return bz[:fbsize]
}
}

+ 1
- 1
p2p/conn/wire.go View File

@ -1,7 +1,7 @@
package conn
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)


+ 25
- 2
p2p/pex/addrbook.go View File

@ -369,6 +369,10 @@ func (a *addrBook) GetSelection() []*p2p.NetAddress {
return allAddr[:numAddresses]
}
func percentageOfNum(p, n int) int {
return int(math.Round((float64(p) / float64(100)) * float64(n)))
}
// GetSelectionWithBias implements AddrBook.
// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
// Must never return a nil address.
@ -408,11 +412,28 @@ func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddre
newBucketToAddrsMap := make(map[int]map[string]struct{})
var newIndex int
// initialize counters used to count old and new added addresses.
// len(oldBucketToAddrsMap) cannot be used as multiple addresses can end up in the same bucket.
var oldAddressesAdded int
var newAddressesAdded int
// number of new addresses that, if possible, should be in the beginning of the selection
numRequiredNewAdd := percentageOfNum(biasTowardsNewAddrs, numAddresses)
selectionIndex := 0
ADDRS_LOOP:
for selectionIndex < numAddresses {
pickFromOldBucket := int((float64(selectionIndex)/float64(numAddresses))*100) >= biasTowardsNewAddrs
pickFromOldBucket = (pickFromOldBucket && a.nOld > 0) || a.nNew == 0
// biasedTowardsOldAddrs indicates if the selection can switch to old addresses
biasedTowardsOldAddrs := selectionIndex >= numRequiredNewAdd
// An old address is selected if:
// - the bias is for old and old addresses are still available, or
// - there are no new addresses or all new addresses have been selected.
// numAddresses <= a.nOld + a.nNew therefore it is guaranteed that there are enough
// addresses to fill the selection
pickFromOldBucket :=
(biasedTowardsOldAddrs && oldAddressesAdded < a.nOld) ||
a.nNew == 0 || newAddressesAdded >= a.nNew
bucket := make(map[string]*knownAddress)
// loop until we pick a random non-empty bucket
@ -450,6 +471,7 @@ ADDRS_LOOP:
oldBucketToAddrsMap[oldIndex] = make(map[string]struct{})
}
oldBucketToAddrsMap[oldIndex][selectedAddr.String()] = struct{}{}
oldAddressesAdded++
} else {
if addrsMap, ok := newBucketToAddrsMap[newIndex]; ok {
if _, ok = addrsMap[selectedAddr.String()]; ok {
@ -459,6 +481,7 @@ ADDRS_LOOP:
newBucketToAddrsMap[newIndex] = make(map[string]struct{})
}
newBucketToAddrsMap[newIndex][selectedAddr.String()] = struct{}{}
newAddressesAdded++
}
selection[selectionIndex] = selectedAddr


+ 235
- 24
p2p/pex/addrbook_test.go View File

@ -4,38 +4,18 @@ import (
"encoding/hex"
"fmt"
"io/ioutil"
"math"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
)
func createTempFileName(prefix string) string {
f, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
fname := f.Name()
err = f.Close()
if err != nil {
panic(err)
}
return fname
}
func deleteTempFile(fname string) {
err := os.Remove(fname)
if err != nil {
panic(err)
}
}
func TestAddrBookPickAddress(t *testing.T) {
fname := createTempFileName("addrbook_test")
defer deleteTempFile(fname)
@ -239,6 +219,34 @@ func TestAddrBookRemoveAddress(t *testing.T) {
assert.Equal(t, 0, book.Size())
}
func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) {
// create a book with 10 addresses, 1 good/old and 9 new
book, fname := createAddrBookWithMOldAndNNewAddrs(t, 1, 9)
defer deleteTempFile(fname)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.NotNil(t, addrs)
assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book)
}
func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) {
// create a book with 10 addresses, 9 good/old and 1 new
book, fname := createAddrBookWithMOldAndNNewAddrs(t, 9, 1)
defer deleteTempFile(fname)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.NotNil(t, addrs)
assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book)
}
func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) {
book, fname := createAddrBookWithMOldAndNNewAddrs(t, 0, 0)
defer deleteTempFile(fname)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.Nil(t, addrs)
}
func TestAddrBookGetSelection(t *testing.T) {
fname := createTempFileName("addrbook_test")
defer deleteTempFile(fname)
@ -335,9 +343,16 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) {
good++
}
}
got, expected := int((float64(good)/float64(len(selection)))*100), (100 - biasTowardsNewAddrs)
if got >= expected {
t.Fatalf("expected more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection))
// compute some slack to protect against small differences due to rounding:
slack := int(math.Round(float64(100) / float64(len(selection))))
if got > expected+slack {
t.Fatalf("got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection))
}
if got < expected-slack {
t.Fatalf("got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection))
}
}
@ -417,3 +432,199 @@ func TestPrivatePeers(t *testing.T) {
assert.True(t, ok)
}
}
func testAddrBookAddressSelection(t *testing.T, bookSize int) {
// generate all combinations of old (m) and new addresses
for nOld := 0; nOld <= bookSize; nOld++ {
nNew := bookSize - nOld
dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nNew, nOld)
// create book and get selection
book, fname := createAddrBookWithMOldAndNNewAddrs(t, nOld, nNew)
defer deleteTempFile(fname)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr)
nAddrs := len(addrs)
assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr)
// check there's no nil addresses
for _, addr := range addrs {
if addr == nil {
t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs)
}
}
// XXX: shadowing
nOld, nNew := countOldAndNewAddrsInSelection(addrs, book)
// Given:
// n - num new addrs, m - num old addrs
// k - num new addrs expected in the beginning (based on bias %)
// i=min(n, k), aka expFirstNew
// j=min(m, r-i), aka expOld
//
// We expect this layout:
// indices: 0...i-1 i...i+j-1 i+j...r
// addresses: N0..Ni-1 O0..Oj-1 Ni...
//
// There is at least one partition and at most three.
var (
k = percentageOfNum(biasToSelectNewPeers, nAddrs)
expFirstNew = cmn.MinInt(nNew, k)
expOld = cmn.MinInt(nOld, nAddrs-expFirstNew)
expNew = nAddrs - expOld
expLastNew = expNew - expFirstNew
)
// Verify that the number of old and new addresses are as expected
if nNew < expNew || nNew > expNew {
t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew)
}
if nOld < expOld || nOld > expOld {
t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld)
}
// Verify that the order of addresses is as expected
// Get the sequence types and lengths of the selection
seqLens, seqTypes, err := analyseSelectionLayout(book, addrs)
assert.NoError(t, err, "%s", dbgStr)
// Build a list with the expected lengths of partitions and another with the expected types, e.g.:
// expSeqLens = [10, 22], expSeqTypes = [1, 2]
// means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses.
var expSeqLens []int
var expSeqTypes []int
switch {
case expOld == 0: // all new addresses
expSeqLens = []int{nAddrs}
expSeqTypes = []int{1}
case expFirstNew == 0: // all old addresses
expSeqLens = []int{nAddrs}
expSeqTypes = []int{2}
case nAddrs-expFirstNew-expOld == 0: // new addresses, old addresses
expSeqLens = []int{expFirstNew, expOld}
expSeqTypes = []int{1, 2}
default: // new addresses, old addresses, new addresses
expSeqLens = []int{expFirstNew, expOld, expLastNew}
expSeqTypes = []int{1, 2, 1}
}
assert.Equal(t, expSeqLens, seqLens,
"%s - expected sequence lengths of old/new %v, got %v",
dbgStr, expSeqLens, seqLens)
assert.Equal(t, expSeqTypes, seqTypes,
"%s - expected sequence types (1-new, 2-old) was %v, got %v",
dbgStr, expSeqTypes, seqTypes)
}
}
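To make the partition arithmetic above concrete, here is the same computation for one hypothetical case: a book of 20 addresses (12 old, 8 new), assuming biasToSelectNewPeers is 30% and the selection returns all 20 addresses (both assumptions are for illustration only; cmn.MinInt is the libs/common helper already used in the test):

    nAddrs, nOld, nNew := 20, 12, 8
    k := 30 * nAddrs / 100                         // 6 - what percentageOfNum(30, 20) is assumed to yield
    expFirstNew := cmn.MinInt(nNew, k)             // 6
    expOld := cmn.MinInt(nOld, nAddrs-expFirstNew) // 12
    expNew := nAddrs - expOld                      // 8
    expLastNew := expNew - expFirstNew             // 2
    // Expected layout: 6 new, then 12 old, then 2 new
    //   expSeqLens  = []int{6, 12, 2}
    //   expSeqTypes = []int{1, 2, 1}   (1 = new, 2 = old)
    _ = expLastNew // referenced only in the comment above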
func TestMultipleAddrBookAddressSelection(t *testing.T) {
// test books with smaller size, < N
const N = 32
for bookSize := 1; bookSize < N; bookSize++ {
testAddrBookAddressSelection(t, bookSize)
}
// Test for two books with sizes from the following ranges
ranges := [...][]int{{33, 100}, {100, 175}}
bookSizes := make([]int, 0, len(ranges))
for _, r := range ranges {
bookSizes = append(bookSizes, cmn.RandIntn(r[1]-r[0])+r[0])
}
t.Logf("Testing address selection for the following book sizes %v\n", bookSizes)
for _, bookSize := range bookSizes {
testAddrBookAddressSelection(t, bookSize)
}
}
func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) {
nOld, nNew := countOldAndNewAddrsInSelection(addrs, book)
assert.Equal(t, m, nOld, "old addresses")
assert.Equal(t, n, nNew, "new addresses")
}
func createTempFileName(prefix string) string {
f, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
fname := f.Name()
err = f.Close()
if err != nil {
panic(err)
}
return fname
}
func deleteTempFile(fname string) {
err := os.Remove(fname)
if err != nil {
panic(err)
}
}
func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) {
fname = createTempFileName("addrbook_test")
book = NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
assert.Zero(t, book.Size())
randAddrs := randNetAddressPairs(t, nOld)
for _, addr := range randAddrs {
book.AddAddress(addr.addr, addr.src)
book.MarkGood(addr.addr)
}
randAddrs = randNetAddressPairs(t, nNew)
for _, addr := range randAddrs {
book.AddAddress(addr.addr, addr.src)
}
return
}
func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) {
for _, addr := range addrs {
if book.IsGood(addr) {
nOld++
} else {
nNew++
}
}
return
}
// Analyse the layout of the selection specified by 'addrs'
// Returns:
// - seqLens - the lengths of the sequences of addresses of same type
// - seqTypes - the types of sequences in selection
func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) {
// address types are: 0 - nil, 1 - new, 2 - old
var (
prevType = 0
currentSeqLen = 0
)
for _, addr := range addrs {
addrType := 0
if book.IsGood(addr) {
addrType = 2
} else {
addrType = 1
}
if addrType != prevType && prevType != 0 {
seqLens = append(seqLens, currentSeqLen)
seqTypes = append(seqTypes, prevType)
currentSeqLen = 0
}
currentSeqLen++
prevType = addrType
}
seqLens = append(seqLens, currentSeqLen)
seqTypes = append(seqTypes, prevType)
return
}
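As a quick illustration of the return values (hypothetical selection, assuming IsGood flags the two old entries):

    // selection: [new, new, new, old, old, new]
    // seqLens:   [3, 2, 1]
    // seqTypes:  [1, 2, 1]   (1 = new, 2 = old)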

+ 1
- 2
p2p/pex/pex_reactor.go View File

@ -616,10 +616,9 @@ func (of oldestFirst) Less(i, j int) bool { return of[i].LastAttempt.Before(of[j
// getPeersToCrawl returns addresses of potential peers that we wish to validate.
// NOTE: The status information is ordered as described above.
func (r *PEXReactor) getPeersToCrawl() []crawlPeerInfo {
var of oldestFirst
// TODO: be more selective
addrs := r.book.ListOfKnownAddresses()
of := make(oldestFirst, 0, len(addrs))
for _, addr := range addrs {
if len(addr.ID()) == 0 {
continue // don't use peers without an ID


+ 75
- 0
p2p/pex/pex_reactor_test.go View File

@ -316,6 +316,81 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
// TODO: test
}
// connect a peer to a seed, wait a bit, then stop it.
// this should give it time to request addrs and for the seed
// to call FlushStop, and allows us to test calling Stop concurrently
// with FlushStop. Before a fix, this non-deterministically reproduced
// https://github.com/tendermint/tendermint/issues/3231.
func TestPEXReactorSeedModeFlushStop(t *testing.T) {
N := 2
switches := make([]*p2p.Switch, N)
// directory to store address books
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
books := make([]*addrBook, N)
logger := log.TestingLogger()
// create switches
for i := 0; i < N; i++ {
switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
books[i].SetLogger(logger.With("pex", i))
sw.SetAddrBook(books[i])
sw.SetLogger(logger.With("pex", i))
config := &PEXReactorConfig{}
if i == 0 {
// first one is a seed node
config = &PEXReactorConfig{SeedMode: true}
}
r := NewPEXReactor(books[i], config)
r.SetLogger(logger.With("pex", i))
r.SetEnsurePeersPeriod(250 * time.Millisecond)
sw.AddReactor("pex", r)
return sw
})
}
for _, sw := range switches {
err := sw.Start() // start switch and reactors
require.Nil(t, err)
}
reactor := switches[0].Reactors()["pex"].(*PEXReactor)
peerID := switches[1].NodeInfo().ID()
err = switches[1].DialPeerWithAddress(switches[0].NodeInfo().NetAddress(), false)
assert.NoError(t, err)
// sleep up to a second while waiting for the peer to send us a message.
// this isn't perfect since it's possible the peer sends us a msg and we FlushStop
// before this loop catches it. but non-deterministically it works pretty well.
for i := 0; i < 1000; i++ {
v := reactor.lastReceivedRequests.Get(string(peerID))
if v != nil {
break
}
time.Sleep(time.Millisecond)
}
// by now the FlushStop should have happened. Try stopping the peer.
// it should be safe to do this.
peers := switches[0].Peers().List()
for _, peer := range peers {
peer.Stop()
}
// stop the switches
for _, s := range switches {
s.Stop()
}
}
func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
peer := p2p.CreateRandomPeer(false)


+ 1
- 1
p2p/pex/wire.go View File

@ -1,7 +1,7 @@
package pex
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
)
var cdc *amino.Codec = amino.NewCodec()


+ 3
- 5
p2p/switch.go View File

@ -480,14 +480,12 @@ func (sw *Switch) acceptRoutine() {
metrics: sw.metrics,
})
if err != nil {
switch err.(type) {
switch err := err.(type) {
case ErrRejected:
rErr := err.(ErrRejected)
if rErr.IsSelf() {
if err.IsSelf() {
// Remove the given address from the address book and add to our addresses
// to avoid dialing in the future.
addr := rErr.Addr()
addr := err.Addr()
sw.addrBook.RemoveAddress(&addr)
sw.addrBook.AddOurAddress(&addr)
}
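The hunk above switches to Go's typed switch binding: `switch err := err.(type)` rebinds err to the concrete type inside each case, so the separate err.(ErrRejected) assertion is no longer needed. A minimal standalone sketch of the same idiom (the error type here is made up for illustration):

    type timeoutError struct{ op string }

    func (e timeoutError) Error() string { return e.op + " timed out" }

    func describe(err error) string {
    	switch err := err.(type) { // err has the concrete type inside each case
    	case timeoutError:
    		return "retryable: " + err.op // no extra type assertion needed
    	default:
    		return err.Error()
    	}
    }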


+ 7
- 17
p2p/test_util.go View File

@ -125,7 +125,7 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
return err
}
ni, err := handshake(conn, 50*time.Millisecond, sw.nodeInfo)
ni, err := handshake(conn, time.Second, sw.nodeInfo)
if err != nil {
if err := conn.Close(); err != nil {
sw.Logger.Error("Error closing connection", "err", err)
@ -247,35 +247,25 @@ func testNodeInfo(id ID, name string) NodeInfo {
}
func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo {
port, err := getFreePort()
if err != nil {
panic(err)
}
return DefaultNodeInfo{
ProtocolVersion: defaultProtocolVersion,
ID_: id,
ListenAddr: fmt.Sprintf("127.0.0.1:%d", port),
ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()),
Network: network,
Version: "1.2.3-rc0-deadbeef",
Channels: []byte{testCh},
Moniker: name,
Other: DefaultNodeInfoOther{
TxIndex: "on",
RPCAddress: fmt.Sprintf("127.0.0.1:%d", port),
RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()),
},
}
}
func getFreePort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
func getFreePort() int {
port, err := cmn.GetFreePort()
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
panic(err)
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
return port
}

+ 4
- 4
p2p/transport_test.go View File

@ -498,13 +498,13 @@ func TestTransportConnDuplicateIPFilter(t *testing.T) {
)
cs.Set(c, []net.IP{
net.IP{10, 0, 10, 1},
net.IP{10, 0, 10, 2},
net.IP{10, 0, 10, 3},
{10, 0, 10, 1},
{10, 0, 10, 2},
{10, 0, 10, 3},
})
if err := filter(cs, c, []net.IP{
net.IP{10, 0, 10, 2},
{10, 0, 10, 2},
}); err == nil {
t.Errorf("expected Peer to be rejected as duplicate")
}


+ 1
- 0
p2p/trust/metric_test.go View File

@ -65,6 +65,7 @@ func TestTrustMetricCopyNilPointer(t *testing.T) {
}
// XXX: This test fails non-deterministically
//nolint:unused,deadcode
func _TestTrustMetricStopPause(t *testing.T) {
// The TestTicker will provide manual control over
// the passing of time within the metric


+ 1
- 1
p2p/wire.go View File

@ -1,7 +1,7 @@
package p2p
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)


+ 13
- 11
privval/client.go View File

@ -53,9 +53,11 @@ type SocketVal struct {
// reset if the connection fails.
// failures are detected by a background
// ping routine.
// All messages are request/response, so we hold the mutex
// so only one request/response pair can happen at a time.
// Methods on the underlying net.Conn itself
// are already goroutine safe.
mtx sync.RWMutex
mtx sync.Mutex
signer *RemoteSignerClient
}
@ -82,22 +84,22 @@ func NewSocketVal(
// GetPubKey implements PrivValidator.
func (sc *SocketVal) GetPubKey() crypto.PubKey {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.GetPubKey()
}
// SignVote implements PrivValidator.
func (sc *SocketVal) SignVote(chainID string, vote *types.Vote) error {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.SignVote(chainID, vote)
}
// SignProposal implements PrivValidator.
func (sc *SocketVal) SignProposal(chainID string, proposal *types.Proposal) error {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.SignProposal(chainID, proposal)
}
@ -106,15 +108,15 @@ func (sc *SocketVal) SignProposal(chainID string, proposal *types.Proposal) erro
// Ping is used to check connection health.
func (sc *SocketVal) Ping() error {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.Ping()
}
// Close closes the underlying net.Conn.
func (sc *SocketVal) Close() {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
sc.mtx.Lock()
defer sc.mtx.Unlock()
if sc.signer != nil {
if err := sc.signer.Close(); err != nil {
sc.Logger.Error("OnStop", "err", err)

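The new comment above is the rationale for replacing sync.RWMutex with sync.Mutex: every method is a request/response round trip over one connection, so even read-only-looking calls such as GetPubKey must be serialized, otherwise two goroutines could interleave their requests and read each other's responses. A minimal sketch of that pattern, with a made-up rpcClient type standing in for the real RemoteSignerClient:

    import (
    	"net"
    	"sync"
    )

    type rpcClient struct {
    	mtx  sync.Mutex // exclusive: only one request/response pair in flight at a time
    	conn net.Conn
    }

    func (c *rpcClient) call(req []byte) ([]byte, error) {
    	c.mtx.Lock()
    	defer c.mtx.Unlock()
    	if _, err := c.conn.Write(req); err != nil { // send the request
    		return nil, err
    	}
    	buf := make([]byte, 1024)
    	n, err := c.conn.Read(buf) // read the matching response before releasing the lock
    	if err != nil {
    		return nil, err
    	}
    	return buf[:n], nil
    }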

+ 2
- 2
privval/client_test.go View File

@ -37,11 +37,11 @@ func socketTestCases(t *testing.T) []socketTestCase {
require.NoError(t, err)
unixAddr := fmt.Sprintf("unix://%s", unixFilePath)
return []socketTestCase{
socketTestCase{
{
addr: tcpAddr,
dialer: DialTCPFn(tcpAddr, testConnDeadline, ed25519.GenPrivKey()),
},
socketTestCase{
{
addr: unixAddr,
dialer: DialUnixFn(unixFilePath),
},


+ 3
- 3
privval/file.go View File

@ -87,17 +87,17 @@ type FilePVLastSignState struct {
func (lss *FilePVLastSignState) CheckHRS(height int64, round int, step int8) (bool, error) {
if lss.Height > height {
return false, errors.New("Height regression")
return false, fmt.Errorf("Height regression. Got %v, last height %v", height, lss.Height)
}
if lss.Height == height {
if lss.Round > round {
return false, errors.New("Round regression")
return false, fmt.Errorf("Round regression at height %v. Got %v, last round %v", height, round, lss.Round)
}
if lss.Round == round {
if lss.Step > step {
return false, errors.New("Step regression")
return false, fmt.Errorf("Step regression at height %v round %v. Got %v, last step %v", height, round, step, lss.Step)
} else if lss.Step == step {
if lss.SignBytes != nil {
if lss.Signature == nil {

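The richer error strings above make regressions much easier to diagnose. For a hypothetical last sign state of height 10, round 2, step 3, the new messages would read:

    // CheckHRS(9, 0, 1)  -> "Height regression. Got 9, last height 10"
    // CheckHRS(10, 1, 1) -> "Round regression at height 10. Got 1, last round 2"
    // CheckHRS(10, 2, 2) -> "Step regression at height 10 round 2. Got 2, last step 3"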

+ 1
- 1
privval/remote_signer.go View File

@ -7,7 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"


+ 17
- 24
privval/socket_test.go View File

@ -98,36 +98,29 @@ func TestListenerAcceptDeadlines(t *testing.T) {
func TestListenerConnectDeadlines(t *testing.T) {
for _, tc := range listenerTestCases(t, time.Second, time.Millisecond) {
readyc := make(chan struct{})
donec := make(chan struct{})
go func(ln net.Listener) {
defer close(donec)
c, err := ln.Accept()
go func(dialer Dialer) {
_, err := dialer()
if err != nil {
t.Fatal(err)
panic(err)
}
<-readyc
}(tc.dialer)
time.Sleep(2 * time.Millisecond)
c, err := tc.listener.Accept()
if err != nil {
t.Fatal(err)
}
msg := make([]byte, 200)
_, err = c.Read(msg)
opErr, ok := err.(*net.OpError)
if !ok {
t.Fatalf("for %s listener, have %v, want *net.OpError", tc.description, err)
}
time.Sleep(2 * time.Millisecond)
if have, want := opErr.Op, "read"; have != want {
t.Errorf("for %s listener, have %v, want %v", tc.description, have, want)
}
}(tc.listener)
msg := make([]byte, 200)
_, err = c.Read(msg)
opErr, ok := err.(*net.OpError)
if !ok {
t.Fatalf("for %s listener, have %v, want *net.OpError", tc.description, err)
}
_, err := tc.dialer()
if err != nil {
t.Fatal(err)
if have, want := opErr.Op, "read"; have != want {
t.Errorf("for %s listener, have %v, want %v", tc.description, have, want)
}
close(readyc)
<-donec
}
}

+ 1
- 1
privval/wire.go View File

@ -1,7 +1,7 @@
package privval
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)


+ 1
- 1
rpc/client/rpc_test.go View File

@ -12,7 +12,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/test"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)


+ 1
- 1
rpc/core/types/wire.go View File

@ -1,7 +1,7 @@
package core_types
import (
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
)


+ 2
- 2
rpc/grpc/grpc_test.go View File

@ -8,8 +8,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/rpc/grpc"
"github.com/tendermint/tendermint/rpc/test"
core_grpc "github.com/tendermint/tendermint/rpc/grpc"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
func TestMain(m *testing.M) {


+ 1
- 2
rpc/lib/client/args_test.go View File

@ -5,8 +5,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
)
type Tx []byte


+ 1
- 1
rpc/lib/client/http_client.go View File

@ -12,7 +12,7 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
types "github.com/tendermint/tendermint/rpc/lib/types"
)


+ 1
- 1
rpc/lib/client/ws_client.go View File

@ -13,7 +13,7 @@ import (
"github.com/pkg/errors"
metrics "github.com/rcrowley/go-metrics"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tendermint/libs/common"
types "github.com/tendermint/tendermint/rpc/lib/types"
)


+ 1
- 1
rpc/lib/types/types_test.go View File

@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
)
type SampleResult struct {


Some files were not shown because too many files changed in this diff
