
Merge pull request #559 from tendermint/release-v0.10.1

Release v0.10.1
Ethan Buchman, 7 years ago (committed by GitHub)
commit 7f3c697b6e
61 changed files with 577 additions and 928 deletions
  1. CHANGELOG.md (+20, -0)
  2. CODE_OF_CONDUCT.md (+56, -0)
  3. DOCKER/Dockerfile (+5, -6)
  4. DOCKER/Dockerfile.develop (+1, -1)
  5. DOCKER/Makefile (+2, -6)
  6. DOCKER/README.md (+4, -3)
  7. DOCKER/build.sh (+20, -0)
  8. DOCKER/push.sh (+22, -0)
  9. Makefile (+8, -1)
  10. README.md (+3, -0)
  11. blockchain/pool.go (+7, -7)
  12. blockchain/reactor.go (+6, -7)
  13. cmd/tendermint/commands/flags/log_level.go (+0, -87)
  14. cmd/tendermint/commands/flags/log_level_test.go (+0, -64)
  15. cmd/tendermint/commands/root.go (+20, -6)
  16. cmd/tendermint/commands/run_node.go (+26, -33)
  17. consensus/byzantine_test.go (+2, -4)
  18. consensus/common.go (+6, -0)
  19. consensus/common_test.go (+0, -21)
  20. consensus/height_vote_set_test.go (+3, -0)
  21. consensus/reactor.go (+4, -4)
  22. consensus/replay.go (+1, -1)
  23. consensus/replay_test.go (+0, -61)
  24. consensus/state.go (+11, -14)
  25. consensus/state_test.go (+10, -7)
  26. consensus/ticker.go (+1, -1)
  27. glide.lock (+33, -30)
  28. glide.yaml (+15, -11)
  29. mempool/mempool.go (+1, -1)
  30. mempool/reactor.go (+1, -1)
  31. node/node.go (+2, -2)
  32. p2p/addrbook.go (+1, -4)
  33. p2p/connection.go (+8, -8)
  34. p2p/listener.go (+4, -4)
  35. p2p/netaddress.go (+0, -1)
  36. p2p/pex_reactor.go (+2, -3)
  37. p2p/secret_connection.go (+0, -14)
  38. p2p/switch.go (+7, -7)
  39. p2p/upnp/probe.go (+6, -5)
  40. p2p/upnp/upnp.go (+5, -5)
  41. rpc/client/event_test.go (+66, -1)
  42. rpc/client/mock/abci.go (+2, -2)
  43. rpc/client/rpc_test.go (+91, -0)
  44. rpc/core/mempool.go (+1, -1)
  45. rpc/lib/client/http_client.go (+14, -5)
  46. rpc/lib/client/ws_client.go (+2, -2)
  47. rpc/lib/rpc_test.go (+2, -2)
  48. rpc/lib/server/handlers.go (+19, -9)
  49. rpc/lib/server/http_server.go (+1, -1)
  50. rpc/test/client_test.go (+0, -352)
  51. rpc/test/grpc_test.go (+2, -4)
  52. rpc/test/helpers.go (+0, -92)
  53. scripts/tendermint-builder/Dockerfile (+1, -1)
  54. state/execution.go (+5, -5)
  55. state/state.go (+1, -5)
  56. state/txindex/kv/kv_test.go (+5, -2)
  57. test/docker/Dockerfile (+2, -3)
  58. types/block.go (+1, -4)
  59. types/genesis.go (+33, -1)
  60. types/validator_set.go (+4, -4)
  61. version/version.go (+2, -2)

CHANGELOG.md (+20, -0)

@ -1,5 +1,25 @@
# Changelog
## 0.10.1 (June 28, 2017)
FEATURES:
- Use `--trace` to get stack traces for logged errors
- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set
- types: GenesisDocFromFile parses a GenesisDoc from a JSON file
IMPROVEMENTS:
- Add a Code of Conduct
- Variety of improvements as suggested by `megacheck` tool
- rpc: deduplicate tests between rpc/client and rpc/tests
- rpc: addresses without a protocol prefix default to `tcp://`. `http://` is also accepted as an alias for `tcp://`
- cmd: commands are more easily reusable from other tools
- DOCKER: automate build/push
BUG FIXES:
- Fix log statements using keys with spaces (logger does not currently support spaces)
- rpc: set logger on websocket connection
- rpc: fix ws connection stability by setting write deadline on pings
## 0.10.0 (June 2, 2017)
Includes major updates to configuration, logging, and json serialization.
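For context on the two `types` helpers named in the FEATURES list above, here is a minimal sketch of how they might be used together. The `GenesisDocFromFile` call shape is taken from the `run_node.go` change further down; the return type of `ValidatorHash` is assumed to be a raw byte slice.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// Parse a GenesisDoc straight from a JSON file (new in 0.10.1).
	genDoc, err := types.GenesisDocFromFile("genesis.json")
	if err != nil {
		panic(err)
	}

	// ValidatorHash returns the hash of the genesis validator set;
	// assumed here to be a byte slice, printed as hex.
	fmt.Printf("chain_id=%s validator_hash=%X\n", genDoc.ChainID, genDoc.ValidatorHash())
}
```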


CODE_OF_CONDUCT.md (+56, -0)

@ -0,0 +1,56 @@
# The Tendermint Code of Conduct
This code of conduct applies to all projects run by the Tendermint/COSMOS team and hence to tendermint.
----
# Conduct
## Contact: adrian@tendermint.com
* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
* On Slack, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
* Please be kind and courteous. There’s no need to be mean or rude.
* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.
* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome.
----
# Moderation
These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the above mentioned person.
1. Remarks that violate the Tendermint/COSMOS standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
3. Moderators will first respond to such remarks with a warning.
4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of the communication channel to cool off.
5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, in private. Complaints about bans in-channel are not allowed.
8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
In the Tendermint/COSMOS community we strive to go the extra step to look out for each other. Don’t just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they’re off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could’ve communicated better — remember that it’s your responsibility to make your fellow Cosmonauts comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
The enforcement policies listed above apply to all official Tendermint/COSMOS venues. For other projects adopting the Tendermint/COSMOS Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion.
*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling), the [Contributor Covenant v1.3.0](http://contributor-covenant.org/version/1/3/0/) and the [Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html).

DOCKER/Dockerfile (+5, -6)

@ -1,8 +1,8 @@
FROM alpine:3.5
FROM alpine:3.6
# This is the release of tendermint to pull in.
ENV TM_VERSION 0.9.1
ENV TM_SHA256SUM da34234755937140dcd953afcc965555fad7e05afd546711bc5bdc2df3d54226
ENV TM_VERSION 0.10.0
ENV TM_SHA256SUM a29852b8d51c00db93c87c3d148fa419a047abd38f32b2507a905805131acc19
# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private
@ -26,7 +26,7 @@ RUN mkdir -p $DATA_ROOT && \
RUN apk add --no-cache bash curl jq
RUN apk add --no-cache openssl && \
wget https://s3-us-west-2.amazonaws.com/tendermint/${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
apk del openssl && \
@ -42,5 +42,4 @@ EXPOSE 46657
ENTRYPOINT ["tendermint"]
# By default you'll get the dummy app
CMD ["node", "--moniker=`hostname`", "--proxy_app=dummy"]
CMD ["node", "--moniker=`hostname`"]

DOCKER/Dockerfile.develop (+1, -1)

@ -1,4 +1,4 @@
FROM alpine:3.5
FROM alpine:3.6
ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT


DOCKER/Makefile (+2, -6)

@ -1,12 +1,8 @@
build:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$(TAG)" -t "tendermint/tendermint:$(TAG_NO_PATCH)" .
@sh -c "'$(CURDIR)/build.sh'"
push:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker push "tendermint/tendermint:latest"
docker push "tendermint/tendermint:$(TAG)"
docker push "tendermint/tendermint:$(TAG_NO_PATCH)"
@sh -c "'$(CURDIR)/push.sh'"
build_develop:
docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .


DOCKER/README.md (+4, -3)

@ -1,6 +1,7 @@
# Supported tags and respective `Dockerfile` links
- `0.9.1`, `0.9`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
- `0.10.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile)
- `0.9.1`, `0.9`, [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)
@ -23,12 +24,12 @@ A very simple example of a built-in app and Tendermint core in one container.
```
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=dummy
```
## mintnet-kubernetes
If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/tools/tree/master/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
# Supported Docker versions


DOCKER/build.sh (+20, -0)

@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."
exit 1
fi
TAG_NO_PATCH=${TAG%.*}
read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
fi

DOCKER/push.sh (+22, -0)

@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e
# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."
exit 1
fi
TAG_NO_PATCH=${TAG%.*}
read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
docker push "tendermint/tendermint:latest"
docker push "tendermint/tendermint:$TAG"
docker push "tendermint/tendermint:$TAG_NO_PATCH"
fi

Makefile (+8, -1)

@ -1,6 +1,8 @@
GOTOOLS = \
github.com/mitchellh/gox \
github.com/Masterminds/glide
github.com/Masterminds/glide \
honnef.co/go/tools/cmd/megacheck
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
TMHOME = $${TMHOME:-$$HOME/.tendermint}
@ -72,5 +74,10 @@ tools:
ensure_tools:
go get $(GOTOOLS)
### Formatting, linting, and vetting
megacheck:
@for pkg in ${PACKAGES}; do megacheck "$$pkg"; done
.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps revision tools

README.md (+3, -0)

@ -27,6 +27,9 @@ For more background, see the [introduction](https://tendermint.com/intro).
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
### Code of Conduct
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
## Install
To download pre-built binaries, see our [downloads page](https://tendermint.com/intro/getting-started/download).


blockchain/pool.go (+7, -7)

@ -28,7 +28,7 @@ var peerTimeoutSeconds = time.Duration(15) // not const so we can override with
Every so often we ask peers what height they're on so we can keep going.
Requests are continuously made for blocks of higher heights until
the limits. If most of the requests have no available peers, and we
the limit is reached. If most of the requests have no available peers, and we
are not at peer limits, we can probably switch to consensus reactor
*/
@ -129,8 +129,6 @@ func (pool *BlockPool) IsCaughtUp() bool {
pool.mtx.Lock()
defer pool.mtx.Unlock()
height := pool.height
// Need at least 1 peer to be considered caught up.
if len(pool.peers) == 0 {
pool.Logger.Debug("Blockpool has no peers")
@ -142,8 +140,11 @@ func (pool *BlockPool) IsCaughtUp() bool {
maxPeerHeight = MaxInt(maxPeerHeight, peer.height)
}
isCaughtUp := (height > 0 || time.Now().Sub(pool.startTime) > 5*time.Second) && (maxPeerHeight == 0 || height >= maxPeerHeight)
pool.Logger.Info(Fmt("IsCaughtUp: %v", isCaughtUp), "height", height, "maxPeerHeight", maxPeerHeight)
// some conditions to determine if we're caught up
receivedBlockOrTimedOut := (pool.height > 0 || time.Since(pool.startTime) > 5*time.Second)
ourChainIsLongestAmongPeers := maxPeerHeight == 0 || pool.height >= maxPeerHeight
isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
pool.Logger.Info(Fmt("IsCaughtUp: %v", isCaughtUp), "height", pool.height, "maxPeerHeight", maxPeerHeight)
return isCaughtUp
}
@ -261,7 +262,6 @@ func (pool *BlockPool) pickIncrAvailablePeer(minHeight int) *bpPeer {
if peer.didTimeout {
pool.removePeer(peer.id)
continue
} else {
}
if peer.numPending >= maxPendingRequestsPerPeer {
continue
@ -303,6 +303,7 @@ func (pool *BlockPool) sendTimeout(peerID string) {
pool.timeoutsCh <- peerID
}
// unused by tendermint; left for debugging purposes
func (pool *BlockPool) debug() string {
pool.mtx.Lock() // Lock
defer pool.mtx.Unlock()
@ -326,7 +327,6 @@ type bpPeer struct {
id string
recvMonitor *flow.Monitor
mtx sync.Mutex
height int
numPending int32
timeout *time.Timer


blockchain/reactor.go (+6, -7)

@ -19,7 +19,6 @@ const (
BlockchainChannel = byte(0x40)
defaultChannelCapacity = 100
defaultSleepIntervalMS = 500
trySyncIntervalMS = 100
// stop syncing when last block's time is
// within this much of the system time.
@ -49,7 +48,6 @@ type BlockchainReactor struct {
fastSync bool
requestsCh chan BlockRequest
timeoutsCh chan string
lastBlock *types.Block
evsw types.EventSwitch
}
@ -128,12 +126,13 @@ func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
bcR.Logger.Error("Error decoding message", "error", err)
bcR.Logger.Error("Error decoding message", "err", err)
return
}
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
// TODO: improve logic to satisfy megacheck
switch msg := msg.(type) {
case *bcBlockRequestMessage:
// Got a request for a block. Respond with block if we have it.
@ -194,10 +193,10 @@ FOR_LOOP:
if peer != nil {
bcR.Switch.StopPeerForError(peer, errors.New("BlockchainReactor Timeout"))
}
case _ = <-statusUpdateTicker.C:
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest()
case _ = <-switchToConsensusTicker.C:
case <-switchToConsensusTicker.C:
height, numPending, _ := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
bcR.Logger.Info("Consensus ticker", "numPending", numPending, "total", len(bcR.pool.requesters),
@ -211,7 +210,7 @@ FOR_LOOP:
break FOR_LOOP
}
case _ = <-trySyncTicker.C: // chan time
case <-trySyncTicker.C: // chan time
// This loop can be slow as long as it's doing syncing work.
SYNC_LOOP:
for i := 0; i < 10; i++ {
@ -231,7 +230,7 @@ FOR_LOOP:
err := bcR.state.Validators.VerifyCommit(
bcR.state.ChainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Info("error in validation", "error", err)
bcR.Logger.Info("error in validation", "err", err)
bcR.pool.RedoRequest(first.Height)
break SYNC_LOOP
} else {


cmd/tendermint/commands/flags/log_level.go (+0, -87)

@ -1,87 +0,0 @@
package flags
import (
"fmt"
"strings"
"github.com/pkg/errors"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/log"
)
const (
defaultLogLevelKey = "*"
)
// ParseLogLevel parses complex log level - comma-separated
// list of module:level pairs with an optional *:level pair (* means
// all other modules).
//
// Example:
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout))
func ParseLogLevel(lvl string, logger log.Logger) (log.Logger, error) {
if lvl == "" {
return nil, errors.New("Empty log level")
}
l := lvl
// prefix simple one word levels (e.g. "info") with "*"
if !strings.Contains(l, ":") {
l = defaultLogLevelKey + ":" + l
}
options := make([]log.Option, 0)
isDefaultLogLevelSet := false
var option log.Option
var err error
list := strings.Split(l, ",")
for _, item := range list {
moduleAndLevel := strings.Split(item, ":")
if len(moduleAndLevel) != 2 {
return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list)
}
module := moduleAndLevel[0]
level := moduleAndLevel[1]
if module == defaultLogLevelKey {
option, err = log.AllowLevel(level)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l))
}
options = append(options, option)
isDefaultLogLevelSet = true
} else {
switch level {
case "debug":
option = log.AllowDebugWith("module", module)
case "info":
option = log.AllowInfoWith("module", module)
case "error":
option = log.AllowErrorWith("module", module)
case "none":
option = log.AllowNoneWith("module", module)
default:
return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list)
}
options = append(options, option)
}
}
// if "*" is not provided, set default global level
if !isDefaultLogLevelSet {
option, err = log.AllowLevel(cfg.DefaultLogLevel())
if err != nil {
return nil, err
}
options = append(options, option)
}
return log.NewFilter(logger, options...), nil
}

cmd/tendermint/commands/flags/log_level_test.go (+0, -64)

@ -1,64 +0,0 @@
package flags_test
import (
"bytes"
"strings"
"testing"
tmflags "github.com/tendermint/tendermint/cmd/tendermint/commands/flags"
"github.com/tendermint/tmlibs/log"
)
func TestParseLogLevel(t *testing.T) {
var buf bytes.Buffer
jsonLogger := log.NewTMJSONLogger(&buf)
correctLogLevels := []struct {
lvl string
expectedLogLines []string
}{
{"mempool:error", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}},
{"mempool:error,*:debug", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}},
{"*:debug,wire:none", []string{
`{"_msg":"Kingpin","level":"debug","module":"mempool"}`,
`{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`,
`{"_msg":"Mesmero","level":"error","module":"mempool"}`}},
}
for _, c := range correctLogLevels {
logger, err := tmflags.ParseLogLevel(c.lvl, jsonLogger)
if err != nil {
t.Fatal(err)
}
logger = logger.With("module", "mempool")
buf.Reset()
logger.Debug("Kingpin")
if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have {
t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl)
}
buf.Reset()
logger.Info("Kitty Pryde")
if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have {
t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], have, c.lvl)
}
buf.Reset()
logger.Error("Mesmero")
if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have {
t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl)
}
}
incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"}
for _, lvl := range incorrectLogLevel {
if _, err := tmflags.ParseLogLevel(lvl, jsonLogger); err == nil {
t.Fatalf("Expected %s to produce error", lvl)
}
}
}

cmd/tendermint/commands/root.go (+20, -6)

@ -6,8 +6,9 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmflags "github.com/tendermint/tendermint/cmd/tendermint/commands/flags"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/cli"
tmflags "github.com/tendermint/tmlibs/cli/flags"
"github.com/tendermint/tmlibs/log"
)
@ -20,20 +21,33 @@ func init() {
RootCmd.PersistentFlags().String("log_level", config.LogLevel, "Log level")
}
// ParseConfig will setup the tendermint configuration properly
func ParseConfig() (*cfg.Config, error) {
conf := cfg.DefaultConfig()
err := viper.Unmarshal(conf)
if err != nil {
return nil, err
}
conf.SetRoot(conf.RootDir)
cfg.EnsureRoot(conf.RootDir)
return conf, err
}
var RootCmd = &cobra.Command{
Use: "tendermint",
Short: "Tendermint Core (BFT Consensus) in Go",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
err := viper.Unmarshal(config)
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig()
if err != nil {
return err
}
config.SetRoot(config.RootDir)
cfg.EnsureRoot(config.RootDir)
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger)
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
if err != nil {
return err
}
if viper.GetBool(cli.TraceFlag) {
logger = log.NewTracingLogger(logger)
}
return nil
},
}
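A minimal sketch of the relocated log-level parser: the `ParseLogLevel` helper deleted from `cmd/tendermint/commands/flags` above now comes from `tmlibs/cli/flags` and takes the default level as an explicit third argument, matching the `root.go` change above. The filter string and writer here are illustrative only.

```go
package main

import (
	"os"

	cfg "github.com/tendermint/tendermint/config"
	tmflags "github.com/tendermint/tmlibs/cli/flags"
	"github.com/tendermint/tmlibs/log"
)

func main() {
	logger := log.NewTMLogger(os.Stdout)

	// Same call shape as root.go above: "module:level" pairs plus a fallback
	// level for modules not listed ("*").
	logger, err := tmflags.ParseLogLevel("consensus:debug,*:error", logger, cfg.DefaultLogLevel())
	if err != nil {
		panic(err)
	}
	logger.Info("log filter installed")
}
```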

cmd/tendermint/commands/run_node.go (+26, -33)

@ -2,7 +2,6 @@ package commands
import (
"fmt"
"io/ioutil"
"time"
"github.com/spf13/cobra"
@ -19,28 +18,33 @@ var runNodeCmd = &cobra.Command{
}
func init() {
AddNodeFlags(runNodeCmd)
RootCmd.AddCommand(runNodeCmd)
}
// AddNodeFlags exposes some common configuration options on the command-line
// These are exposed for convenience of commands embedding a tendermint node
func AddNodeFlags(cmd *cobra.Command) {
// bind flags
runNodeCmd.Flags().String("moniker", config.Moniker, "Node Name")
cmd.Flags().String("moniker", config.Moniker, "Node Name")
// node flags
runNodeCmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")
cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")
// abci flags
runNodeCmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'dummy' for local testing.")
runNodeCmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")
cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'dummy' for local testing.")
cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")
// rpc flags
runNodeCmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
runNodeCmd.Flags().String("rpc.grpc_laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). Port required")
runNodeCmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "Enabled unsafe rpc methods")
cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
cmd.Flags().String("rpc.grpc_laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). Port required")
cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "Enabled unsafe rpc methods")
// p2p flags
runNodeCmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
runNodeCmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
runNodeCmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
runNodeCmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable Peer-Exchange (dev feature)")
RootCmd.AddCommand(runNodeCmd)
cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable Peer-Exchange (dev feature)")
}
// Users wishing to:
@ -56,27 +60,16 @@ func runNode(cmd *cobra.Command, args []string) error {
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GenesisFile()
if !cmn.FileExists(genDocFile) {
for !cmn.FileExists(genDocFile) {
logger.Info(cmn.Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !cmn.FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
return fmt.Errorf("Couldn't read GenesisDoc file: %v", err)
}
genDoc, err := types.GenesisDocFromJSON(jsonBlob)
if err != nil {
return fmt.Errorf("Error reading GenesisDoc: %v", err)
}
if genDoc.ChainID == "" {
return fmt.Errorf("Genesis doc %v must include non-empty chain_id", genDocFile)
}
config.ChainID = genDoc.ChainID
}
time.Sleep(time.Second)
}
genDoc, err := types.GenesisDocFromFile(genDocFile)
if err != nil {
return err
}
config.ChainID = genDoc.ChainID
// Create & start node
n := node.NewNodeDefault(config, logger.With("module", "node"))
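The changelog's "commands are more easily reusable from other tools" item boils down to the exported `AddNodeFlags` above. Below is a rough sketch, with a hypothetical wrapper command, of how an external binary could attach tendermint's node flags to its own cobra command; in practice it would also reuse `RootCmd`/`ParseConfig` so the flag defaults get populated.

```go
package main

import (
	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
)

func main() {
	// Hypothetical wrapper command; the RunE body stands in for your own
	// node-startup logic.
	myNodeCmd := &cobra.Command{
		Use:   "mynode",
		Short: "run my app with an embedded tendermint node",
		RunE: func(cmd *cobra.Command, args []string) error {
			// start your own tendermint node here
			return nil
		},
	}

	// Registers --moniker, --fast_sync, --proxy_app, --abci, rpc.* and p2p.*
	// flags on the command, exactly as runNodeCmd does above.
	commands.AddNodeFlags(myNodeCmd)

	if err := myNodeCmd.Execute(); err != nil {
		panic(err)
	}
}
```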


consensus/byzantine_test.go (+2, -4)

@ -77,6 +77,7 @@ func TestByzantine(t *testing.T) {
var conRI p2p.Reactor
conRI = conR
if i == 0 {
conRI = NewByzantineReactor(conR)
}
@ -116,10 +117,7 @@ func TestByzantine(t *testing.T) {
p2p.Connect2Switches(switches, ind1, ind2)
// wait for someone in the big partition to make a block
select {
case <-eventChans[ind2]:
}
<-eventChans[ind2]
t.Log("A block has been committed. Healing partition")


consensus/common.go (+6, -0)

@ -27,3 +27,9 @@ func subscribeToEventRespond(evsw types.EventSwitch, receiver, eventID string) c
})
return ch
}
func discardFromChan(ch chan interface{}, n int) {
for i := 0; i < n; i++ {
<-ch
}
}

consensus/common_test.go (+0, -21)

@ -222,17 +222,6 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
return voteCh
}
func readVotes(ch chan interface{}, reads int) chan struct{} {
wg := make(chan struct{})
go func() {
for i := 0; i < reads; i++ {
<-ch // read the precommit event
}
close(wg)
}()
return wg
}
//-------------------------------------------------------------------------------
// consensus states
@ -274,16 +263,6 @@ func loadPrivValidator(config *cfg.Config) *types.PrivValidator {
return privValidator
}
func fixedConsensusState() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
cs.SetLogger(log.TestingLogger())
return cs
}
func fixedConsensusStateDummy() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())


consensus/height_vote_set_test.go (+3, -0)

@ -30,6 +30,9 @@ func TestPeerCatchupRounds(t *testing.T) {
vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0)
added, err = hvs.AddVote(vote1001_0, "peer1")
if err != nil {
t.Error("AddVote error", err)
}
if added {
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
}


consensus/reactor.go (+4, -4)

@ -154,7 +154,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "error", err, "bytes", msgBytes)
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
// TODO punish peer?
return
}
@ -282,7 +282,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
}
if err != nil {
conR.Logger.Error("Error in Receive()", "error", err)
conR.Logger.Error("Error in Receive()", "err", err)
}
}
@ -351,7 +351,7 @@ func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *
Height: rs.Height,
Round: rs.Round,
Step: rs.Step,
SecondsSinceStartTime: int(time.Now().Sub(rs.StartTime).Seconds()),
SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.Round(),
}
if rs.Step == RoundStepCommit {
@ -412,7 +412,7 @@ OUTER_LOOP:
// Ensure that the peer's PartSetHeader is correct
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
logger.Error("Failed to load block meta", "peer height", prs.Height, "our height", rs.Height, "blockstore height", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
logger.Error("Failed to load block meta", "peer height", prs.Height, "ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {


consensus/replay.go (+1, -1)

@ -132,7 +132,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
if !found {
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
// TODO (0.10.0): remove this
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
gr, _, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil


consensus/replay_test.go (+0, -61)

@ -557,67 +557,6 @@ func readPieceFromWAL(msgBytes []byte) (interface{}, error) {
return nil, nil
}
// make some bogus txs
func txsFunc(blockNum int) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
}
return txs
}
// sign a commit vote
func signCommit(chainID string, privVal *types.PrivValidator, height, round int, hash []byte, header types.PartSetHeader) *types.Vote {
vote := &types.Vote{
ValidatorIndex: 0,
ValidatorAddress: privVal.Address,
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{hash, header},
}
sig := privVal.Sign(types.SignBytes(chainID, vote))
vote.Signature = sig
return vote
}
// make a blockchain with one validator
func makeBlockchain(t *testing.T, chainID string, nBlocks int, privVal *types.PrivValidator, proxyApp proxy.AppConns, state *sm.State) (blockchain []*types.Block, commits []*types.Commit) {
prevHash := state.LastBlockID.Hash
lastCommit := new(types.Commit)
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
for i := 1; i < nBlocks+1; i++ {
block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit,
prevBlockID, valHash, state.AppHash, testPartSize)
fmt.Println(i)
fmt.Println(block.LastBlockID)
err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(i, err)
}
voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators)
vote := signCommit(chainID, privVal, i, 0, block.Hash(), parts.Header())
_, err = voteSet.AddVote(vote)
if err != nil {
t.Fatal(err)
}
prevHash = block.Hash()
prevParts = parts.Header()
lastCommit = voteSet.MakeCommit()
prevBlockID = types.BlockID{prevHash, prevParts}
blockchain = append(blockchain, block)
commits = append(commits, lastCommit)
}
return blockchain, commits
}
// fresh state and mock store
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()


consensus/state.go (+11, -14)

@ -311,7 +311,7 @@ func (cs *ConsensusState) OnStart() error {
walFile := cs.config.WalFile()
if err := cs.OpenWAL(walFile); err != nil {
cs.Logger.Error("Error loading ConsensusState wal", "error", err.Error())
cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error())
return err
}
@ -325,7 +325,7 @@ func (cs *ConsensusState) OnStart() error {
// we may have lost some votes if the process crashed
// reload from consensus log to catchup
if err := cs.catchupReplay(cs.Height); err != nil {
cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "error", err.Error())
cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "err", err.Error())
// NOTE: if we ever do return an error here,
// make sure to stop the timeoutTicker
}
@ -368,7 +368,7 @@ func (cs *ConsensusState) Wait() {
func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
err = cmn.EnsureDir(path.Dir(walFile), 0700)
if err != nil {
cs.Logger.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
cs.Logger.Error("Error ensuring ConsensusState wal dir", "err", err.Error())
return err
}
@ -663,7 +663,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo, rs RoundState) {
cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg))
}
if err != nil {
cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerKey, "error", err, "msg", msg)
cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerKey, "err", err, "msg", msg)
}
}
@ -831,7 +831,7 @@ func (cs *ConsensusState) defaultDecideProposal(height, round int) {
cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block))
} else {
if !cs.replayMode {
cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "error", err)
cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
}
}
}
@ -930,7 +930,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
err := cs.state.ValidateBlock(cs.ProposalBlock)
if err != nil {
// ProposalBlock is invalid, prevote nil.
cs.Logger.Error("enterPrevote: ProposalBlock is invalid", "error", err)
cs.Logger.Error("enterPrevote: ProposalBlock is invalid", "err", err)
cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{})
return
}
@ -939,7 +939,6 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
// NOTE: the proposal signature is validated when it is received,
// and the proposal block parts are validated as they are received (against the merkle hash in the proposal)
cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
// Enter: any +2/3 prevotes at next round.
@ -1059,7 +1058,6 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
}
types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
return
}
// Enter: any +2/3 precommits for next round.
@ -1218,7 +1216,7 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// NOTE: the block.AppHash wont reflect these txs until the next block
err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
if err != nil {
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "error", err)
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
return
}
@ -1250,7 +1248,6 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// * cs.Height has been increment to height+1
// * cs.Step is now RoundStepNewHeight
// * cs.StartTime is set to when we will start round0.
return
}
//-----------------------------------------------------------------------------
@ -1350,7 +1347,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
return err
} else {
// Probably an invalid signature. Bad peer.
cs.Logger.Error("Error attempting to add vote", "error", err)
cs.Logger.Error("Error attempting to add vote", "err", err)
return ErrAddingVote
}
}
@ -1360,7 +1357,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
//-----------------------------------------------------------------------------
func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, err error) {
cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "csHeight", cs.Height)
cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "valIndex", vote.ValidatorIndex, "csHeight", cs.Height)
// A precommit for the previous height?
// These come in while we wait timeoutCommit
@ -1491,11 +1488,11 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
vote, err := cs.signVote(type_, hash, header)
if err == nil {
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
return vote
} else {
//if !cs.replayMode {
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
//}
return nil
}


consensus/state_test.go (+10, -7)

@ -523,7 +523,8 @@ func TestLockPOLRelock(t *testing.T) {
<-voteCh // prevote
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
// prevotes
discardFromChan(voteCh, 3)
<-voteCh // our precommit
// the proposed block should now be locked and our precommit added
@ -532,7 +533,8 @@ func TestLockPOLRelock(t *testing.T) {
// add precommits from the rest
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // precommits
// precommits
discardFromChan(voteCh, 3)
// before we timeout to the new round set the new proposal
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
@ -544,7 +546,7 @@ func TestLockPOLRelock(t *testing.T) {
// timeout to new round
<-timeoutWaitCh
//XXX: this isnt gauranteed to get there before the timeoutPropose ...
//XXX: this isnt guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
<-newRoundCh
@ -570,7 +572,8 @@ func TestLockPOLRelock(t *testing.T) {
// now lets add prevotes from everyone else for the new block
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
// prevotes
discardFromChan(voteCh, 3)
// now either we go to PrevoteWait or Precommit
select {
@ -585,7 +588,7 @@ func TestLockPOLRelock(t *testing.T) {
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
_, _ = <-voteCh, <-voteCh
discardFromChan(voteCh, 2)
be := <-newBlockCh
b := be.(types.TMEventData).Unwrap().(types.EventDataNewBlockHeader)
@ -655,7 +658,7 @@ func TestLockPOLUnlock(t *testing.T) {
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
lockedBlockHash := rs.LockedBlock.Hash()
//XXX: this isnt gauranteed to get there before the timeoutPropose ...
//XXX: this isnt guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
<-newRoundCh
@ -742,7 +745,7 @@ func TestLockPOLSafety1(t *testing.T) {
incrementRound(vs2, vs3, vs4)
//XXX: this isnt gauranteed to get there before the timeoutPropose ...
//XXX: this isnt guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
<-newRoundCh


consensus/ticker.go (+1, -1)

@ -117,7 +117,7 @@ func (t *timeoutTicker) timeoutRoutine() {
t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
case <-t.timer.C:
t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// go routine here gaurantees timeoutRoutine doesn't block.
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker


glide.lock (+33, -30)

@ -1,20 +1,18 @@
hash: 93f15c9766ea826c29a91f545c42172eafd8c61e39c1d81617114ad1a9c9eaf2
updated: 2017-05-18T06:13:24.295793122-04:00
hash: 2c988aae9517b386ee911e4da5deb9f5034359b7e2ccf448952a3ddb9771222d
updated: 2017-06-28T13:04:20.907047164+02:00
imports:
- name: github.com/btcsuite/btcd
version: 53f55a46349aa8f44b90895047e843666991cf24
version: b8df516b4b267acf2de46be593a9d948d1d2c420
subpackages:
- btcec
- name: github.com/davecgh/go-spew
version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9
subpackages:
- spew
- name: github.com/btcsuite/fastsha256
version: 637e656429416087660c84436a2a035d69d54e2e
- name: github.com/ebuchman/fail-test
version: 95f809107225be108efcf10a3509e4ea6ceef3c4
- name: github.com/fsnotify/fsnotify
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
- name: github.com/go-kit/kit
version: 6964666de57c88f7d93da127e900d201b632f561
version: d67bb4c202e3b91377d1079b110a6c9ce23ab2f8
subpackages:
- log
- log/level
@ -22,13 +20,13 @@ imports:
- name: github.com/go-logfmt/logfmt
version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
- name: github.com/go-stack/stack
version: 7a2f19628aabfe68f0766b59e74d6315f8347d22
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
- name: github.com/gogo/protobuf
version: 9df9efe4c742f1a2bfdedf1c3b6902fc6e814c6b
subpackages:
- proto
- name: github.com/golang/protobuf
version: fec3b39b059c0f88fa6b20f5ed012b1aa203a8b4
version: 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8
subpackages:
- proto
- ptypes/any
@ -60,13 +58,9 @@ imports:
- name: github.com/pelletier/go-buffruneio
version: c37440a7cf42ac63b919c752ca73a85067e05992
- name: github.com/pelletier/go-toml
version: 5c26a6ff6fd178719e15decac1c8196da0d7d6d1
version: 5ccdfb18c776b740aecaf085c4d9a2779199c279
- name: github.com/pkg/errors
version: c605e284fe17294bda444b34710735b29d1a9d90
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/spf13/afero
version: 9be650865eab0c12963d8753212f4f9c66cdcf12
subpackages:
@ -81,11 +75,6 @@ imports:
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
- name: github.com/spf13/viper
version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2
- name: github.com/stretchr/testify
version: 4d4bfba8f1d1027c4fdbe371823030df51419987
subpackages:
- assert
- require
- name: github.com/syndtr/goleveldb
version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4
subpackages:
@ -115,24 +104,25 @@ imports:
- edwards25519
- extra25519
- name: github.com/tendermint/go-crypto
version: 7dff40942a64cdeefefa9446b2d104750b349f8a
version: 95b7c9e09c49b91bfbb71bb63dd514eb55450f16
- name: github.com/tendermint/go-wire
version: 5f88da3dbc1a72844e6dfaf274ce87f851d488eb
subpackages:
- data
- data/base58
- name: github.com/tendermint/merkleeyes
version: a0e73e1ac3e18e12a007520a4ea2c9822256e307
version: 102aaf5a8ffda1846413fb22805a94def2045b9f
subpackages:
- app
- client
- iavl
- testutil
- name: github.com/tendermint/tmlibs
version: 306795ae1d8e4f4a10dcc8bdb32a00455843c9d5
version: 7ce4da1eee6004d627e780c8fe91e96d9b99e459
subpackages:
- autofile
- cli
- cli/flags
- clist
- common
- db
@ -142,7 +132,7 @@ imports:
- merkle
- test
- name: golang.org/x/crypto
version: 0fe963104e9d1877082f8fb38f816fcd97eb1d10
version: c7af5bf2638a1164f2eb5467c39c6cffbd13a02e
subpackages:
- curve25519
- nacl/box
@ -153,7 +143,7 @@ imports:
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: 513929065c19401a1c7b76ecd942f9f86a0c061b
version: feeb485667d1fdabe727840fe00adc22431bc86e
subpackages:
- context
- http2
@ -167,18 +157,18 @@ imports:
subpackages:
- unix
- name: golang.org/x/text
version: 19e51611da83d6be54ddafce4a4af510cb3e9ea4
version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/genproto
version: bb3573be0c484136831138976d444b8754777aff
version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: 11d93ecdb918872ee841ba3a2dc391aa6d4f57c3
version: 844f573616520565fdc6fb4db242321b5456fd6d
subpackages:
- codes
- credentials
@ -195,4 +185,17 @@ imports:
- transport
- name: gopkg.in/yaml.v2
version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
testImports: []
testImports:
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
subpackages:
- spew
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
subpackages:
- assert
- require

glide.yaml (+15, -11)

@ -7,13 +7,13 @@ import:
- package: github.com/golang/protobuf
subpackages:
- proto
- package: github.com/pelletier/go-toml
version: ^1.0.0
- package: github.com/gorilla/websocket
- package: github.com/pkg/errors
version: ~0.8.0
- package: github.com/spf13/cobra
- package: github.com/spf13/viper
- package: github.com/stretchr/testify
subpackages:
- require
- package: github.com/tendermint/abci
version: v0.5.0
subpackages:
@ -21,13 +21,19 @@ import:
- example/dummy
- types
- package: github.com/tendermint/go-crypto
version: v0.2.0
version: ~0.2.2
- package: github.com/tendermint/go-wire
version: v0.6.2
version: ~0.6.2
subpackages:
- data
- package: github.com/tendermint/merkleeyes
version: ~0.2.4
subpackages:
- app
- iavl
- testutil
- package: github.com/tendermint/tmlibs
version: v0.2.0
version: ~0.2.2
subpackages:
- autofile
- cli
@ -48,9 +54,7 @@ import:
- context
- package: google.golang.org/grpc
testImport:
- package: github.com/tendermint/merkleeyes
version: develop
- package: github.com/stretchr/testify
subpackages:
- app
- iavl
- testutil
- assert
- require

mempool/mempool.go (+1, -1)

@ -409,7 +409,7 @@ func (cache *txCache) Push(tx types.Tx) bool {
popped := cache.list.Front()
poppedTx := popped.Value.(types.Tx)
// NOTE: the tx may have already been removed from the map
// but deleting a non-existant element is fine
// but deleting a non-existent element is fine
delete(cache.map_, string(poppedTx))
cache.list.Remove(popped)
}


mempool/reactor.go (+1, -1)

@ -63,7 +63,7 @@ func (memR *MempoolReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
memR.Logger.Error("Error decoding message", "error", err)
memR.Logger.Error("Error decoding message", "err", err)
return
}
memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)


node/node.go (+2, -2)

@ -205,7 +205,7 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
if profileHost != "" {
go func() {
logger.Error("Profile server", "error", http.ListenAndServe(profileHost, nil))
logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
}()
}
@ -276,7 +276,7 @@ func (n *Node) OnStop() {
for _, l := range n.rpcListeners {
n.Logger.Info("Closing rpc listener", "listener", l)
if err := l.Close(); err != nil {
n.Logger.Error("Error closing listener", "listener", l, "error", err)
n.Logger.Error("Error closing listener", "listener", l, "err", err)
}
}
}


p2p/addrbook.go (+1, -4)

@ -68,9 +68,6 @@ const (
// max addresses returned by GetSelection
// NOTE: this must match "maxPexMessageSize"
maxGetSelection = 250
// current version of the on-disk format.
serializationVersion = 1
)
const (
@ -340,7 +337,7 @@ func (a *AddrBook) saveToFile(filePath string) {
}
err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
if err != nil {
a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "error", err)
a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err)
}
}


p2p/connection.go (+8, -8)

@ -180,7 +180,7 @@ func (c *MConnection) flush() {
c.Logger.Debug("Flush", "conn", c)
err := c.bufWriter.Flush()
if err != nil {
c.Logger.Error("MConnection flush failed", "error", err)
c.Logger.Error("MConnection flush failed", "err", err)
}
}
@ -318,7 +318,7 @@ FOR_LOOP:
break FOR_LOOP
}
if err != nil {
c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "error", err)
c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
c.stopForError(err)
break FOR_LOOP
}
@ -373,7 +373,7 @@ func (c *MConnection) sendMsgPacket() bool {
// Make & send a msgPacket from this channel
n, err := leastChannel.writeMsgPacketTo(c.bufWriter)
if err != nil {
c.Logger.Error("Failed to write msgPacket", "error", err)
c.Logger.Error("Failed to write msgPacket", "err", err)
c.stopForError(err)
return true
}
@ -401,7 +401,7 @@ FOR_LOOP:
if err == nil {
return bytes
} else {
log.Warn("Error peeking connection buffer", "error", err)
log.Warn("Error peeking connection buffer", "err", err)
return nil
}
}})
@ -415,7 +415,7 @@ FOR_LOOP:
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "error", err)
c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
c.stopForError(err)
}
break FOR_LOOP
@ -436,7 +436,7 @@ FOR_LOOP:
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "error", err)
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err)
}
break FOR_LOOP
@ -448,7 +448,7 @@ FOR_LOOP:
msgBytes, err := channel.recvMsgPacket(pkt)
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "error", err)
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err)
}
break FOR_LOOP
@ -468,7 +468,7 @@ FOR_LOOP:
// Cleanup
close(c.pong)
for _ = range c.pong {
for range c.pong {
// Drain
}
}


p2p/listener.go (+4, -4)

@ -135,7 +135,7 @@ func (l *DefaultListener) listenRoutine() {
// Cleanup
close(l.connections)
for _ = range l.connections {
for range l.connections {
// Drain
}
}
@ -171,13 +171,13 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *
logger.Info("Getting UPNP external address")
nat, err := upnp.Discover()
if err != nil {
logger.Info("Could not perform UPNP discover", "error", err)
logger.Info("Could not perform UPNP discover", "err", err)
return nil
}
ext, err := nat.GetExternalAddress()
if err != nil {
logger.Info("Could not get UPNP external address", "error", err)
logger.Info("Could not get UPNP external address", "err", err)
return nil
}
@ -188,7 +188,7 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *
externalPort, err = nat.AddPortMapping("tcp", externalPort, internalPort, "tendermint", 0)
if err != nil {
logger.Info("Could not add UPNP port mapping", "error", err)
logger.Info("Could not add UPNP port mapping", "err", err)
return nil
}


p2p/netaddress.go (+0, -1)

@ -174,7 +174,6 @@ func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
Ipv6_weak
Ipv4
Ipv6_strong
Private
)
if !na.Routable() {
return Unreachable


p2p/pex_reactor.go (+2, -3)

@ -44,7 +44,6 @@ const (
type PEXReactor struct {
BaseReactor
sw *Switch
book *AddrBook
ensurePeersPeriod time.Duration
@ -105,7 +104,7 @@ func (r *PEXReactor) AddPeer(p *Peer) {
addr, err := NewNetAddressString(p.ListenAddr)
if err != nil {
// this should never happen
r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.ListenAddr, "error", err)
r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.ListenAddr, "err", err)
return
}
r.book.AddAddress(addr, addr)
@ -132,7 +131,7 @@ func (r *PEXReactor) Receive(chID byte, src *Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
r.Logger.Error("Error decoding message", "error", err)
r.Logger.Error("Error decoding message", "err", err)
return
}
r.Logger.Info("Received message", "msg", msg)


p2p/secret_connection.go (+0, -14)

@ -293,10 +293,6 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKeyEd25519, signa
return &recvMsg, nil
}
func verifyChallengeSignature(challenge *[32]byte, remPubKey crypto.PubKeyEd25519, remSignature crypto.SignatureEd25519) bool {
return remPubKey.VerifyBytes(challenge[:], remSignature.Wrap())
}
//--------------------------------------------------------------------------------
// sha256
@ -319,16 +315,6 @@ func hash24(input []byte) (res *[24]byte) {
return
}
// ripemd160
func hash20(input []byte) (res *[20]byte) {
hasher := ripemd160.New()
hasher.Write(input) // does not error
resSlice := hasher.Sum(nil)
res = new([20]byte)
copy(res[:], resSlice)
return
}
// increment nonce big-endian by 2 with wraparound.
func incr2Nonce(nonce *[24]byte) {
incrNonce(nonce)


p2p/switch.go (+7, -7)

@ -309,7 +309,7 @@ func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error {
func (sw *Switch) dialSeed(addr *NetAddress) {
peer, err := sw.DialPeerWithAddress(addr, true)
if err != nil {
sw.Logger.Error("Error dialing seed", "error", err)
sw.Logger.Error("Error dialing seed", "err", err)
} else {
sw.Logger.Info("Connected to seed", "peer", peer)
}
@ -322,7 +322,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer,
sw.Logger.Info("Dialing peer", "address", addr)
peer, err := newOutboundPeerWithConfig(addr, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
if err != nil {
sw.Logger.Error("Failed to dial peer", "address", addr, "error", err)
sw.Logger.Error("Failed to dial peer", "address", addr, "err", err)
return nil, err
}
peer.SetLogger(sw.Logger.With("peer", addr))
@ -331,7 +331,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer,
}
err = sw.AddPeer(peer)
if err != nil {
sw.Logger.Error("Failed to add peer", "address", addr, "error", err)
sw.Logger.Error("Failed to add peer", "address", addr, "err", err)
peer.CloseConn()
return nil, err
}
@ -381,7 +381,7 @@ func (sw *Switch) Peers() IPeerSet {
// TODO: make record depending on reason.
func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
addr := NewNetAddress(peer.Addr())
sw.Logger.Info("Stopping peer for error", "peer", peer, "error", reason)
sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason)
sw.stopAndRemovePeer(peer, reason)
if peer.IsPersistent() {
@ -395,10 +395,10 @@ func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
peer, err := sw.DialPeerWithAddress(addr, true)
if err != nil {
if i == reconnectAttempts {
sw.Logger.Info("Error reconnecting to peer. Giving up", "tries", i, "error", err)
sw.Logger.Info("Error reconnecting to peer. Giving up", "tries", i, "err", err)
return
}
sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "error", err)
sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err)
time.Sleep(reconnectInterval)
continue
}
@ -442,7 +442,7 @@ func (sw *Switch) listenerRoutine(l Listener) {
// New inbound connection!
err := sw.addPeerWithConnectionAndConfig(inConn, sw.peerConfig)
if err != nil {
sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "error", err)
sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "err", err)
continue
}


+ 6
- 5
p2p/upnp/probe.go View File

@ -1,11 +1,12 @@
package upnp
import (
"errors"
"fmt"
"net"
"time"
"github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
@ -18,26 +19,26 @@ type UPNPCapabilities struct {
func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) {
nat, err := Discover()
if err != nil {
return nil, nil, nil, errors.New(fmt.Sprintf("NAT upnp could not be discovered: %v", err))
return nil, nil, nil, errors.Errorf("NAT upnp could not be discovered: %v", err)
}
logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP))
ext, err := nat.GetExternalAddress()
if err != nil {
return nat, nil, nil, errors.New(fmt.Sprintf("External address error: %v", err))
return nat, nil, nil, errors.Errorf("External address error: %v", err)
}
logger.Info(cmn.Fmt("External address: %v", ext))
port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
if err != nil {
return nat, nil, ext, errors.New(fmt.Sprintf("Port mapping error: %v", err))
return nat, nil, ext, errors.Errorf("Port mapping error: %v", err)
}
logger.Info(cmn.Fmt("Port mapping mapped: %v", port))
// also run the listener, open for all remote addresses.
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
if err != nil {
return nat, nil, ext, errors.New(fmt.Sprintf("Error establishing listener: %v", err))
return nat, nil, ext, errors.Errorf("Error establishing listener: %v", err)
}
return nat, listener, ext, nil
}


+ 5
- 5
p2p/upnp/upnp.go View File

@ -65,14 +65,14 @@ func Discover() (nat NAT, err error) {
return
}
var n int
n, _, err = socket.ReadFromUDP(answerBytes)
_, _, err = socket.ReadFromUDP(answerBytes)
for {
n, _, err = socket.ReadFromUDP(answerBytes)
if err != nil {
break
}
answer := string(answerBytes[0:n])
if strings.Index(answer, st) < 0 {
if !strings.Contains(answer, st) {
continue
}
// HTTP header field names are case-insensitive.
@ -153,7 +153,7 @@ type Root struct {
func getChildDevice(d *Device, deviceType string) *Device {
dl := d.DeviceList.Device
for i := 0; i < len(dl); i++ {
if strings.Index(dl[i].DeviceType, deviceType) >= 0 {
if strings.Contains(dl[i].DeviceType, deviceType) {
return &dl[i]
}
}
@ -163,7 +163,7 @@ func getChildDevice(d *Device, deviceType string) *Device {
func getChildService(d *Device, serviceType string) *UPNPService {
sl := d.ServiceList.Service
for i := 0; i < len(sl); i++ {
if strings.Index(sl[i].ServiceType, serviceType) >= 0 {
if strings.Contains(sl[i].ServiceType, serviceType) {
return &sl[i]
}
}
@ -211,7 +211,7 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) {
return
}
a := &root.Device
if strings.Index(a.DeviceType, "InternetGatewayDevice:1") < 0 {
if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") {
err = errors.New("No InternetGatewayDevice")
return
}


+ 66
- 1
rpc/client/event_test.go View File

@ -31,7 +31,39 @@ func TestHeaderEvents(t *testing.T) {
}
}
func TestTxEvents(t *testing.T) {
func TestBlockEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
// start the node for this test if it isn't already running
if !c.IsRunning() {
// start it now and stop it when the test is done
st, err := c.Start()
require.Nil(err, "%d: %+v", i, err)
require.True(st, "%d", i)
defer c.Stop()
}
// listen for a new block; ensure height increases by 1
var firstBlockHeight int
for i := 0; i < 3; i++ {
evtTyp := types.EventStringNewBlock()
evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second)
require.Nil(err, "%d: %+v", i, err)
blockEvent, ok := evt.Unwrap().(types.EventDataNewBlock)
require.True(ok, "%d: %#v", i, evt)
block := blockEvent.Block
if i == 0 {
firstBlockHeight = block.Header.Height
continue
}
require.Equal(block.Header.Height, firstBlockHeight+i)
}
}
}
func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
// start the node for this test if it isn't already running
@ -63,3 +95,36 @@ func TestTxEvents(t *testing.T) {
require.True(txe.Code.IsOK())
}
}
func TestTxEventsSentWithBroadcastTxSync(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
// start the node for this test if it isn't already running
if !c.IsRunning() {
// start it now and stop it when the test is done
st, err := c.Start()
require.Nil(err, "%d: %+v", i, err)
require.True(st, "%d", i)
defer c.Stop()
}
// make the tx
_, _, tx := merktest.MakeTxKV()
evtTyp := types.EventStringTx(types.Tx(tx))
// send sync
txres, err := c.BroadcastTxSync(tx)
require.Nil(err, "%+v", err)
require.True(txres.Code.IsOK())
// and wait for confirmation
evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second)
require.Nil(err, "%d: %+v", i, err)
// and make sure it has the proper info
txe, ok := evt.Unwrap().(types.EventDataTx)
require.True(ok, "%d: %#v", i, evt)
// make sure this is the proper tx
require.EqualValues(tx, txe.Tx)
require.True(txe.Code.IsOK())
}
}

+ 2
- 2
rpc/client/mock/abci.go View File

@ -40,7 +40,7 @@ func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit
func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
// and this gets writen in a background thread...
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}
@ -49,7 +49,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error
func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
// and this gets writen in a background thread...
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}


+ 91
- 0
rpc/client/rpc_test.go View File

@ -10,6 +10,7 @@ import (
merktest "github.com/tendermint/merkleeyes/testutil"
"github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
func getHTTPClient() *client.HTTP {
@ -182,3 +183,93 @@ func TestAppCalls(t *testing.T) {
}
}
}
func TestBroadcastTxSync(t *testing.T) {
require := require.New(t)
mempool := node.MempoolReactor().Mempool
initMempoolSize := mempool.Size()
for i, c := range GetClients() {
_, _, tx := merktest.MakeTxKV()
bres, err := c.BroadcastTxSync(tx)
require.Nil(err, "%d: %+v", i, err)
require.True(bres.Code.IsOK())
require.Equal(initMempoolSize+1, mempool.Size())
txs := mempool.Reap(1)
require.EqualValues(tx, txs[0])
mempool.Flush()
}
}
func TestBroadcastTxCommit(t *testing.T) {
require := require.New(t)
mempool := node.MempoolReactor().Mempool
for i, c := range GetClients() {
_, _, tx := merktest.MakeTxKV()
bres, err := c.BroadcastTxCommit(tx)
require.Nil(err, "%d: %+v", i, err)
require.True(bres.CheckTx.Code.IsOK())
require.True(bres.DeliverTx.Code.IsOK())
require.Equal(0, mempool.Size())
}
}
func TestTx(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// first we broadcast a tx
c := getHTTPClient()
_, _, tx := merktest.MakeTxKV()
bres, err := c.BroadcastTxCommit(tx)
require.Nil(err, "%+v", err)
txHeight := bres.Height
txHash := bres.Hash
anotherTxHash := types.Tx("a different tx").Hash()
cases := []struct {
valid bool
hash []byte
prove bool
}{
// only valid if correct hash provided
{true, txHash, false},
{true, txHash, true},
{false, anotherTxHash, false},
{false, anotherTxHash, true},
{false, nil, false},
{false, nil, true},
}
for i, c := range GetClients() {
for j, tc := range cases {
t.Logf("client %d, case %d", i, j)
// now we query for the tx.
// since there's only one tx, we know index=0.
ptx, err := c.Tx(tc.hash, tc.prove)
if !tc.valid {
require.NotNil(err)
} else {
require.Nil(err, "%+v", err)
assert.Equal(txHeight, ptx.Height)
assert.EqualValues(tx, ptx.Tx)
assert.Equal(0, ptx.Index)
assert.True(ptx.TxResult.Code.IsOK())
// time to verify the proof
proof := ptx.Proof
if tc.prove && assert.EqualValues(tx, proof.Data) {
assert.True(proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash))
}
}
}
}
}

+ 1
- 1
rpc/core/mempool.go View File

@ -75,7 +75,7 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
// Wait for the tx to be included in a block,
// timeout after something reasonable.
// TODO: configureable?
// TODO: configurable?
timer := time.NewTimer(60 * 2 * time.Second)
select {
case deliverTxRes := <-deliverTxResCh:


+ 14
- 5
rpc/lib/client/http_client.go View File

@ -13,7 +13,6 @@ import (
"github.com/pkg/errors"
types "github.com/tendermint/tendermint/rpc/lib/types"
cmn "github.com/tendermint/tmlibs/common"
)
// HTTPClient is a common interface for JSONRPCClient and URIClient.
@ -23,13 +22,23 @@ type HTTPClient interface {
// TODO: Deprecate support for IP:PORT or /path/to/socket
func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn, error)) {
parts := strings.SplitN(remoteAddr, "://", 2)
var protocol, address string
if len(parts) != 2 {
cmn.PanicSanity(fmt.Sprintf("Expected fully formed listening address, including the tcp:// or unix:// prefix, given %s", remoteAddr))
} else {
if len(parts) == 1 {
// default to tcp if nothing specified
protocol, address = "tcp", remoteAddr
} else if len(parts) == 2 {
protocol, address = parts[0], parts[1]
} else {
// invalid address; return an error message and a dialer that always fails
msg := fmt.Sprintf("Invalid addr: %s", remoteAddr)
return msg, func(_ string, _ string) (net.Conn, error) {
return nil, errors.New(msg)
}
}
// accept http as an alias for tcp
if protocol == "http" {
protocol = "tcp"
}
trimmedAddress := strings.Replace(address, "/", ".", -1) // replace / with . for http requests (dummy domain)
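
The dialer change above makes the RPC address scheme optional. As a rough illustration (not the library's actual code), the normalization behaves roughly as sketched below; the sample addresses are made up for the example:

package main

import (
	"fmt"
	"strings"
)

// normalize mirrors the parsing sketched above: an address without a scheme
// defaults to tcp://, and http:// is accepted as an alias for tcp://.
func normalize(remoteAddr string) (protocol, address string) {
	parts := strings.SplitN(remoteAddr, "://", 2)
	if len(parts) == 1 {
		protocol, address = "tcp", remoteAddr
	} else {
		protocol, address = parts[0], parts[1]
	}
	if protocol == "http" {
		protocol = "tcp"
	}
	return protocol, address
}

func main() {
	for _, addr := range []string{"localhost:46657", "http://localhost:46657", "unix:///var/run/tm.sock"} {
		p, a := normalize(addr)
		fmt.Printf("%s -> %s://%s\n", addr, p, a)
	}
}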


+ 2
- 2
rpc/lib/client/ws_client.go View File

@ -100,14 +100,14 @@ func (wsc *WSClient) receiveEventsRoutine() {
for {
_, data, err := wsc.ReadMessage()
if err != nil {
wsc.Logger.Info("WSClient failed to read message", "error", err, "data", string(data))
wsc.Logger.Info("WSClient failed to read message", "err", err, "data", string(data))
wsc.Stop()
break
} else {
var response types.RPCResponse
err := json.Unmarshal(data, &response)
if err != nil {
wsc.Logger.Info("WSClient failed to parse message", "error", err, "data", string(data))
wsc.Logger.Info("WSClient failed to parse message", "err", err, "data", string(data))
wsc.ErrorsCh <- err
continue
}


+ 2
- 2
rpc/lib/rpc_test.go View File

@ -245,11 +245,11 @@ func TestServersAndClientsBasic(t *testing.T) {
fmt.Printf("=== testing server on %s using %v client", addr, cl1)
testWithHTTPClient(t, cl1)
cl2 := client.NewJSONRPCClient(tcpAddr)
cl2 := client.NewJSONRPCClient(addr)
fmt.Printf("=== testing server on %s using %v client", addr, cl2)
testWithHTTPClient(t, cl2)
cl3 := client.NewWSClient(tcpAddr, websocketEndpoint)
cl3 := client.NewWSClient(addr, websocketEndpoint)
_, err := cl3.Start()
require.Nil(t, err)
fmt.Printf("=== testing server on %s using %v client", addr, cl3)


+ 19
- 9
rpc/lib/server/handlers.go View File

@ -338,7 +338,7 @@ func nonJsonToArg(ty reflect.Type, arg string) (reflect.Value, error, bool) {
const (
writeChanCapacity = 1000
wsWriteTimeoutSeconds = 30 // each write times out after this
wsWriteTimeoutSeconds = 30 // each write times out after this.
wsReadTimeoutSeconds = 30 // connection times out if we haven't received *anything* in this long, not even pings.
wsPingTickerSeconds = 10 // send a ping every PingTickerSeconds.
)
@ -510,7 +510,10 @@ func (wsc *wsConnection) readRoutine() {
continue
}
returns := rpcFunc.f.Call(args)
wsc.Logger.Info("WSJSONRPC", "method", request.Method, "args", args, "returns", returns)
// TODO: Need to encode args/returns to string if we want to log them
wsc.Logger.Info("WSJSONRPC", "method", request.Method)
result, err := unreflectResult(returns)
if err != nil {
wsc.WriteRPCResponse(types.NewRPCResponse(request.ID, nil, err.Error()))
@ -532,20 +535,19 @@ func (wsc *wsConnection) writeRoutine() {
case <-wsc.Quit:
return
case <-wsc.pingTicker.C:
err := wsc.baseConn.WriteMessage(websocket.PingMessage, []byte{})
err := wsc.writeMessageWithDeadline(websocket.PingMessage, []byte{})
if err != nil {
wsc.Logger.Error("Failed to write ping message on websocket", "error", err)
wsc.Logger.Error("Failed to write ping message on websocket", "err", err)
wsc.Stop()
return
}
case msg := <-wsc.writeChan:
jsonBytes, err := json.MarshalIndent(msg, "", " ")
if err != nil {
wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "error", err)
wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err)
} else {
wsc.baseConn.SetWriteDeadline(time.Now().Add(time.Second * wsWriteTimeoutSeconds))
if err = wsc.baseConn.WriteMessage(websocket.TextMessage, jsonBytes); err != nil {
wsc.Logger.Error("Failed to write response on websocket", "error", err)
if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil {
wsc.Logger.Error("Failed to write response on websocket", "err", err)
wsc.Stop()
return
}
@ -554,6 +556,13 @@ func (wsc *wsConnection) writeRoutine() {
}
}
// All writes to the websocket must (re)set the write deadline.
// If some writes don't set it while others do, they may timeout incorrectly (https://github.com/tendermint/tendermint/issues/553)
func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error {
wsc.baseConn.SetWriteDeadline(time.Now().Add(time.Second * wsWriteTimeoutSeconds))
return wsc.baseConn.WriteMessage(msgType, msg)
}
//----------------------------------------
// Main manager for all websocket connections
@ -591,12 +600,13 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ
wsConn, err := wm.Upgrade(w, r, nil)
if err != nil {
// TODO - return http error
wm.logger.Error("Failed to upgrade to websocket connection", "error", err)
wm.logger.Error("Failed to upgrade to websocket connection", "err", err)
return
}
// register connection
con := NewWSConnection(wsConn, wm.funcMap, wm.evsw)
con.SetLogger(wm.logger)
wm.logger.Info("New websocket connection", "remote", con.remoteAddr)
con.Start() // Blocking
}


+ 1
- 1
rpc/lib/server/http_server.go View File

@ -97,7 +97,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler
WriteRPCResponseHTTP(rww, res)
} else {
// For the rest,
logger.Error("Panic in RPC HTTP handler", "error", e, "stack", string(debug.Stack()))
logger.Error("Panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack()))
rww.WriteHeader(http.StatusInternalServerError)
WriteRPCResponseHTTP(rww, types.NewRPCResponse("", nil, fmt.Sprintf("Internal Server Error: %v", e)))
}


+ 0
- 352
rpc/test/client_test.go View File

@ -1,352 +0,0 @@
package rpctest
import (
"bytes"
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-wire/data"
. "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpc "github.com/tendermint/tendermint/rpc/lib/client"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types"
)
//--------------------------------------------------------------------------------
// Test the HTTP client
// These tests assume the dummy app
//--------------------------------------------------------------------------------
//--------------------------------------------------------------------------------
// status
func TestURIStatus(t *testing.T) {
testStatus(t, GetURIClient())
}
func TestJSONStatus(t *testing.T) {
testStatus(t, GetJSONClient())
}
func testStatus(t *testing.T, client rpc.HTTPClient) {
moniker := GetConfig().Moniker
result := new(ctypes.ResultStatus)
_, err := client.Call("status", map[string]interface{}{}, result)
require.Nil(t, err)
assert.Equal(t, moniker, result.NodeInfo.Moniker)
}
//--------------------------------------------------------------------------------
// broadcast tx sync
// random bytes (excluding byte('='))
func randBytes(t *testing.T) []byte {
n := rand.Intn(10) + 2
buf := make([]byte, n)
_, err := crand.Read(buf)
require.Nil(t, err)
return bytes.Replace(buf, []byte("="), []byte{100}, -1)
}
func TestURIBroadcastTxSync(t *testing.T) {
testBroadcastTxSync(t, GetURIClient())
}
func TestJSONBroadcastTxSync(t *testing.T) {
testBroadcastTxSync(t, GetJSONClient())
}
func testBroadcastTxSync(t *testing.T, client rpc.HTTPClient) {
mem := node.MempoolReactor().Mempool
initMemSize := mem.Size()
result := new(ctypes.ResultBroadcastTx)
tx := randBytes(t)
_, err := client.Call("broadcast_tx_sync", map[string]interface{}{"tx": tx}, result)
require.Nil(t, err)
require.Equal(t, abci.CodeType_OK, result.Code)
require.Equal(t, initMemSize+1, mem.Size())
txs := mem.Reap(1)
require.EqualValues(t, tx, txs[0])
mem.Flush()
}
//--------------------------------------------------------------------------------
// query
func testTxKV(t *testing.T) ([]byte, []byte, types.Tx) {
k := randBytes(t)
v := randBytes(t)
return k, v, types.Tx(Fmt("%s=%s", k, v))
}
func sendTx(t *testing.T, client rpc.HTTPClient) ([]byte, []byte) {
result := new(ctypes.ResultBroadcastTxCommit)
k, v, tx := testTxKV(t)
_, err := client.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result)
require.Nil(t, err)
require.NotNil(t, 0, result.DeliverTx, "%#v", result)
require.EqualValues(t, 0, result.CheckTx.Code, "%#v", result)
require.EqualValues(t, 0, result.DeliverTx.Code, "%#v", result)
return k, v
}
func TestURIABCIQuery(t *testing.T) {
testABCIQuery(t, GetURIClient())
}
func TestJSONABCIQuery(t *testing.T) {
testABCIQuery(t, GetJSONClient())
}
func testABCIQuery(t *testing.T, client rpc.HTTPClient) {
k, _ := sendTx(t, client)
time.Sleep(time.Millisecond * 500)
result := new(ctypes.ResultABCIQuery)
_, err := client.Call("abci_query",
map[string]interface{}{"path": "", "data": data.Bytes(k), "prove": false}, result)
require.Nil(t, err)
require.EqualValues(t, 0, result.Code)
// XXX: specific to value returned by the dummy
require.NotEqual(t, 0, len(result.Value))
}
//--------------------------------------------------------------------------------
// broadcast tx commit
func TestURIBroadcastTxCommit(t *testing.T) {
testBroadcastTxCommit(t, GetURIClient())
}
func TestJSONBroadcastTxCommit(t *testing.T) {
testBroadcastTxCommit(t, GetJSONClient())
}
func testBroadcastTxCommit(t *testing.T, client rpc.HTTPClient) {
require := require.New(t)
result := new(ctypes.ResultBroadcastTxCommit)
tx := randBytes(t)
_, err := client.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result)
require.Nil(err)
checkTx := result.CheckTx
require.Equal(abci.CodeType_OK, checkTx.Code)
deliverTx := result.DeliverTx
require.Equal(abci.CodeType_OK, deliverTx.Code)
mem := node.MempoolReactor().Mempool
require.Equal(0, mem.Size())
// TODO: find tx in block
}
//--------------------------------------------------------------------------------
// query tx
func TestURITx(t *testing.T) {
testTx(t, GetURIClient(), true)
core.SetTxIndexer(&null.TxIndex{})
defer core.SetTxIndexer(node.ConsensusState().GetState().TxIndexer)
testTx(t, GetURIClient(), false)
}
func TestJSONTx(t *testing.T) {
testTx(t, GetJSONClient(), true)
core.SetTxIndexer(&null.TxIndex{})
testTx(t, GetJSONClient(), false)
core.SetTxIndexer(node.ConsensusState().GetState().TxIndexer)
}
func testTx(t *testing.T, client rpc.HTTPClient, withIndexer bool) {
assert, require := assert.New(t), require.New(t)
// first we broadcast a tx
result := new(ctypes.ResultBroadcastTxCommit)
txBytes := randBytes(t)
tx := types.Tx(txBytes)
_, err := client.Call("broadcast_tx_commit", map[string]interface{}{"tx": txBytes}, result)
require.Nil(err)
checkTx := result.CheckTx
require.Equal(abci.CodeType_OK, checkTx.Code)
deliverTx := result.DeliverTx
require.Equal(abci.CodeType_OK, deliverTx.Code)
mem := node.MempoolReactor().Mempool
require.Equal(0, mem.Size())
txHash := tx.Hash()
txHash2 := types.Tx("a different tx").Hash()
cases := []struct {
valid bool
hash []byte
prove bool
}{
// only valid if correct hash provided
{true, txHash, false},
{true, txHash, true},
{false, txHash2, false},
{false, txHash2, true},
{false, nil, false},
{false, nil, true},
}
for i, tc := range cases {
idx := fmt.Sprintf("%d", i)
// now we query for the tx.
// since there's only one tx, we know index=0.
result2 := new(ctypes.ResultTx)
query := map[string]interface{}{
"hash": tc.hash,
"prove": tc.prove,
}
_, err = client.Call("tx", query, result2)
valid := (withIndexer && tc.valid)
if !valid {
require.NotNil(err, idx)
} else {
require.Nil(err, idx)
assert.Equal(tx, result2.Tx, idx)
assert.Equal(result.Height, result2.Height, idx)
assert.Equal(0, result2.Index, idx)
assert.Equal(abci.CodeType_OK, result2.TxResult.Code, idx)
// time to verify the proof
proof := result2.Proof
if tc.prove && assert.Equal(tx, proof.Data, idx) {
assert.True(proof.Proof.Verify(proof.Index, proof.Total, tx.Hash(), proof.RootHash), idx)
}
}
}
}
//--------------------------------------------------------------------------------
// Test the websocket service
var wsTyp = "JSONRPC"
// make a simple connection to the server
func TestWSConnect(t *testing.T) {
wsc := GetWSClient()
wsc.Stop()
}
// receive a new block message
func TestWSNewBlock(t *testing.T) {
wsc := GetWSClient()
eid := types.EventStringNewBlock()
require.Nil(t, wsc.Subscribe(eid))
defer func() {
require.Nil(t, wsc.Unsubscribe(eid))
wsc.Stop()
}()
waitForEvent(t, wsc, eid, true, func() {}, func(eid string, b interface{}) error {
// fmt.Println("Check:", b)
return nil
})
}
// receive a few new block messages in a row, with increasing height
func TestWSBlockchainGrowth(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
wsc := GetWSClient()
eid := types.EventStringNewBlock()
require.Nil(t, wsc.Subscribe(eid))
defer func() {
require.Nil(t, wsc.Unsubscribe(eid))
wsc.Stop()
}()
// listen for NewBlock, ensure height increases by 1
var initBlockN int
for i := 0; i < 3; i++ {
waitForEvent(t, wsc, eid, true, func() {}, func(eid string, eventData interface{}) error {
block := eventData.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
if i == 0 {
initBlockN = block.Header.Height
} else {
if block.Header.Height != initBlockN+i {
return fmt.Errorf("Expected block %d, got block %d", initBlockN+i, block.Header.Height)
}
}
return nil
})
}
}
func TestWSTxEvent(t *testing.T) {
require := require.New(t)
wsc := GetWSClient()
tx := randBytes(t)
// listen for the tx I am about to submit
eid := types.EventStringTx(types.Tx(tx))
require.Nil(wsc.Subscribe(eid))
defer func() {
require.Nil(wsc.Unsubscribe(eid))
wsc.Stop()
}()
// send a tx
result := new(ctypes.ResultBroadcastTx)
_, err := GetJSONClient().Call("broadcast_tx_sync", map[string]interface{}{"tx": tx}, result)
require.Nil(err)
waitForEvent(t, wsc, eid, true, func() {}, func(eid string, b interface{}) error {
evt, ok := b.(types.TMEventData).Unwrap().(types.EventDataTx)
require.True(ok, "Got wrong event type: %#v", b)
require.Equal(tx, []byte(evt.Tx), "Returned different tx")
require.Equal(abci.CodeType_OK, evt.Code)
return nil
})
}
/* TODO: this with dummy app..
func TestWSDoubleFire(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
con := newWSCon(t)
eid := types.EventStringAccInput(user[0].Address)
subscribe(t, con, eid)
defer func() {
unsubscribe(t, con, eid)
con.Close()
}()
amt := int64(100)
toAddr := user[1].Address
// broadcast the transaction, wait to hear about it
waitForEvent(t, con, eid, true, func() {
tx := makeDefaultSendTxSigned(t, wsTyp, toAddr, amt)
broadcastTx(t, wsTyp, tx)
}, func(eid string, b []byte) error {
return nil
})
// but make sure we don't hear about it twice
waitForEvent(t, con, eid, false, func() {
}, func(eid string, b []byte) error {
return nil
})
}*/

+ 2
- 4
rpc/test/grpc_test.go View File

@ -6,15 +6,13 @@ import (
"golang.org/x/net/context"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/rpc/grpc"
core_grpc "github.com/tendermint/tendermint/rpc/grpc"
)
//-------------------------------------------
func TestBroadcastTx(t *testing.T) {
require := require.New(t)
res, err := GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")})
require.Nil(err)
require.Nil(err, "%+v", err)
require.EqualValues(0, res.CheckTx.Code)
require.EqualValues(0, res.DeliverTx.Code)
}

+ 0
- 92
rpc/test/helpers.go View File

@ -1,25 +1,19 @@
package rpctest
import (
"encoding/json"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/tmlibs/log"
abci "github.com/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
nm "github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/proxy"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
core_grpc "github.com/tendermint/tendermint/rpc/grpc"
client "github.com/tendermint/tendermint/rpc/lib/client"
"github.com/tendermint/tendermint/types"
)
@ -65,32 +59,11 @@ func GetConfig() *cfg.Config {
return config
}
// GetURIClient gets a uri client pointing to the test tendermint rpc
func GetURIClient() *client.URIClient {
rpcAddr := GetConfig().RPC.ListenAddress
return client.NewURIClient(rpcAddr)
}
// GetJSONClient gets a http/json client pointing to the test tendermint rpc
func GetJSONClient() *client.JSONRPCClient {
rpcAddr := GetConfig().RPC.ListenAddress
return client.NewJSONRPCClient(rpcAddr)
}
func GetGRPCClient() core_grpc.BroadcastAPIClient {
grpcAddr := config.RPC.GRPCListenAddress
return core_grpc.StartGRPCClient(grpcAddr)
}
func GetWSClient() *client.WSClient {
rpcAddr := GetConfig().RPC.ListenAddress
wsc := client.NewWSClient(rpcAddr, "/websocket")
if _, err := wsc.Start(); err != nil {
panic(err)
}
return wsc
}
// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized
func StartTendermint(app abci.Application) *nm.Node {
node := NewTendermint(app)
@ -111,68 +84,3 @@ func NewTendermint(app abci.Application) *nm.Node {
node := nm.NewNode(config, privValidator, papp, logger)
return node
}
//--------------------------------------------------------------------------------
// Utilities for testing the websocket service
// wait for an event; do things that might trigger events, and check them when they are received
// the check function takes an event id and the byte slice read off the ws
func waitForEvent(t *testing.T, wsc *client.WSClient, eventid string, dieOnTimeout bool, f func(), check func(string, interface{}) error) {
// goroutine to wait for websocket msg
goodCh := make(chan interface{})
errCh := make(chan error)
// Read message
go func() {
var err error
LOOP:
for {
select {
case r := <-wsc.ResultsCh:
result := new(ctypes.ResultEvent)
err = json.Unmarshal(r, result)
if err != nil {
// can't distinguish between error and wrong type ...
continue
}
if result.Name == eventid {
goodCh <- result.Data
break LOOP
}
case err := <-wsc.ErrorsCh:
errCh <- err
break LOOP
case <-wsc.Quit:
break LOOP
}
}
}()
// do stuff (transactions)
f()
// wait for an event or timeout
timeout := time.NewTimer(10 * time.Second)
select {
case <-timeout.C:
if dieOnTimeout {
wsc.Stop()
require.True(t, false, "%s event was not received in time", eventid)
}
// else that's great, we didn't hear the event
// and we shouldn't have
case eventData := <-goodCh:
if dieOnTimeout {
// message was received and expected
// run the check
require.Nil(t, check(eventid, eventData))
} else {
wsc.Stop()
require.True(t, false, "%s event was not expected", eventid)
}
case err := <-errCh:
panic(err) // Show the stack trace.
}
}
//--------------------------------------------------------------------------------

+ 1
- 1
scripts/tendermint-builder/Dockerfile View File

@ -1,4 +1,4 @@
FROM golang:1.7.4
FROM golang:1.8.3
RUN apt-get update && apt-get install -y --no-install-recommends \
zip \


+ 5
- 5
state/execution.go View File

@ -85,7 +85,7 @@ func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnCo
// Begin block
err := proxyAppConn.BeginBlockSync(block.Hash(), types.TM2PB.Header(block.Header))
if err != nil {
logger.Error("Error in proxyAppConn.BeginBlock", "error", err)
logger.Error("Error in proxyAppConn.BeginBlock", "err", err)
return nil, err
}
@ -100,13 +100,13 @@ func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnCo
// End block
abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(uint64(block.Height))
if err != nil {
logger.Error("Error in proxyAppConn.EndBlock", "error", err)
logger.Error("Error in proxyAppConn.EndBlock", "err", err)
return nil, err
}
valDiff := abciResponses.EndBlock.Diffs
logger.Info("Executed block", "height", block.Height, "valid txs", validTxs, "invalid txs", invalidTxs)
logger.Info("Executed block", "height", block.Height, "validTxs", validTxs, "invalidTxs", invalidTxs)
if len(valDiff) > 0 {
logger.Info("Update to validator set", "updates", abci.ValidatorsString(valDiff))
}
@ -252,7 +252,7 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl
// Commit block, get hash back
res := proxyAppConn.CommitSync()
if res.IsErr() {
s.logger.Error("Error in proxyAppConn.CommitSync", "error", res)
s.logger.Error("Error in proxyAppConn.CommitSync", "err", res)
return res
}
if res.Log != "" {
@ -298,7 +298,7 @@ func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block
// Commit block, get hash back
res := appConnConsensus.CommitSync()
if res.IsErr() {
logger.Error("Error in proxyAppConn.CommitSync", "error", res)
logger.Error("Error in proxyAppConn.CommitSync", "err", res)
return nil, res
}
if res.Log != "" {


+ 1
- 5
state/state.go View File

@ -46,10 +46,6 @@ type State struct {
TxIndexer txindex.TxIndexer `json:"-"` // Transaction indexer.
// Intermediate results from processing
// Persisted separately from the state
abciResponses *ABCIResponses
logger log.Logger
}
@ -148,7 +144,7 @@ func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader typ
// update the validator set with the latest abciResponses
err := updateValidators(nextValSet, abciResponses.EndBlock.Diffs)
if err != nil {
s.logger.Error("Error changing validator set", "error", err)
s.logger.Error("Error changing validator set", "err", err)
// TODO: err or carry on?
}


+ 5
- 2
state/txindex/kv/kv_test.go View File

@ -8,9 +8,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
db "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
db "github.com/tendermint/tmlibs/db"
)
func TestTxIndex(t *testing.T) {
@ -45,8 +45,8 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
batch := txindex.NewBatch(txsCount)
for i := 0; i < txsCount; i++ {
txResult.Index += 1
batch.Add(*txResult)
txResult.Index += 1
}
b.ResetTimer()
@ -54,6 +54,9 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
for n := 0; n < b.N; n++ {
err = indexer.AddBatch(batch)
}
if err != nil {
b.Fatal(err)
}
}
func BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) }


+ 2
- 3
test/docker/Dockerfile View File

@ -1,5 +1,4 @@
# Pull base image.
FROM golang:1.7.4
FROM golang:1.8.3
# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list
@ -9,7 +8,7 @@ RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq bsdmainutils vim-common psmisc netcat curl
# Setup tendermint repo
# Setup tendermint repo
ENV REPO $GOPATH/src/github.com/tendermint/tendermint
WORKDIR $REPO


+ 1
- 4
types/block.go View File

@ -282,10 +282,7 @@ func (commit *Commit) GetByIndex(index int) *Vote {
}
func (commit *Commit) IsCommit() bool {
if len(commit.Precommits) == 0 {
return false
}
return true
return len(commit.Precommits) != 0
}
func (commit *Commit) ValidateBasic() error {


+ 33
- 1
types/genesis.go View File

@ -2,8 +2,11 @@ package types
import (
"encoding/json"
"io/ioutil"
"time"
"github.com/pkg/errors"
"github.com/tendermint/go-crypto"
"github.com/tendermint/go-wire/data"
cmn "github.com/tendermint/tmlibs/common"
@ -17,12 +20,14 @@ var GenDocKey = []byte("GenDocKey")
//------------------------------------------------------------
// core types for a genesis definition
// GenesisValidator is an initial validator.
type GenesisValidator struct {
PubKey crypto.PubKey `json:"pub_key"`
Amount int64 `json:"amount"`
Name string `json:"name"`
}
// GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set.
type GenesisDoc struct {
GenesisTime time.Time `json:"genesis_time"`
ChainID string `json:"chain_id"`
@ -30,7 +35,7 @@ type GenesisDoc struct {
AppHash data.Bytes `json:"app_hash"`
}
// Utility method for saving GenensisDoc as JSON file.
// SaveAs is a utility method for saving a GenesisDoc as a JSON file.
func (genDoc *GenesisDoc) SaveAs(file string) error {
genDocBytes, err := json.Marshal(genDoc)
if err != nil {
@ -39,11 +44,38 @@ func (genDoc *GenesisDoc) SaveAs(file string) error {
return cmn.WriteFile(file, genDocBytes, 0644)
}
// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
func (genDoc *GenesisDoc) ValidatorHash() []byte {
vals := make([]*Validator, len(genDoc.Validators))
for i, v := range genDoc.Validators {
vals[i] = NewValidator(v.PubKey, v.Amount)
}
vset := NewValidatorSet(vals)
return vset.Hash()
}
//------------------------------------------------------------
// Make genesis state from file
// GenesisDocFromJSON unmarshalls JSON data into a GenesisDoc.
func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
genDoc := GenesisDoc{}
err := json.Unmarshal(jsonBlob, &genDoc)
return &genDoc, err
}
// GenesisDocFromFile reads JSON data from a file and unmarshalls it into a GenesisDoc.
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
return nil, errors.Wrap(err, "Couldn't read GenesisDoc file")
}
genDoc, err := GenesisDocFromJSON(jsonBlob)
if err != nil {
return nil, errors.Wrap(err, "Error reading GenesisDoc")
}
if genDoc.ChainID == "" {
return nil, errors.Errorf("Genesis doc %v must include non-empty chain_id", genDocFile)
}
return genDoc, nil
}
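
A minimal sketch of how the new GenesisDoc helpers could be used together; the genesis.json path is only an assumption for the example:

package main

import (
	"fmt"
	"log"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// Load and validate the genesis file (errors if chain_id is empty).
	genDoc, err := types.GenesisDocFromFile("genesis.json")
	if err != nil {
		log.Fatal(err)
	}
	// Hash of the genesis validator set, e.g. to check against a block
	// header's validators hash.
	fmt.Printf("chain_id=%s validator_hash=%X\n", genDoc.ChainID, genDoc.ValidatorHash())
}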

+ 4
- 4
types/validator_set.go View File

@ -85,14 +85,14 @@ func (valSet *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
return idx != len(valSet.Validators) && bytes.Compare(valSet.Validators[idx].Address, address) == 0
return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
}
func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx != len(valSet.Validators) && bytes.Compare(valSet.Validators[idx].Address, address) == 0 {
if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
return idx, valSet.Validators[idx].Copy()
} else {
return 0, nil
@ -159,7 +159,7 @@ func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
valSet.Proposer = nil
valSet.totalVotingPower = 0
return true
} else if bytes.Compare(valSet.Validators[idx].Address, val.Address) == 0 {
} else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
return false
} else {
newValidators := make([]*Validator, len(valSet.Validators)+1)
@ -191,7 +191,7 @@ func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) || bytes.Compare(valSet.Validators[idx].Address, address) != 0 {
if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
return nil, false
} else {
removedVal := valSet.Validators[idx]


+ 2
- 2
version/version.go View File

@ -2,11 +2,11 @@ package version
const Maj = "0"
const Min = "10"
const Fix = "0"
const Fix = "1"
var (
// The full version string
Version = "0.10.0"
Version = "0.10.1"
// GitCommit is set with --ldflags "-X main.gitCommit=$(git rev-parse HEAD)"
GitCommit string

