Merge pull request #451 from tendermint/release-v0.9.1

Release v0.9.1
Ethan Buchman authored 8 years ago, committed by GitHub
Parent commit: 44f25864d9
77 changed files with 1861 additions and 718 deletions
1. CHANGELOG.md (+41, -3)
2. DOCKER/Dockerfile (+7, -6)
3. DOCKER/Dockerfile.develop (+1, -1)
4. DOCKER/README.md (+3, -2)
5. Makefile (+6, -6)
6. README.md (+3, -1)
7. benchmarks/simu/counter.go (+3, -1)
8. blockchain/pool.go (+3, -3)
9. blockchain/pool_test.go (+2, -4)
10. blockchain/reactor.go (+0, -1)
11. cmd/tendermint/commands/gen_validator.go (+27, -0)
12. cmd/tendermint/commands/init.go (+14, -2)
13. cmd/tendermint/commands/probe_upnp.go (+14, -2)
14. cmd/tendermint/commands/replay.go (+40, -0)
15. cmd/tendermint/commands/reset_priv_validator.go (+62, -0)
16. cmd/tendermint/commands/root.go (+31, -0)
17. cmd/tendermint/commands/run_node.go (+124, -0)
18. cmd/tendermint/commands/show_validator.go (+14, -2)
19. cmd/tendermint/commands/testnet.go (+93, -0)
20. cmd/tendermint/commands/version.go (+21, -0)
21. cmd/tendermint/flags.go (+0, -66)
22. cmd/tendermint/gen_validator.go (+0, -15)
23. cmd/tendermint/log.go (+0, -7)
24. cmd/tendermint/main.go (+3, -67)
25. cmd/tendermint/reset_priv_validator.go (+0, -33)
26. cmd/tendermint/run_node.go (+0, -59)
27. config/tendermint/config.go (+10, -0)
28. config/tendermint_test/config.go (+3, -0)
29. consensus/replay.go (+94, -43)
30. consensus/replay_file.go (+0, -2)
31. consensus/replay_test.go (+1, -1)
32. consensus/state.go (+35, -18)
33. consensus/test_data/build.sh (+4, -4)
34. consensus/test_data/empty_block.cswal (+1, -1)
35. consensus/test_data/many_blocks.cswal (+6, -6)
36. consensus/test_data/small_block1.cswal (+1, -1)
37. consensus/test_data/small_block2.cswal (+1, -1)
38. consensus/ticker.go (+0, -1)
39. consensus/wal.go (+3, -10)
40. glide.lock (+42, -24)
41. glide.yaml (+3, -0)
42. node/node.go (+36, -15)
43. proxy/multi_app_conn.go (+0, -1)
44. rpc/client/httpclient.go (+31, -14)
45. rpc/client/interface.go (+1, -0)
46. rpc/client/localclient.go (+4, -0)
47. rpc/client/mock/abci.go (+2, -2)
48. rpc/client/rpc_test.go (+22, -13)
49. rpc/core/mempool.go (+10, -5)
50. rpc/core/net.go (+10, -1)
51. rpc/core/pipe.go (+11, -5)
52. rpc/core/routes.go (+31, -115)
53. rpc/core/tx.go (+43, -0)
54. rpc/core/types/responses.go (+30, -0)
55. rpc/core/types/responses_test.go (+38, -0)
56. rpc/test/client_test.go (+128, -54)
57. rpc/test/helpers.go (+4, -4)
58. scripts/install_abci_apps.sh (+1, -1)
59. scripts/publish.sh (+20, -0)
60. state/execution.go (+83, -67)
61. state/execution_test.go (+90, -0)
62. state/state.go (+92, -5)
63. state/state_test.go (+31, -0)
64. state/txindex/indexer.go (+57, -0)
65. state/txindex/kv/kv.go (+56, -0)
66. state/txindex/kv/kv_test.go (+63, -0)
67. state/txindex/null/null.go (+21, -0)
68. test/app/test.sh (+6, -6)
69. test/p2p/README.md (+1, -1)
70. test/p2p/peer.sh (+3, -2)
71. test/persist/test_failure_indices.sh (+3, -3)
72. test/persist/test_simple.sh (+3, -3)
73. test/test_libs.sh (+2, -0)
74. types/events.go (+7, -6)
75. types/tx.go (+81, -0)
76. types/tx_test.go (+122, -0)
77. version/version.go (+2, -2)

CHANGELOG.md (+41, -3)

@@ -1,5 +1,28 @@
 # Changelog
+## 0.9.1 (April 18, 2017)
+FEATURES:
+- Transaction indexing - txs are indexed by their hash using a simple key-value store; easily extended to more advanced indexers
+- New `/tx?hash=X` endpoint to query for transactions and their DeliverTx result by hash. Optionally returns a proof of the tx's inclusion in the block
+- `tendermint testnet` command initializes files for a testnet
+IMPROVEMENTS:
+- CLI now uses Cobra framework
+- TMROOT is now TMHOME (TMROOT will stop working in 0.10.0)
+- `/broadcast_tx_XXX` also returns the Hash (can be used to query for the tx)
+- `/broadcast_tx_commit` also returns the height the block was committed in
+- ABCIResponses struct persisted to disk before calling Commit; makes handshake replay much cleaner
+- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0)
+- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks
+BUG FIXES:
+- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later
+- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save()
 ## 0.9.0 (March 6, 2017)
 BREAKING CHANGES:
@@ -35,10 +58,9 @@ type BlockMeta struct {
 }
 ```
+- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes.
 - `tendermint gen_validator` command output is now pure JSON
-- `ValidatorSet` data type:
-  - expose a `Proposer` field. Note this means the `Proposer` is persisted with the `State`.
-  - change `.Proposer()` to `.GetProposer()`
 FEATURES:
@@ -218,3 +240,19 @@ BUG FIXES:
 - Various fixes to WAL and replay logic
 - Various race conditions
+## PreHistory
+Strict versioning only began with the release of v0.7.0, in late summer 2016.
+The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year.
+Through the course of 2015, in collaboration with Eris Industries (now Monax Industries),
+many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine.
+That implementation now forms the heart of [ErisDB](https://github.com/eris-ltd/eris-db).
+In the latter half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation.
+By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the
+invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP).
+The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine
+driving an application running in another process.
+The ABCI interface and implementation were iterated on and improved over the course of 2016,
+until versioned history kicked in with v0.7.0.
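The new `/tx` endpoint and the `Hash` returned by `/broadcast_tx_*` (see FEATURES and IMPROVEMENTS above) are easiest to see together. Below is a minimal sketch using only the Go standard library; the RPC address `localhost:46657`, the quoting of the `tx` argument, and the `prove` parameter name are assumptions not spelled out in this diff, so check `rpc/core/routes.go` and `rpc/core/tx.go` for the exact query format.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// 1. Broadcast a transaction and wait for it to be committed.
	//    The response is expected to include the tx Hash and the height it was committed in.
	resp, err := http.Get(`http://localhost:46657/broadcast_tx_commit?tx="abcd"`)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))

	// 2. Query the tx (and its DeliverTx result) by hash via the new /tx endpoint.
	//    Replace HASH with the hash from step 1; prove=true also requests an inclusion proof (assumed parameter name).
	resp, err = http.Get("http://localhost:46657/tx?hash=0xHASH&prove=true")
	if err != nil {
		panic(err)
	}
	body, _ = ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))
}
```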

DOCKER/Dockerfile (+7, -6)

@@ -1,7 +1,8 @@
 FROM alpine:3.5
 # This is the release of tendermint to pull in.
-ENV TM_VERSION 0.8.0
+ENV TM_VERSION 0.9.0
+ENV TM_SHA256SUM 697033ea0f34f8b34a8a2b74c4dd730b47dd4efcfce65e53e953bdae8eb14364
 # Tendermint will be looking for genesis file in /tendermint (unless you change
 # `genesis_file` in config.toml). You can put your config.toml and private
@@ -9,7 +10,7 @@ ENV TM_VERSION 0.8.0
 #
 # The /tendermint/data dir is used by tendermint to store state.
 ENV DATA_ROOT /tendermint
-ENV TMROOT $DATA_ROOT
+ENV TMHOME $DATA_ROOT
 # Set user right away for determinism
 RUN addgroup tmuser && \
@@ -25,11 +26,11 @@ RUN mkdir -p $DATA_ROOT && \
 RUN apk add --no-cache bash curl jq
 RUN apk add --no-cache openssl && \
-wget https://s3-us-west-2.amazonaws.com/tendermint/${TM_VERSION}/tendermint_linux_amd64.zip && \
-echo "83f6bd52055ebc93434a68263c6666a4de41e0e543d0b5a06ad461262c460f4c tendermint_linux_amd64.zip" | sha256sum -c && \
-unzip -d /bin tendermint_linux_amd64.zip && \
+wget https://s3-us-west-2.amazonaws.com/tendermint/${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
+echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
+unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
 apk del openssl && \
-rm -f tendermint_linux_amd64.zip
+rm -f tendermint_${TM_VERSION}_linux_amd64.zip
 # Expose the data directory as a volume since there's mutable state in there
 VOLUME $DATA_ROOT

DOCKER/Dockerfile.develop (+1, -1)

@@ -1,7 +1,7 @@
 FROM alpine:3.5
 ENV DATA_ROOT /tendermint
-ENV TMROOT $DATA_ROOT
+ENV TMHOME $DATA_ROOT
 RUN addgroup tmuser && \
 adduser -S -G tmuser tmuser


DOCKER/README.md (+3, -2)

@@ -1,7 +1,8 @@
 # Supported tags and respective `Dockerfile` links
-- `0.8.0`, `0.8`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
-- `develop` [(Dockerfile)]()
+- `0.9.0`, `0.9`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
+- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
+- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)
 `develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.


Makefile (+6, -6)

@@ -3,7 +3,7 @@ GOTOOLS = \
 github.com/Masterminds/glide
 PACKAGES=$(shell go list ./... | grep -v '/vendor/')
 BUILD_TAGS?=tendermint
-TMROOT = $${TMROOT:-$$HOME/.tendermint}
+TMHOME = $${TMHOME:-$$HOME/.tendermint}
 all: install test
@@ -35,9 +35,9 @@ test100:
 @for i in {1..100}; do make test; done
 draw_deps:
-# requires brew install graphviz
-go get github.com/hirokidaichi/goviz
-goviz -i ./cmd/tendermint | dot -Tpng -o huge.png
+# requires brew install graphviz or apt-get install graphviz
+go get github.com/RobotsAndPencils/goviz
+@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
 list_deps:
 @go list -f '{{join .Deps "\n"}}' ./... | \
@@ -61,8 +61,8 @@ update_deps: tools
 @go get -d -u ./...
 revision:
-etcho `git rev-parse --verify HEAD` > $(TMROOT)/revision
-echo `git rev-parse --verify HEAD` >> $(TMROOT)/revision_history
+-echo `git rev-parse --verify HEAD` > $(TMHOME)/revision
+-echo `git rev-parse --verify HEAD` >> $(TMHOME)/revision_history
 tools:
 go get -u -v $(GOTOOLS)


README.md (+3, -1)

@@ -10,13 +10,15 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874
 )](https://godoc.org/github.com/tendermint/tendermint)
 [![chat](https://img.shields.io/badge/slack-join%20chat-pink.svg)](http://forum.tendermint.com:3000/)
 [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
+[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
 Branch | Tests | Coverage | Report Card
 ----------|-------|----------|-------------
 develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/develop)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/develop)
 master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/master)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/master)
-_NOTE: This is yet pre-alpha non-production-quality software._
+_NOTE: This is alpha software. Please contact us if you intend to run it in production._
 Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language,
 and securely replicates it on many machines.


benchmarks/simu/counter.go (+3, -1)

@@ -37,7 +37,9 @@ func main() {
 for i := 0; ; i++ {
 binary.BigEndian.PutUint64(buf, uint64(i))
 //txBytes := hex.EncodeToString(buf[:n])
-request := rpctypes.NewRPCRequest("fakeid", "broadcast_tx", Arr(buf[:8]))
+request := rpctypes.NewRPCRequest("fakeid",
+"broadcast_tx",
+map[string]interface{}{"tx": buf[:8]})
 reqBytes := wire.JSONBytes(request)
 //fmt.Println("!!", string(reqBytes))
 fmt.Print(".")


blockchain/pool.go (+3, -3)

@@ -63,7 +63,6 @@ func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- s
 }
 func (pool *BlockPool) OnStart() error {
-pool.BaseService.OnStart()
 go pool.makeRequestersRoutine()
 pool.startTime = time.Now()
 return nil
@@ -241,7 +240,9 @@ func (pool *BlockPool) RemovePeer(peerID string) {
 func (pool *BlockPool) removePeer(peerID string) {
 for _, requester := range pool.requesters {
 if requester.getPeerID() == peerID {
-pool.numPending++
+if requester.getBlock() != nil {
+pool.numPending++
+}
 go requester.redo() // pick another peer and ...
 }
 }
@@ -409,7 +410,6 @@ func newBPRequester(pool *BlockPool, height int) *bpRequester {
 }
 func (bpr *bpRequester) OnStart() error {
-bpr.BaseService.OnStart()
 go bpr.requestRoutine()
 return nil
 }


blockchain/pool_test.go (+2, -4)

@@ -35,6 +35,7 @@ func TestBasic(t *testing.T) {
 requestsCh := make(chan BlockRequest, 100)
 pool := NewBlockPool(start, requestsCh, timeoutsCh)
 pool.Start()
+defer pool.Stop()
 // Introduce each peer.
 go func() {
@@ -76,8 +77,6 @@ func TestBasic(t *testing.T) {
 }()
 }
 }
-pool.Stop()
 }
 func TestTimeout(t *testing.T) {
@@ -87,6 +86,7 @@ func TestTimeout(t *testing.T) {
 requestsCh := make(chan BlockRequest, 100)
 pool := NewBlockPool(start, requestsCh, timeoutsCh)
 pool.Start()
+defer pool.Stop()
 for _, peer := range peers {
 log.Info("Peer", "id", peer.id)
@@ -131,6 +131,4 @@ func TestTimeout(t *testing.T) {
 log.Info("TEST: Pulled new BlockRequest", "request", request)
 }
 }
-pool.Stop()
 }

blockchain/reactor.go (+0, -1)

@@ -251,7 +251,6 @@ FOR_LOOP:
 // TODO This is bad, are we zombie?
 cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
 }
-bcR.state.Save()
 }
 }
 continue FOR_LOOP


cmd/tendermint/commands/gen_validator.go (+27, -0)

@@ -0,0 +1,27 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
var genValidatorCmd = &cobra.Command{
Use: "gen_validator",
Short: "Generate new validator keypair",
Run: genValidator,
}
func init() {
RootCmd.AddCommand(genValidatorCmd)
}
func genValidator(cmd *cobra.Command, args []string) {
privValidator := types.GenPrivValidator()
privValidatorJSONBytes := wire.JSONBytesPretty(privValidator)
fmt.Printf(`%v
`, string(privValidatorJSONBytes))
}

cmd/tendermint/init.go → cmd/tendermint/commands/init.go (+14, -2)


cmd/tendermint/probe_upnp.go → cmd/tendermint/commands/probe_upnp.go (+14, -2)


cmd/tendermint/commands/replay.go (+40, -0)

@@ -0,0 +1,40 @@
package commands
import (
"fmt"
"github.com/tendermint/tendermint/consensus"
"github.com/spf13/cobra"
)
var replayCmd = &cobra.Command{
Use: "replay [walfile]",
Short: "Replay messages from WAL",
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], false)
} else {
fmt.Println("replay requires an argument (walfile)")
}
},
}
var replayConsoleCmd = &cobra.Command{
Use: "replay_console [walfile]",
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], true)
} else {
fmt.Println("replay_console requires an argument (walfile)")
}
},
}
func init() {
RootCmd.AddCommand(replayCmd)
RootCmd.AddCommand(replayConsoleCmd)
}

cmd/tendermint/commands/reset_priv_validator.go (+62, -0)

@@ -0,0 +1,62 @@
package commands
import (
"os"
"github.com/spf13/cobra"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/log15"
"github.com/tendermint/tendermint/types"
)
var resetAllCmd = &cobra.Command{
Use: "unsafe_reset_all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator",
Run: resetAll,
}
var resetPrivValidatorCmd = &cobra.Command{
Use: "unsafe_reset_priv_validator",
Short: "(unsafe) Reset this node's validator",
Run: resetPrivValidator,
}
func init() {
RootCmd.AddCommand(resetAllCmd)
RootCmd.AddCommand(resetPrivValidatorCmd)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) {
ResetAll(config, log)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
ResetPrivValidator(config, log)
}
// Exported so other CLI tools can use it
func ResetAll(c cfg.Config, l log15.Logger) {
ResetPrivValidator(c, l)
os.RemoveAll(c.GetString("db_dir"))
}
func ResetPrivValidator(c cfg.Config, l log15.Logger) {
// Get PrivValidator
var privValidator *types.PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
if _, err := os.Stat(privValidatorFile); err == nil {
privValidator = types.LoadPrivValidator(privValidatorFile)
privValidator.Reset()
log.Notice("Reset PrivValidator", "file", privValidatorFile)
} else {
privValidator = types.GenPrivValidator()
privValidator.SetFile(privValidatorFile)
privValidator.Save()
log.Notice("Generated PrivValidator", "file", privValidatorFile)
}
}

cmd/tendermint/commands/root.go (+31, -0)

@@ -0,0 +1,31 @@
package commands
import (
"github.com/spf13/cobra"
"github.com/tendermint/go-logger"
tmcfg "github.com/tendermint/tendermint/config/tendermint"
)
var (
config = tmcfg.GetConfig("")
log = logger.New("module", "main")
)
//global flag
var logLevel string
var RootCmd = &cobra.Command{
Use: "tendermint",
Short: "Tendermint Core (BFT Consensus) in Go",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
// set the log level in the config and logger
config.Set("log_level", logLevel)
logger.SetLogLevel(logLevel)
},
}
func init() {
//parse flag and set config
RootCmd.PersistentFlags().StringVar(&logLevel, "log_level", config.GetString("log_level"), "Log level")
}

cmd/tendermint/commands/run_node.go (+124, -0)

@@ -0,0 +1,124 @@
package commands
import (
"io/ioutil"
"time"
"github.com/spf13/cobra"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/types"
)
var runNodeCmd = &cobra.Command{
Use: "node",
Short: "Run the tendermint node",
PreRun: setConfigFlags,
Run: runNode,
}
//flags
var (
moniker string
nodeLaddr string
seeds string
fastSync bool
skipUPNP bool
rpcLaddr string
grpcLaddr string
proxyApp string
abciTransport string
pex bool
)
func init() {
// configuration options
runNodeCmd.Flags().StringVar(&moniker, "moniker", config.GetString("moniker"),
"Node Name")
runNodeCmd.Flags().StringVar(&nodeLaddr, "node_laddr", config.GetString("node_laddr"),
"Node listen address. (0.0.0.0:0 means any interface, any port)")
runNodeCmd.Flags().StringVar(&seeds, "seeds", config.GetString("seeds"),
"Comma delimited host:port seed nodes")
runNodeCmd.Flags().BoolVar(&fastSync, "fast_sync", config.GetBool("fast_sync"),
"Fast blockchain syncing")
runNodeCmd.Flags().BoolVar(&skipUPNP, "skip_upnp", config.GetBool("skip_upnp"),
"Skip UPNP configuration")
runNodeCmd.Flags().StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc_laddr"),
"RPC listen address. Port required")
runNodeCmd.Flags().StringVar(&grpcLaddr, "grpc_laddr", config.GetString("grpc_laddr"),
"GRPC listen address (BroadcastTx only). Port required")
runNodeCmd.Flags().StringVar(&proxyApp, "proxy_app", config.GetString("proxy_app"),
"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
runNodeCmd.Flags().StringVar(&abciTransport, "abci", config.GetString("abci"),
"Specify abci transport (socket | grpc)")
// feature flags
runNodeCmd.Flags().BoolVar(&pex, "pex", config.GetBool("pex_reactor"),
"Enable Peer-Exchange (dev feature)")
RootCmd.AddCommand(runNodeCmd)
}
func setConfigFlags(cmd *cobra.Command, args []string) {
// Merge parsed flag values onto config
config.Set("moniker", moniker)
config.Set("node_laddr", nodeLaddr)
config.Set("seeds", seeds)
config.Set("fast_sync", fastSync)
config.Set("skip_upnp", skipUPNP)
config.Set("rpc_laddr", rpcLaddr)
config.Set("grpc_laddr", grpcLaddr)
config.Set("proxy_app", proxyApp)
config.Set("abci", abciTransport)
config.Set("pex_reactor", pex)
}
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
// should import github.com/tendermint/tendermint/node and implement
// their own run_node to call node.NewNode (instead of node.NewNodeDefault)
// with their custom priv validator and/or custom proxy.ClientCreator
func runNode(cmd *cobra.Command, args []string) {
// Wait until the genesis doc becomes available
// This is for Mintnet compatibility.
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GetString("genesis_file")
if !FileExists(genDocFile) {
log.Notice(Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
}
genDoc, err := types.GenesisDocFromJSON(jsonBlob)
if err != nil {
Exit(Fmt("Error reading GenesisDoc: %v", err))
}
if genDoc.ChainID == "" {
Exit(Fmt("Genesis doc %v must include non-empty chain_id", genDocFile))
}
config.Set("chain_id", genDoc.ChainID)
}
}
// Create & start node
n := node.NewNodeDefault(config)
if _, err := n.Start(); err != nil {
Exit(Fmt("Failed to start node: %v", err))
} else {
log.Notice("Started node", "nodeInfo", n.Switch().NodeInfo())
}
// Trap signal, run forever.
n.RunForever()
}
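The comment block above run_node points users who need an external signer or an in-process ABCI app at node.NewNode instead of node.NewNodeDefault. A minimal sketch of such a custom entry point is below; the exact node.NewNode argument order and the abci.NewBaseApplication constructor are assumptions (only the priv validator and proxy.ClientCreator parameters are named in this diff), so check node/node.go and the abci package before relying on it.

```go
package main

import (
	abci "github.com/tendermint/abci/types"
	tmcfg "github.com/tendermint/tendermint/config/tendermint"
	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

func main() {
	config := tmcfg.GetConfig("")

	// Load the validator key ourselves, e.g. to wrap an external signer.
	privValidator := types.LoadPrivValidator(config.GetString("priv_validator_file"))

	// Run the ABCI app in-process instead of over a socket.
	// A no-op BaseApplication stands in for a real application here (assumed constructor).
	app := abci.NewBaseApplication()
	clientCreator := proxy.NewLocalClientCreator(app)

	// Assumed argument order; see node/node.go for the actual NewNode signature.
	n := node.NewNode(config, privValidator, clientCreator)
	if _, err := n.Start(); err != nil {
		panic(err)
	}
	n.RunForever()
}
```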

cmd/tendermint/show_validator.go → cmd/tendermint/commands/show_validator.go (+14, -2)


cmd/tendermint/commands/testnet.go (+93, -0)

@@ -0,0 +1,93 @@
package commands
import (
"fmt"
"path"
"time"
"github.com/spf13/cobra"
cmn "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
)
var testnetFilesCmd = &cobra.Command{
Use: "testnet",
Short: "Initialize files for a Tendermint testnet",
Run: testnetFiles,
}
//flags
var (
nValidators int
dataDir string
)
func init() {
testnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
"Number of validators to initialize the testnet with")
testnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
"Directory to store initialization data for the testnet")
RootCmd.AddCommand(testnetFilesCmd)
}
func testnetFiles(cmd *cobra.Command, args []string) {
genVals := make([]types.GenesisValidator, nValidators)
// Initialize core dir and priv_validator.json's
for i := 0; i < nValidators; i++ {
mach := cmn.Fmt("mach%d", i)
err := initMachCoreDirectory(dataDir, mach)
if err != nil {
cmn.Exit(err.Error())
}
// Read priv_validator.json to populate vals
privValFile := path.Join(dataDir, mach, "priv_validator.json")
privVal := types.LoadPrivValidator(privValFile)
genVals[i] = types.GenesisValidator{
PubKey: privVal.PubKey,
Amount: 1,
Name: mach,
}
}
// Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{
GenesisTime: time.Now(),
ChainID: "chain-" + cmn.RandStr(6),
Validators: genVals,
}
// Write genesis file.
for i := 0; i < nValidators; i++ {
mach := cmn.Fmt("mach%d", i)
genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json"))
}
fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators))
}
// Initialize per-machine core directory
func initMachCoreDirectory(base, mach string) error {
dir := path.Join(base, mach)
err := cmn.EnsureDir(dir, 0777)
if err != nil {
return err
}
// Create priv_validator.json file if not present
ensurePrivValidator(path.Join(dir, "priv_validator.json"))
return nil
}
func ensurePrivValidator(file string) {
if cmn.FileExists(file) {
return
}
privValidator := types.GenPrivValidator()
privValidator.SetFile(file)
privValidator.Save()
}

cmd/tendermint/commands/version.go (+21, -0)

@@ -0,0 +1,21 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/version"
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Show version info",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(version.Version)
},
}
func init() {
RootCmd.AddCommand(versionCmd)
}

cmd/tendermint/flags.go (+0, -66)

@@ -1,66 +0,0 @@
package main
import (
flag "github.com/spf13/pflag"
"os"
cfg "github.com/tendermint/go-config"
)
func parseFlags(config cfg.Config, args []string) {
var (
printHelp bool
moniker string
nodeLaddr string
seeds string
fastSync bool
skipUPNP bool
rpcLaddr string
grpcLaddr string
logLevel string
proxyApp string
abciTransport string
pex bool
)
// Declare flags
var flags = flag.NewFlagSet("main", flag.ExitOnError)
flags.BoolVar(&printHelp, "help", false, "Print this help message.")
// configuration options
flags.StringVar(&moniker, "moniker", config.GetString("moniker"), "Node Name")
flags.StringVar(&nodeLaddr, "node_laddr", config.GetString("node_laddr"), "Node listen address. (0.0.0.0:0 means any interface, any port)")
flags.StringVar(&seeds, "seeds", config.GetString("seeds"), "Comma delimited host:port seed nodes")
flags.BoolVar(&fastSync, "fast_sync", config.GetBool("fast_sync"), "Fast blockchain syncing")
flags.BoolVar(&skipUPNP, "skip_upnp", config.GetBool("skip_upnp"), "Skip UPNP configuration")
flags.StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc_laddr"), "RPC listen address. Port required")
flags.StringVar(&grpcLaddr, "grpc_laddr", config.GetString("grpc_laddr"), "GRPC listen address (BroadcastTx only). Port required")
flags.StringVar(&logLevel, "log_level", config.GetString("log_level"), "Log level")
flags.StringVar(&proxyApp, "proxy_app", config.GetString("proxy_app"),
"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
flags.StringVar(&abciTransport, "abci", config.GetString("abci"), "Specify abci transport (socket | grpc)")
// feature flags
flags.BoolVar(&pex, "pex", config.GetBool("pex_reactor"), "Enable Peer-Exchange (dev feature)")
flags.Parse(args)
if printHelp {
flags.PrintDefaults()
os.Exit(0)
}
// Merge parsed flag values onto app.
config.Set("moniker", moniker)
config.Set("node_laddr", nodeLaddr)
config.Set("seeds", seeds)
config.Set("fast_sync", fastSync)
config.Set("skip_upnp", skipUPNP)
config.Set("rpc_laddr", rpcLaddr)
config.Set("grpc_laddr", grpcLaddr)
config.Set("log_level", logLevel)
config.Set("proxy_app", proxyApp)
config.Set("abci", abciTransport)
config.Set("pex_reactor", pex)
}

cmd/tendermint/gen_validator.go (+0, -15)

@@ -1,15 +0,0 @@
package main
import (
"fmt"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
func gen_validator() {
privValidator := types.GenPrivValidator()
privValidatorJSONBytes := wire.JSONBytesPretty(privValidator)
fmt.Printf(`%v
`, string(privValidatorJSONBytes))
}

cmd/tendermint/log.go (+0, -7)

@@ -1,7 +0,0 @@
package main
import (
"github.com/tendermint/go-logger"
)
var log = logger.New("module", "main")

cmd/tendermint/main.go (+3, -67)

@@ -4,76 +4,12 @@ import (
 "fmt"
 "os"
-cfg "github.com/tendermint/go-config"
-"github.com/tendermint/go-logger"
-tmcfg "github.com/tendermint/tendermint/config/tendermint"
-"github.com/tendermint/tendermint/consensus"
-"github.com/tendermint/tendermint/version"
+"github.com/tendermint/tendermint/cmd/tendermint/commands"
 )
-var config cfg.Config
 func main() {
-args := os.Args[1:]
-if len(args) == 0 {
-fmt.Println(`Tendermint
-Commands:
-init Initialize tendermint
-node Run the tendermint node
-show_validator Show this node's validator info
-gen_validator Generate new validator keypair
-probe_upnp Test UPnP functionality
-replay <walfile> Replay messages from WAL
-replay_console <walfile> Replay messages from WAL in a console
-unsafe_reset_all (unsafe) Remove all the data and WAL, reset this node's validator
-unsafe_reset_priv_validator (unsafe) Reset this node's validator
-version Show version info
-`)
-return
-}
-// Get configuration
-config = tmcfg.GetConfig("")
-parseFlags(config, args[1:]) // Command line overrides
-// set the log level
-logger.SetLogLevel(config.GetString("log_level"))
-switch args[0] {
-case "node":
-run_node(config)
-case "replay":
-if len(args) > 1 {
-consensus.RunReplayFile(config, args[1], false)
-} else {
-fmt.Println("replay requires an argument (walfile)")
-os.Exit(1)
-}
-case "replay_console":
-if len(args) > 1 {
-consensus.RunReplayFile(config, args[1], true)
-} else {
-fmt.Println("replay_console requires an argument (walfile)")
-os.Exit(1)
-}
-case "init":
-init_files()
-case "show_validator":
-show_validator()
-case "gen_validator":
-gen_validator()
-case "probe_upnp":
-probe_upnp()
-case "unsafe_reset_all":
-reset_all()
-case "unsafe_reset_priv_validator":
-reset_priv_validator()
-case "version":
-fmt.Println(version.Version)
-default:
-fmt.Printf("Unknown command %v\n", args[0])
+if err := commands.RootCmd.Execute(); err != nil {
+fmt.Println(err)
 os.Exit(1)
 }
 }

cmd/tendermint/reset_priv_validator.go (+0, -33)

@@ -1,33 +0,0 @@
package main
import (
"os"
"github.com/tendermint/tendermint/types"
)
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func reset_all() {
reset_priv_validator()
os.RemoveAll(config.GetString("db_dir"))
os.Remove(config.GetString("cs_wal_file"))
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func reset_priv_validator() {
// Get PrivValidator
var privValidator *types.PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
if _, err := os.Stat(privValidatorFile); err == nil {
privValidator = types.LoadPrivValidator(privValidatorFile)
privValidator.Reset()
log.Notice("Reset PrivValidator", "file", privValidatorFile)
} else {
privValidator = types.GenPrivValidator()
privValidator.SetFile(privValidatorFile)
privValidator.Save()
log.Notice("Generated PrivValidator", "file", privValidatorFile)
}
}

cmd/tendermint/run_node.go (+0, -59)

@@ -1,59 +0,0 @@
package main
import (
"io/ioutil"
"time"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/types"
)
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
// should import github.com/tendermint/tendermint/node and implement
// their own run_node to call node.NewNode (instead of node.NewNodeDefault)
// with their custom priv validator and/or custom proxy.ClientCreator
func run_node(config cfg.Config) {
// Wait until the genesis doc becomes available
// This is for Mintnet compatibility.
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GetString("genesis_file")
if !FileExists(genDocFile) {
log.Notice(Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
}
genDoc, err := types.GenesisDocFromJSON(jsonBlob)
if err != nil {
Exit(Fmt("Error reading GenesisDoc: %v", err))
}
if genDoc.ChainID == "" {
Exit(Fmt("Genesis doc %v must include non-empty chain_id", genDocFile))
}
config.Set("chain_id", genDoc.ChainID)
}
}
// Create & start node
n := node.NewNodeDefault(config)
if _, err := n.Start(); err != nil {
Exit(Fmt("Failed to start node: %v", err))
} else {
log.Notice("Started node", "nodeInfo", n.Switch().NodeInfo())
}
// Trap signal, run forever.
n.RunForever()
}

config/tendermint/config.go (+10, -0)

@@ -11,6 +11,10 @@ import (
 func getTMRoot(rootDir string) string {
 if rootDir == "" {
+rootDir = os.Getenv("TMHOME")
+}
+if rootDir == "" {
+// deprecated, use TMHOME (TODO: remove in TM 0.11.0)
 rootDir = os.Getenv("TMROOT")
 }
 if rootDir == "" {
@@ -79,6 +83,9 @@ func GetConfig(rootDir string) cfg.Config {
 mapConfig.SetDefault("block_size", 10000) // max number of txs
 mapConfig.SetDefault("block_part_size", 65536) // part size 64K
 mapConfig.SetDefault("disable_data_hash", false)
+// all timeouts are in ms
+mapConfig.SetDefault("timeout_handshake", 10000)
 mapConfig.SetDefault("timeout_propose", 3000)
 mapConfig.SetDefault("timeout_propose_delta", 500)
 mapConfig.SetDefault("timeout_prevote", 1000)
@@ -86,6 +93,7 @@ func GetConfig(rootDir string) cfg.Config {
 mapConfig.SetDefault("timeout_precommit", 1000)
 mapConfig.SetDefault("timeout_precommit_delta", 500)
 mapConfig.SetDefault("timeout_commit", 1000)
 // make progress asap (no `timeout_commit`) on full precommit votes
 mapConfig.SetDefault("skip_timeout_commit", false)
 mapConfig.SetDefault("mempool_recheck", true)
@@ -93,6 +101,8 @@ func GetConfig(rootDir string) cfg.Config {
 mapConfig.SetDefault("mempool_broadcast", true)
 mapConfig.SetDefault("mempool_wal_dir", rootDir+"/data/mempool.wal")
+mapConfig.SetDefault("tx_index", "kv")
 return mapConfig
 }


config/tendermint_test/config.go (+3, -0)

@@ -93,6 +93,7 @@ func ResetConfig(localPath string) cfg.Config {
 mapConfig.SetDefault("block_size", 10000)
 mapConfig.SetDefault("block_part_size", 65536) // part size 64K
 mapConfig.SetDefault("disable_data_hash", false)
+mapConfig.SetDefault("timeout_handshake", 10000)
 mapConfig.SetDefault("timeout_propose", 2000)
 mapConfig.SetDefault("timeout_propose_delta", 1)
 mapConfig.SetDefault("timeout_prevote", 10)
@@ -106,6 +107,8 @@ func ResetConfig(localPath string) cfg.Config {
 mapConfig.SetDefault("mempool_broadcast", true)
 mapConfig.SetDefault("mempool_wal_dir", "")
+mapConfig.SetDefault("tx_index", "kv")
 logger.SetLogLevel(mapConfig.GetString("log_level"))
 return mapConfig


consensus/replay.go (+94, -43)

@@ -100,27 +100,51 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
 cs.replayMode = true
 defer func() { cs.replayMode = false }()
-// Ensure that height+1 doesn't exist
-gr, found, err := cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight+1))
-if found {
-return errors.New(Fmt("WAL should not contain height %d.", csHeight+1))
-}
+// Ensure that ENDHEIGHT for this height doesn't exist
+// NOTE: This is just a sanity check. As far as we know things work fine without it,
+// and Handshake could reuse ConsensusState if it weren't for this check (since we can crash after writing ENDHEIGHT).
+gr, found, err := cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight))
 if gr != nil {
 gr.Close()
 }
+if found {
+return errors.New(Fmt("WAL should not contain #ENDHEIGHT %d.", csHeight))
+}
-// Search for height marker
-gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
+// Search for last height marker
+gr, found, err = cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight-1))
 if err == io.EOF {
-log.Warn("Replay: wal.group.Search returned EOF", "height", csHeight)
-return nil
+log.Warn("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
+// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
+// TODO (0.10.0): remove this
+gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
+if err == io.EOF {
+log.Warn("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
+return nil
+} else if err != nil {
+return err
+}
 } else if err != nil {
 return err
+} else {
+defer gr.Close()
 }
 if !found {
-return errors.New(Fmt("WAL does not contain height %d.", csHeight))
+// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
+// TODO (0.10.0): remove this
+gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
+if err == io.EOF {
+log.Warn("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
+return nil
+} else if err != nil {
+return err
+} else {
+defer gr.Close()
+}
+// TODO (0.10.0): uncomment
+// return errors.New(Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
 }
-defer gr.Close()
 log.Notice("Catchup by replaying consensus messages", "height", csHeight)
@@ -147,7 +171,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
 //--------------------------------------------------------------------------------
 // Parses marker lines of the form:
-// #HEIGHT: 12345
+// #ENDHEIGHT: 12345
 func makeHeightSearchFunc(height int) auto.SearchFunc {
 return func(line string) (int, error) {
 line = strings.TrimRight(line, "\n")
@@ -190,6 +214,8 @@ func (h *Handshaker) NBlocks() int {
 return h.nBlocks
 }
+var ErrReplayLastBlockTimeout = errors.New("Timed out waiting for last block to be replayed")
 // TODO: retry the handshake/replay if it fails ?
 func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
 // handshake is done via info request on the query conn
@@ -207,7 +233,11 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
 // replay blocks up to the latest in the blockstore
 _, err = h.ReplayBlocks(appHash, blockHeight, proxyApp)
-if err != nil {
+if err == ErrReplayLastBlockTimeout {
+log.Warn("Failed to sync via handshake. Trying other means. If they fail, please increase the timeout_handshake parameter")
+return nil
+} else if err != nil {
 return errors.New(Fmt("Error on replay: %v", err))
 }
@@ -267,15 +297,18 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
 } else if appBlockHeight == stateBlockHeight {
 // We haven't run Commit (both the state and app are one block behind),
-// so run through consensus with the real app
+// so replayBlock with the real app.
+// NOTE: We could instead use the cs.WAL on cs.Start,
+// but we'd have to allow the WAL to replay a block that wrote it's ENDHEIGHT
 log.Info("Replay last block using real app")
-return h.replayLastBlock(proxyApp.Consensus())
+return h.replayBlock(storeBlockHeight, proxyApp.Consensus())
 } else if appBlockHeight == storeBlockHeight {
-// We ran Commit, but didn't save the state, so run through consensus with mock app
-mockApp := newMockProxyApp(appHash)
+// We ran Commit, but didn't save the state, so replayBlock with mock app
+abciResponses := h.state.LoadABCIResponses()
+mockApp := newMockProxyApp(appHash, abciResponses)
 log.Info("Replay last block using mock app")
-return h.replayLastBlock(mockApp)
+return h.replayBlock(storeBlockHeight, mockApp)
 }
 }
@@ -284,23 +317,23 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
 return nil, nil
 }
-func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, useReplayFunc bool) ([]byte, error) {
+func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, mutateState bool) ([]byte, error) {
 // App is further behind than it should be, so we need to replay blocks.
-// We replay all blocks from appBlockHeight+1 to storeBlockHeight-1,
-// and let the final block be replayed through ReplayBlocks.
+// We replay all blocks from appBlockHeight+1.
 // Note that we don't have an old version of the state,
-// so we by-pass state validation using applyBlock here.
+// so we by-pass state validation/mutation using sm.ExecCommitBlock.
+// If mutateState == true, the final block is replayed with h.replayBlock()
 var appHash []byte
 var err error
 finalBlock := storeBlockHeight
-if useReplayFunc {
+if mutateState {
 finalBlock -= 1
 }
 for i := appBlockHeight + 1; i <= finalBlock; i++ {
 log.Info("Applying block", "height", i)
 block := h.store.LoadBlock(i)
-appHash, err = sm.ApplyBlock(proxyApp.Consensus(), block)
+appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block)
 if err != nil {
 return nil, err
 }
@@ -308,33 +341,29 @@ func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, store
 h.nBlocks += 1
 }
-if useReplayFunc {
+if mutateState {
 // sync the final block
-return h.ReplayBlocks(appHash, finalBlock, proxyApp)
+return h.replayBlock(storeBlockHeight, proxyApp.Consensus())
 }
 return appHash, h.checkAppHash(appHash)
 }
-// Replay the last block through the consensus and return the AppHash from after Commit.
-func (h *Handshaker) replayLastBlock(proxyApp proxy.AppConnConsensus) ([]byte, error) {
+// ApplyBlock on the proxyApp with the last block.
+func (h *Handshaker) replayBlock(height int, proxyApp proxy.AppConnConsensus) ([]byte, error) {
 mempool := types.MockMempool{}
-cs := NewConsensusState(h.config, h.state, proxyApp, h.store, mempool)
-evsw := types.NewEventSwitch()
-evsw.Start()
-defer evsw.Stop()
-cs.SetEventSwitch(evsw)
-newBlockCh := subscribeToEvent(evsw, "consensus-replay", types.EventStringNewBlock(), 1)
+var eventCache types.Fireable // nil
+block := h.store.LoadBlock(height)
+meta := h.store.LoadBlockMeta(height)
-// run through the WAL, commit new block, stop
-cs.Start()
-<-newBlockCh // TODO: use a timeout and return err?
-cs.Stop()
+if err := h.state.ApplyBlock(eventCache, proxyApp, block, meta.BlockID.PartsHeader, mempool); err != nil {
+return nil, err
+}
 h.nBlocks += 1
-return cs.state.AppHash, nil
+return h.state.AppHash, nil
 }
 func (h *Handshaker) checkAppHash(appHash []byte) error {
@@ -346,9 +375,14 @@ func (h *Handshaker) checkAppHash(appHash []byte) error {
 }
 //--------------------------------------------------------------------------------
-func newMockProxyApp(appHash []byte) proxy.AppConnConsensus {
-clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{appHash: appHash})
+// mockProxyApp uses ABCIResponses to give the right results
+// Useful because we don't want to call Commit() twice for the same block on the real app.
+func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppConnConsensus {
+clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{
+appHash: appHash,
+abciResponses: abciResponses,
+})
 cli, _ := clientCreator.NewABCIClient()
 return proxy.NewAppConnConsensus(cli)
 }
@@ -356,7 +390,24 @@ func newMockProxyApp(appHash []byte) proxy.AppConnConsensus {
 type mockProxyApp struct {
 abci.BaseApplication
-appHash []byte
+appHash []byte
+txCount int
+abciResponses *sm.ABCIResponses
 }
+func (mock *mockProxyApp) DeliverTx(tx []byte) abci.Result {
+r := mock.abciResponses.DeliverTx[mock.txCount]
+mock.txCount += 1
+return abci.Result{
+r.Code,
+r.Data,
+r.Log,
+}
+}
+func (mock *mockProxyApp) EndBlock(height uint64) abci.ResponseEndBlock {
+mock.txCount = 0
+return mock.abciResponses.EndBlock
+}
 func (mock *mockProxyApp) Commit() abci.Result {


consensus/replay_file.go (+0, -2)

@@ -127,8 +127,6 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
 }
 func (cs *ConsensusState) startForReplay() {
-// don't want to start full cs
-cs.BaseService.OnStart()
 log.Warn("Replay commands are disabled until someone updates them and writes tests")
 /* TODO:!


consensus/replay_test.go (+1, -1)

@@ -443,7 +443,7 @@ func buildTMStateFromChain(config cfg.Config, state *sm.State, chain []*types.Bl
 func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
 // Search for height marker
-gr, found, err := wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(1))
+gr, found, err := wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(0))
 if err != nil {
 return nil, nil, err
 }


consensus/state.go (+35, -18)

@@ -283,7 +283,7 @@ func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.Ap
 //----------------------------------------
 // Public interface
-// implements events.Eventable
+// SetEventSwitch implements events.Eventable
 func (cs *ConsensusState) SetEventSwitch(evsw types.EventSwitch) {
 cs.evsw = evsw
 }
@@ -340,16 +340,9 @@ func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
 }
 func (cs *ConsensusState) OnStart() error {
-cs.BaseService.OnStart()
 walFile := cs.config.GetString("cs_wal_file")
-err := EnsureDir(path.Dir(walFile), 0700)
-if err != nil {
-log.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
-return err
-}
-err = cs.OpenWAL(walFile)
-if err != nil {
+if err := cs.OpenWAL(walFile); err != nil {
 log.Error("Error loading ConsensusState wal", "error", err.Error())
 return err
 }
@@ -364,8 +357,9 @@ func (cs *ConsensusState) OnStart() error {
 // we may have lost some votes if the process crashed
 // reload from consensus log to catchup
 if err := cs.catchupReplay(cs.Height); err != nil {
-log.Error("Error on catchup replay", "error", err.Error())
-// let's go for it anyways, maybe we're fine
+log.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "error", err.Error())
+// NOTE: if we ever do return an error here,
+// make sure to stop the timeoutTicker
 }
 // now start the receiveRoutine
@@ -404,6 +398,12 @@ func (cs *ConsensusState) Wait() {
 // Open file to log all consensus messages and timeouts for deterministic accountability
 func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
+err = EnsureDir(path.Dir(walFile), 0700)
+if err != nil {
+log.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
+return err
+}
 cs.mtx.Lock()
 defer cs.mtx.Unlock()
 wal, err := NewWAL(walFile, cs.config.GetBool("cs_wal_light"))
@@ -1216,13 +1216,26 @@ func (cs *ConsensusState) finalizeCommit(height int) {
 fail.Fail() // XXX
+// Finish writing to the WAL for this height.
+// NOTE: If we fail before writing this, we'll never write it,
+// and just recover by running ApplyBlock in the Handshake.
+// If we moved it before persisting the block, we'd have to allow
+// WAL replay for blocks with an #ENDHEIGHT
+// As is, ConsensusState should not be started again
+// until we successfully call ApplyBlock (ie. here or in Handshake after restart)
+if cs.wal != nil {
+cs.wal.writeEndHeight(height)
+}
+fail.Fail() // XXX
 // Create a copy of the state for staging
 // and an event cache for txs
 stateCopy := cs.state.Copy()
 eventCache := types.NewEventCache(cs.evsw)
-// Execute and commit the block, and update the mempool.
-// All calls to the proxyAppConn should come here.
+// Execute and commit the block, update and save the state, and update the mempool.
+// All calls to the proxyAppConn come here.
 // NOTE: the block.AppHash wont reflect these txs until the next block
 err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
 if err != nil {
@@ -1232,20 +1245,24 @@ func (cs *ConsensusState) finalizeCommit(height int) {
 fail.Fail() // XXX
-// Fire off event for new block.
-// TODO: Handle app failure. See #177
+// Fire event for new block.
+// NOTE: If we fail before firing, these events will never fire
+//
+// TODO: Either
+// * Fire before persisting state, in ApplyBlock
+// * Fire on start up if we haven't written any new WAL msgs
+// Both options mean we may fire more than once. Is that fine ?
 types.FireEventNewBlock(cs.evsw, types.EventDataNewBlock{block})
 types.FireEventNewBlockHeader(cs.evsw, types.EventDataNewBlockHeader{block.Header})
 eventCache.Flush()
-// Save the state.
-stateCopy.Save()
 fail.Fail() // XXX
 // NewHeightStep!
 cs.updateToState(stateCopy)
+fail.Fail() // XXX
 // cs.StartTime is already set.
 // Schedule Round0 to start soon.
 cs.scheduleRound0(&cs.RoundState)


consensus/test_data/build.sh (+4, -4)

@@ -27,7 +27,7 @@ killall tendermint
 # /q would print up to and including the match, then quit.
 # /Q doesn't include the match.
 # http://unix.stackexchange.com/questions/11305/grep-show-all-the-file-up-to-the-match
-sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal
+sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal
 reset
 }
@@ -41,7 +41,7 @@ sleep 7
 killall tendermint
 kill -9 $PID
-sed '/HEIGHT: 7/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/many_blocks.cswal
+sed '/ENDHEIGHT: 6/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/many_blocks.cswal
 reset
 }
@@ -56,7 +56,7 @@ sleep 10
 killall tendermint
 kill -9 $PID
-sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal
+sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal
 reset
 }
@@ -73,7 +73,7 @@ sleep 5
 killall tendermint
 kill -9 $PID
-sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal
+sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal
 reset
 }


+ 1
- 1
consensus/test_data/empty_block.cswal View File

@ -1,4 +1,4 @@
#HEIGHT: 1
#ENDHEIGHT: 0
{"time":"2016-12-18T05:05:33.502Z","msg":[3,{"duration":974084551,"height":1,"round":0,"step":1}]} {"time":"2016-12-18T05:05:33.502Z","msg":[3,{"duration":974084551,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} {"time":"2016-12-18T05:05:33.505Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"62C0F2BCCB491399EEDAF8E85837ADDD4E25BAB7A84BFC4F0E88594531FBC6D4755DEC7E6427F04AD7EB8BB89502762AB4380C7BBA93A4C297E6180EC78E3504"]}}],"peer_key":""}]} {"time":"2016-12-18T05:05:33.505Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"62C0F2BCCB491399EEDAF8E85837ADDD4E25BAB7A84BFC4F0E88594531FBC6D4755DEC7E6427F04AD7EB8BB89502762AB4380C7BBA93A4C297E6180EC78E3504"]}}],"peer_key":""}]}


+ 6
- 6
consensus/test_data/many_blocks.cswal View File

@ -1,4 +1,4 @@
#HEIGHT: 1
#ENDHEIGHT: 0
{"time":"2017-02-17T23:54:19.013Z","msg":[3,{"duration":969121813,"height":1,"round":0,"step":1}]} {"time":"2017-02-17T23:54:19.013Z","msg":[3,{"duration":969121813,"height":1,"round":0,"step":1}]}
{"time":"2017-02-17T23:54:19.014Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} {"time":"2017-02-17T23:54:19.014Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2017-02-17T23:54:19.014Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"2E32C8D500E936D27A47FCE3FF4BE7C1AFB3FAE1"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"105A5A834E9AE2FA2191CAB5CB20D63594BA7859BD3EB92F055C8A35476D71F0D89F9FD5B0FF030D021533C71A81BF6E8F026BF4A37FC637CF38CA35291A9D00"]}}],"peer_key":""}]} {"time":"2017-02-17T23:54:19.014Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"2E32C8D500E936D27A47FCE3FF4BE7C1AFB3FAE1"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"105A5A834E9AE2FA2191CAB5CB20D63594BA7859BD3EB92F055C8A35476D71F0D89F9FD5B0FF030D021533C71A81BF6E8F026BF4A37FC637CF38CA35291A9D00"]}}],"peer_key":""}]}
@ -8,7 +8,7 @@
{"time":"2017-02-17T23:54:19.016Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]} {"time":"2017-02-17T23:54:19.016Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2017-02-17T23:54:19.016Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"3F32EE37F9EA674A2173CAD651836A8EE628B5C7","parts":{"total":1,"hash":"2E32C8D500E936D27A47FCE3FF4BE7C1AFB3FAE1"}},"signature":[1,"2B1070A5AB9305612A3AE74A8036D82B5E49E0DBBFBC7D723DB985CC8A8E72A52FF8E34D85273FEB8B901945CA541FA5142C3C4D43A04E9205ACECF53FD19B01"]}}],"peer_key":""}]} {"time":"2017-02-17T23:54:19.016Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"3F32EE37F9EA674A2173CAD651836A8EE628B5C7","parts":{"total":1,"hash":"2E32C8D500E936D27A47FCE3FF4BE7C1AFB3FAE1"}},"signature":[1,"2B1070A5AB9305612A3AE74A8036D82B5E49E0DBBFBC7D723DB985CC8A8E72A52FF8E34D85273FEB8B901945CA541FA5142C3C4D43A04E9205ACECF53FD19B01"]}}],"peer_key":""}]}
{"time":"2017-02-17T23:54:19.017Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]} {"time":"2017-02-17T23:54:19.017Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}
#HEIGHT: 2
#ENDHEIGHT: 1
{"time":"2017-02-17T23:54:19.019Z","msg":[1,{"height":2,"round":0,"step":"RoundStepNewHeight"}]} {"time":"2017-02-17T23:54:19.019Z","msg":[1,{"height":2,"round":0,"step":"RoundStepNewHeight"}]}
{"time":"2017-02-17T23:54:20.017Z","msg":[3,{"duration":998073370,"height":2,"round":0,"step":1}]} {"time":"2017-02-17T23:54:20.017Z","msg":[3,{"duration":998073370,"height":2,"round":0,"step":1}]}
{"time":"2017-02-17T23:54:20.018Z","msg":[1,{"height":2,"round":0,"step":"RoundStepPropose"}]} {"time":"2017-02-17T23:54:20.018Z","msg":[1,{"height":2,"round":0,"step":"RoundStepPropose"}]}
@ -19,7 +19,7 @@
{"time":"2017-02-17T23:54:20.021Z","msg":[1,{"height":2,"round":0,"step":"RoundStepPrecommit"}]} {"time":"2017-02-17T23:54:20.021Z","msg":[1,{"height":2,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2017-02-17T23:54:20.021Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":2,"round":0,"type":2,"block_id":{"hash":"32310D174A99844713693C9815D2CA660364E028","parts":{"total":1,"hash":"D008E9014CDDEA8EC95E1E99E21333241BD52DFC"}},"signature":[1,"AA9F03D0707752301D7CBFCF4F0BCDBD666A46C1CAED3910BD64A3C5C2874AAF328172646C951C5E2FD962359C382A3CBBA2C73EC9B533668C6386995B83EC08"]}}],"peer_key":""}]} {"time":"2017-02-17T23:54:20.021Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":2,"round":0,"type":2,"block_id":{"hash":"32310D174A99844713693C9815D2CA660364E028","parts":{"total":1,"hash":"D008E9014CDDEA8EC95E1E99E21333241BD52DFC"}},"signature":[1,"AA9F03D0707752301D7CBFCF4F0BCDBD666A46C1CAED3910BD64A3C5C2874AAF328172646C951C5E2FD962359C382A3CBBA2C73EC9B533668C6386995B83EC08"]}}],"peer_key":""}]}
{"time":"2017-02-17T23:54:20.022Z","msg":[1,{"height":2,"round":0,"step":"RoundStepCommit"}]} {"time":"2017-02-17T23:54:20.022Z","msg":[1,{"height":2,"round":0,"step":"RoundStepCommit"}]}
#HEIGHT: 3
#ENDHEIGHT: 2
{"time":"2017-02-17T23:54:20.025Z","msg":[1,{"height":3,"round":0,"step":"RoundStepNewHeight"}]} {"time":"2017-02-17T23:54:20.025Z","msg":[1,{"height":3,"round":0,"step":"RoundStepNewHeight"}]}
{"time":"2017-02-17T23:54:21.022Z","msg":[3,{"duration":997103974,"height":3,"round":0,"step":1}]} {"time":"2017-02-17T23:54:21.022Z","msg":[3,{"duration":997103974,"height":3,"round":0,"step":1}]}
{"time":"2017-02-17T23:54:21.024Z","msg":[1,{"height":3,"round":0,"step":"RoundStepPropose"}]} {"time":"2017-02-17T23:54:21.024Z","msg":[1,{"height":3,"round":0,"step":"RoundStepPropose"}]}
@ -30,7 +30,7 @@
{"time":"2017-02-17T23:54:21.028Z","msg":[1,{"height":3,"round":0,"step":"RoundStepPrecommit"}]} {"time":"2017-02-17T23:54:21.028Z","msg":[1,{"height":3,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2017-02-17T23:54:21.028Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":3,"round":0,"type":2,"block_id":{"hash":"37AF6866DA8C3167CFC280FAE47B6ED441B00D5B","parts":{"total":1,"hash":"2E5DE5777A5AD899CD2531304F42A470509DE989"}},"signature":[1,"C900519E305EC03392E7D197D5FAB535DB240C9C0BA5375A1679C75BAAA07C7410C0EF43CF97D98F2C08A1D739667D5ACFF6233A1FAE75D3DA275AEA422EFD0F"]}}],"peer_key":""}]} {"time":"2017-02-17T23:54:21.028Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":3,"round":0,"type":2,"block_id":{"hash":"37AF6866DA8C3167CFC280FAE47B6ED441B00D5B","parts":{"total":1,"hash":"2E5DE5777A5AD899CD2531304F42A470509DE989"}},"signature":[1,"C900519E305EC03392E7D197D5FAB535DB240C9C0BA5375A1679C75BAAA07C7410C0EF43CF97D98F2C08A1D739667D5ACFF6233A1FAE75D3DA275AEA422EFD0F"]}}],"peer_key":""}]}
{"time":"2017-02-17T23:54:21.028Z","msg":[1,{"height":3,"round":0,"step":"RoundStepCommit"}]} {"time":"2017-02-17T23:54:21.028Z","msg":[1,{"height":3,"round":0,"step":"RoundStepCommit"}]}
#HEIGHT: 4
#ENDHEIGHT: 3
{"time":"2017-02-17T23:54:21.032Z","msg":[1,{"height":4,"round":0,"step":"RoundStepNewHeight"}]} {"time":"2017-02-17T23:54:21.032Z","msg":[1,{"height":4,"round":0,"step":"RoundStepNewHeight"}]}
{"time":"2017-02-17T23:54:22.028Z","msg":[3,{"duration":996302067,"height":4,"round":0,"step":1}]} {"time":"2017-02-17T23:54:22.028Z","msg":[3,{"duration":996302067,"height":4,"round":0,"step":1}]}
{"time":"2017-02-17T23:54:22.030Z","msg":[1,{"height":4,"round":0,"step":"RoundStepPropose"}]} {"time":"2017-02-17T23:54:22.030Z","msg":[1,{"height":4,"round":0,"step":"RoundStepPropose"}]}
@ -41,7 +41,7 @@
{"time":"2017-02-17T23:54:22.033Z","msg":[1,{"height":4,"round":0,"step":"RoundStepPrecommit"}]} {"time":"2017-02-17T23:54:22.033Z","msg":[1,{"height":4,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2017-02-17T23:54:22.033Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":4,"round":0,"type":2,"block_id":{"hash":"04715E223BF4327FFA9B0D5AD849B74A099D5DEC","parts":{"total":1,"hash":"24CEBCBEB833F56D47AD14354071B3B7A243068A"}},"signature":[1,"F544743F17479A61F94B0F68C63D254BD60493D78E818D48A5859133619AEE5E92C47CAD89C654DF64E0911C3152091E047555D5F14655D95B9681AE9B336505"]}}],"peer_key":""}]} {"time":"2017-02-17T23:54:22.033Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":4,"round":0,"type":2,"block_id":{"hash":"04715E223BF4327FFA9B0D5AD849B74A099D5DEC","parts":{"total":1,"hash":"24CEBCBEB833F56D47AD14354071B3B7A243068A"}},"signature":[1,"F544743F17479A61F94B0F68C63D254BD60493D78E818D48A5859133619AEE5E92C47CAD89C654DF64E0911C3152091E047555D5F14655D95B9681AE9B336505"]}}],"peer_key":""}]}
{"time":"2017-02-17T23:54:22.034Z","msg":[1,{"height":4,"round":0,"step":"RoundStepCommit"}]} {"time":"2017-02-17T23:54:22.034Z","msg":[1,{"height":4,"round":0,"step":"RoundStepCommit"}]}
#HEIGHT: 5
#ENDHEIGHT: 4
{"time":"2017-02-17T23:54:22.036Z","msg":[1,{"height":5,"round":0,"step":"RoundStepNewHeight"}]} {"time":"2017-02-17T23:54:22.036Z","msg":[1,{"height":5,"round":0,"step":"RoundStepNewHeight"}]}
{"time":"2017-02-17T23:54:23.034Z","msg":[3,{"duration":997096276,"height":5,"round":0,"step":1}]} {"time":"2017-02-17T23:54:23.034Z","msg":[3,{"duration":997096276,"height":5,"round":0,"step":1}]}
{"time":"2017-02-17T23:54:23.035Z","msg":[1,{"height":5,"round":0,"step":"RoundStepPropose"}]} {"time":"2017-02-17T23:54:23.035Z","msg":[1,{"height":5,"round":0,"step":"RoundStepPropose"}]}
@ -52,7 +52,7 @@
{"time":"2017-02-17T23:54:23.038Z","msg":[1,{"height":5,"round":0,"step":"RoundStepPrecommit"}]} {"time":"2017-02-17T23:54:23.038Z","msg":[1,{"height":5,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2017-02-17T23:54:23.038Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":5,"round":0,"type":2,"block_id":{"hash":"FDC6D837995BEBBBFCBF3E7D7CF44F8FDA448543","parts":{"total":1,"hash":"A52BAA9C2E52E633A1605F4B930205613E3E7A2F"}},"signature":[1,"DF51D23D5D2C57598F67791D953A6C2D9FC5865A3048ADA4469B37500D2996B95732E0DC6F99EAEAEA12B4818CE355C7B701D16857D2AC767D740C2E30E9260C"]}}],"peer_key":""}]} {"time":"2017-02-17T23:54:23.038Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":5,"round":0,"type":2,"block_id":{"hash":"FDC6D837995BEBBBFCBF3E7D7CF44F8FDA448543","parts":{"total":1,"hash":"A52BAA9C2E52E633A1605F4B930205613E3E7A2F"}},"signature":[1,"DF51D23D5D2C57598F67791D953A6C2D9FC5865A3048ADA4469B37500D2996B95732E0DC6F99EAEAEA12B4818CE355C7B701D16857D2AC767D740C2E30E9260C"]}}],"peer_key":""}]}
{"time":"2017-02-17T23:54:23.038Z","msg":[1,{"height":5,"round":0,"step":"RoundStepCommit"}]} {"time":"2017-02-17T23:54:23.038Z","msg":[1,{"height":5,"round":0,"step":"RoundStepCommit"}]}
#HEIGHT: 6
#ENDHEIGHT: 5
{"time":"2017-02-17T23:54:23.041Z","msg":[1,{"height":6,"round":0,"step":"RoundStepNewHeight"}]} {"time":"2017-02-17T23:54:23.041Z","msg":[1,{"height":6,"round":0,"step":"RoundStepNewHeight"}]}
{"time":"2017-02-17T23:54:24.038Z","msg":[3,{"duration":997341910,"height":6,"round":0,"step":1}]} {"time":"2017-02-17T23:54:24.038Z","msg":[3,{"duration":997341910,"height":6,"round":0,"step":1}]}
{"time":"2017-02-17T23:54:24.040Z","msg":[1,{"height":6,"round":0,"step":"RoundStepPropose"}]} {"time":"2017-02-17T23:54:24.040Z","msg":[1,{"height":6,"round":0,"step":"RoundStepPropose"}]}


+ 1
- 1
consensus/test_data/small_block1.cswal View File

@ -1,4 +1,4 @@
#HEIGHT: 1
#ENDHEIGHT: 0
{"time":"2016-12-18T05:05:38.593Z","msg":[3,{"duration":970717663,"height":1,"round":0,"step":1}]} {"time":"2016-12-18T05:05:38.593Z","msg":[3,{"duration":970717663,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:38.595Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} {"time":"2016-12-18T05:05:38.595Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:38.595Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"A434EC796DF1CECC01296E953839C4675863A4E5"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"39563C3C7EDD9855B2971457A5DABF05CFDAF52805658847EB1F05115B8341344A77761CC85E670AF1B679DA9FC0905231957174699FE8326DBE7706209BDD0B"]}}],"peer_key":""}]} {"time":"2016-12-18T05:05:38.595Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"A434EC796DF1CECC01296E953839C4675863A4E5"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"39563C3C7EDD9855B2971457A5DABF05CFDAF52805658847EB1F05115B8341344A77761CC85E670AF1B679DA9FC0905231957174699FE8326DBE7706209BDD0B"]}}],"peer_key":""}]}


+ 1
- 1
consensus/test_data/small_block2.cswal View File

@ -1,4 +1,4 @@
#HEIGHT: 1
#ENDHEIGHT: 0
{"time":"2016-12-18T05:05:43.641Z","msg":[3,{"duration":969409681,"height":1,"round":0,"step":1}]} {"time":"2016-12-18T05:05:43.641Z","msg":[3,{"duration":969409681,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]} {"time":"2016-12-18T05:05:43.643Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"F1A8E9928889C68FD393F3983B5362AECA4A95AA13FE3C78569B2515EC046893CB718071CAF54F3F1507DCD851B37CD5557EA17BB5471D2DC6FB5AC5FBB72E02"]}}],"peer_key":""}]} {"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"F1A8E9928889C68FD393F3983B5362AECA4A95AA13FE3C78569B2515EC046893CB718071CAF54F3F1507DCD851B37CD5557EA17BB5471D2DC6FB5AC5FBB72E02"]}}],"peer_key":""}]}


+ 0
- 1
consensus/ticker.go View File

@ -45,7 +45,6 @@ func NewTimeoutTicker() TimeoutTicker {
}
func (t *timeoutTicker) OnStart() error {
t.BaseService.OnStart()
go t.timeoutRoutine()


+ 3
- 10
consensus/wal.go View File

@ -55,12 +55,11 @@ func NewWAL(walFile string, light bool) (*WAL, error) {
}
func (wal *WAL) OnStart() error {
wal.BaseService.OnStart()
size, err := wal.group.Head.Size()
if err != nil {
return err
} else if size == 0 {
wal.writeHeight(1)
wal.writeEndHeight(0)
}
_, err = wal.group.Start()
return err
@ -84,12 +83,6 @@ func (wal *WAL) Save(wmsg WALMessage) {
}
}
}
// Write #HEIGHT: XYZ if new height
if edrs, ok := wmsg.(types.EventDataRoundState); ok {
if edrs.Step == RoundStepNewHeight.String() {
wal.writeHeight(edrs.Height)
}
}
// Write the wal message
var wmsgBytes = wire.JSONBytes(TimedWALMessage{time.Now(), wmsg})
err := wal.group.WriteLine(string(wmsgBytes))
@ -102,8 +95,8 @@ func (wal *WAL) Save(wmsg WALMessage) {
}
}
func (wal *WAL) writeHeight(height int) {
wal.group.WriteLine(Fmt("#HEIGHT: %v", height))
func (wal *WAL) writeEndHeight(height int) {
wal.group.WriteLine(Fmt("#ENDHEIGHT: %v", height))
// TODO: only flush when necessary
if err := wal.group.Flush(); err != nil {
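The diff above also seeds a brand-new WAL with `#ENDHEIGHT: 0` instead of `#HEIGHT: 1`, so replay always starts from a well-defined marker. A small sketch of that OnStart check (hypothetical helper, not the real WAL/group types, which wrap an autofile group rather than a bare file):

```go
// Package walsketch holds a toy version of the seeding logic shown above.
package walsketch

import "os"

func openWAL(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0600)
	if err != nil {
		return nil, err
	}
	info, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	// same idea as the `size == 0` branch in OnStart above
	if info.Size() == 0 {
		if _, err := f.WriteString("#ENDHEIGHT: 0\n"); err != nil {
			f.Close()
			return nil, err
		}
	}
	return f, nil
}
```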


+ 42
- 24
glide.lock View File

@ -1,53 +1,58 @@
hash: 41f8fec708e98b7f8c4804be46008493199fa45e89b2d5dc237fd65fe431c62f
updated: 2017-03-06T04:01:33.319604992-05:00
hash: d9724aa287c40d1b3856b6565f09235d809c8b2f7c6537c04f597137c0d6cd26
updated: 2017-04-21T13:09:25.708801802-04:00
imports:
- name: github.com/btcsuite/btcd
version: d06c0bb181529331be8f8d9350288c420d9e60e4
version: 4b348c1d33373d672edd83fc576892d0e46686d2
subpackages:
- btcec
- name: github.com/BurntSushi/toml
version: 99064174e013895bbd9b025c31100bd1d9b590ca
version: b26d9c308763d68093482582cea63d69be07a0f0
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
subpackages:
- spew
- name: github.com/ebuchman/fail-test
version: 13f91f14c826314205cdbed1ec8ac8bf08e03381
version: 95f809107225be108efcf10a3509e4ea6ceef3c4
- name: github.com/go-stack/stack
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
- name: github.com/gogo/protobuf
version: 909568be09de550ed094403c2bf8a261b5bb730a
version: 100ba4e885062801d56799d78530b73b178a78f3
subpackages:
- proto
- name: github.com/golang/protobuf
version: 8ee79997227bf9b34611aee7946ae64735e6fd93
version: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
subpackages:
- proto
- ptypes/any
- name: github.com/golang/snappy
version: d9eb7a3d35ec988b8585d4a0068e462c27d28380
version: 553a641470496b2327abcac10b36396bd98e45c9
- name: github.com/gorilla/websocket
version: 3ab3a8b8831546bd18fd182c20687ca853b2bb13
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/jmhodges/levigo
version: c42d9e0ca023e2198120196f842701bb4c55d7b9
- name: github.com/mattn/go-colorable
version: d228849504861217f796da67fae4f6e347643f15
version: ded68f7a9561c023e790de24279db7ebf473ea80
- name: github.com/mattn/go-isatty
version: 30a891c33c7cde7b02a981314b4228ec99380cca
version: fc9e8d8ef48496124e79ae0df75490096eccf6fe
- name: github.com/pkg/errors
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/spf13/cobra
version: 10f6b9d7e1631a54ad07c5c0fb71c28a1abfd3c2
- name: github.com/spf13/pflag
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
version: 2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
subpackages:
- assert
- require
- name: github.com/syndtr/goleveldb
version: 23851d93a2292dcc56e71a18ec9e0624d84a0f65
version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4
subpackages:
- leveldb
- leveldb/cache
@ -62,7 +67,7 @@ imports:
- leveldb/table
- leveldb/util
- name: github.com/tendermint/abci
version: 1236e8fb6eee3a63909f4014a8e84385ead7933d
version: 56e13d87f4e3ec1ea756957d6b23caa6ebcf0998
subpackages:
- client
- example/counter
@ -79,17 +84,17 @@ imports:
- name: github.com/tendermint/go-clist
version: 3baa390bbaf7634251c42ad69a8682e7e3990552
- name: github.com/tendermint/go-common
version: dcb015dff6c7af21e65c8e2f3b450df19d38c777
version: f9e3db037330c8a8d61d3966de8473eaf01154fa
subpackages:
- test
- name: github.com/tendermint/go-config
version: 620dcbbd7d587cf3599dedbf329b64311b0c307a
- name: github.com/tendermint/go-crypto
version: 3f47cfac5fcd9e0f1727c7db980b3559913b3e3a
version: 0ca2c6fdb0706001ca4c4b9b80c9f428e8cf39da
- name: github.com/tendermint/go-data
version: 32271140e8fd5abdbb22e268d7a02421fa382f0b
version: e7fcc6d081ec8518912fcdc103188275f83a3ee5
- name: github.com/tendermint/go-db
version: eac3f2bc147023957c8bf69432a4e6c4dc5c3f72
version: 9643f60bc2578693844aacf380a7c32e4c029fee
- name: github.com/tendermint/go-events
version: f8ffbfb2be3483e9e7927495590a727f51c0c11f
- name: github.com/tendermint/go-flowrate
@ -101,17 +106,17 @@ imports:
- name: github.com/tendermint/go-merkle
version: 714d4d04557fd068a7c2a1748241ce8428015a96
- name: github.com/tendermint/go-p2p
version: 97a5ed2d1a17eaee8717b8a32cfaf7a9a82a273d
version: e8f33a47846708269d373f9c8080613d6c4f66b2
subpackages:
- upnp
- name: github.com/tendermint/go-rpc
version: fcea0cda21f64889be00a0f4b6d13266b1a76ee7
version: 2c8df0ee6b60d8ac33662df13a4e358c679e02bf
subpackages:
- client
- server
- types
- name: github.com/tendermint/go-wire
version: f530b7af7a8b06e612c2063bff6ace49060a085e
version: c1c9a57ab8038448ddea1714c0698f8051e5748c
- name: github.com/tendermint/log15
version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6
subpackages:
@ -123,7 +128,7 @@ imports:
- client
- testutil
- name: golang.org/x/crypto
version: 7c6cc321c680f03b9ef0764448e780704f486b51
version: 96846453c37f0876340a66a47f3f75b1f3a6cd2d
subpackages:
- curve25519
- nacl/box
@ -134,7 +139,7 @@
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: 61557ac0112b576429a0df080e1c2cef5dfbb642
version: c8c74377599bd978aee1cf3b9b63a8634051cec2
subpackages:
- context
- http2
@ -144,20 +149,33 @@ imports:
- lex/httplex
- trace
- name: golang.org/x/sys
version: d75a52659825e75fff6158388dddc6a5b04f9ba5
version: ea9bcade75cb975a0b9738936568ab388b845617
subpackages:
- unix
- name: golang.org/x/text
version: 19e3104b43db45fca0303f489a9536087b184802
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/genproto
version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: cbcceb2942a489498cf22b2f918536e819d33f0a
version: 6914ab1e338c92da4218a23d27fcd03d0ad78d46
subpackages:
- codes
- credentials
- grpclog
- internal
- keepalive
- metadata
- naming
- peer
- stats
- status
- tap
- transport
testImports: []

+ 3
- 0
glide.yaml View File

@ -10,6 +10,8 @@ import:
version: develop
- package: github.com/tendermint/go-crypto
version: develop
- package: github.com/tendermint/go-data
version: develop
- package: github.com/tendermint/go-db
version: develop
- package: github.com/tendermint/go-events
@ -39,6 +41,7 @@ import:
- proto
- package: github.com/gorilla/websocket
version: ^1.1.0
- package: github.com/spf13/cobra
- package: github.com/spf13/pflag
- package: github.com/pkg/errors
version: ^0.8.0


+ 36
- 15
node/node.go View File

@ -10,12 +10,12 @@ import (
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/go-common" cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config" cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
crypto "github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db" dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-rpc"
"github.com/tendermint/go-rpc/server"
"github.com/tendermint/go-wire"
p2p "github.com/tendermint/go-p2p"
rpc "github.com/tendermint/go-rpc"
rpcserver "github.com/tendermint/go-rpc/server"
wire "github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain" bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/consensus"
mempl "github.com/tendermint/tendermint/mempool" mempl "github.com/tendermint/tendermint/mempool"
@ -23,6 +23,9 @@ import (
rpccore "github.com/tendermint/tendermint/rpc/core" rpccore "github.com/tendermint/tendermint/rpc/core"
grpccore "github.com/tendermint/tendermint/rpc/grpc" grpccore "github.com/tendermint/tendermint/rpc/grpc"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version" "github.com/tendermint/tendermint/version"
@ -51,6 +54,7 @@ type Node struct {
consensusReactor *consensus.ConsensusReactor // for participating in the consensus
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
txIndexer txindex.TxIndexer
}
func NewNodeDefault(config cfg.Config) *Node {
@ -84,6 +88,17 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
// reload the state (it may have been updated by the handshake)
state = sm.LoadState(stateDB)
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.GetString("tx_index") {
case "kv":
store := dbm.NewDB("tx_index", config.GetString("db_backend"), config.GetString("db_dir"))
txIndexer = kv.NewTxIndex(store)
default:
txIndexer = &null.TxIndex{}
}
state.TxIndexer = txIndexer
// Generate node PrivKey
privKey := crypto.GenPrivKeyEd25519()
@ -188,13 +203,13 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
consensusState: consensusState,
consensusReactor: consensusReactor,
proxyApp: proxyApp,
txIndexer: txIndexer,
}
node.BaseService = *cmn.NewBaseService(log, "Node", node)
return node
}
func (n *Node) OnStart() error {
n.BaseService.OnStart()
// Create & add listener
protocol, address := ProtocolAndAddress(n.config.GetString("node_laddr"))
@ -202,7 +217,7 @@ func (n *Node) OnStart() error {
n.sw.AddListener(l)
// Start the switch
n.sw.SetNodeInfo(makeNodeInfo(n.config, n.sw, n.privKey))
n.sw.SetNodeInfo(n.makeNodeInfo())
n.sw.SetNodePrivKey(n.privKey)
_, err := n.sw.Start()
if err != nil {
@ -279,6 +294,7 @@ func (n *Node) ConfigureRPC() {
rpccore.SetGenesisDoc(n.genesisDoc)
rpccore.SetAddrBook(n.addrBook)
rpccore.SetProxyAppQuery(n.proxyApp.Query())
rpccore.SetTxIndexer(n.txIndexer)
}
func (n *Node) startRPC() ([]net.Listener, error) {
@ -349,34 +365,39 @@ func (n *Node) ProxyApp() proxy.AppConns {
return n.proxyApp
}
func makeNodeInfo(config cfg.Config, sw *p2p.Switch, privKey crypto.PrivKeyEd25519) *p2p.NodeInfo {
func (n *Node) makeNodeInfo() *p2p.NodeInfo {
txIndexerStatus := "on"
if _, ok := n.txIndexer.(*null.TxIndex); ok {
txIndexerStatus = "off"
}
nodeInfo := &p2p.NodeInfo{
PubKey: privKey.PubKey().(crypto.PubKeyEd25519),
Moniker: config.GetString("moniker"),
Network: config.GetString("chain_id"),
PubKey: n.privKey.PubKey().(crypto.PubKeyEd25519),
Moniker: n.config.GetString("moniker"),
Network: n.config.GetString("chain_id"),
Version: version.Version,
Other: []string{
cmn.Fmt("wire_version=%v", wire.Version),
cmn.Fmt("p2p_version=%v", p2p.Version),
cmn.Fmt("consensus_version=%v", consensus.Version),
cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
cmn.Fmt("tx_index=%v", txIndexerStatus),
},
}
// include git hash in the nodeInfo if available
if rev, err := cmn.ReadFile(config.GetString("revision_file")); err == nil {
if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("revision=%v", string(rev)))
}
if !sw.IsListening() {
if !n.sw.IsListening() {
return nodeInfo
}
p2pListener := sw.Listeners()[0]
p2pListener := n.sw.Listeners()[0]
p2pHost := p2pListener.ExternalAddress().IP.String()
p2pPort := p2pListener.ExternalAddress().Port
rpcListenAddr := config.GetString("rpc_laddr")
rpcListenAddr := n.config.GetString("rpc_laddr")
// We assume that the rpcListener has the same ExternalAddress.
// This is probably true because both P2P and RPC listeners use UPnP,
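node.go now chooses a transaction indexer from the `tx_index` config key, defaulting to a no-op indexer, and advertises the choice in the node info. A simplified Go sketch of that selection pattern (stand-in types for txindex.TxIndexer, kv.TxIndex and null.TxIndex, not the real package):

```go
package main

import "fmt"

// TxIndexer stands in for txindex.TxIndexer; kvIndex and nullIndex play the
// roles of kv.TxIndex and null.TxIndex.
type TxIndexer interface {
	Index(hash []byte, result string) error
	Get(hash []byte) (string, error)
}

type kvIndex struct{ store map[string]string }

func (i *kvIndex) Index(h []byte, r string) error { i.store[string(h)] = r; return nil }
func (i *kvIndex) Get(h []byte) (string, error)   { return i.store[string(h)], nil }

type nullIndex struct{}

func (nullIndex) Index([]byte, string) error { return nil }
func (nullIndex) Get([]byte) (string, error) { return "", fmt.Errorf("indexing disabled") }

// newTxIndexer mirrors the switch on config.GetString("tx_index") above.
func newTxIndexer(mode string) TxIndexer {
	switch mode {
	case "kv":
		return &kvIndex{store: map[string]string{}}
	default:
		return nullIndex{}
	}
}

func main() {
	idx := newTxIndexer("kv") // anything other than "kv" falls back to the null indexer
	idx.Index([]byte{0x01}, "DeliverTx OK")
	res, _ := idx.Get([]byte{0x01})
	fmt.Println(res)
}
```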


+ 0
- 1
proxy/multi_app_conn.go View File

@ -72,7 +72,6 @@ func (app *multiAppConn) Query() AppConnQuery {
}
func (app *multiAppConn) OnStart() error {
app.BaseService.OnStart()
// query connection
querycli, err := app.clientCreator.NewABCIClient()


+ 31
- 14
rpc/client/httpclient.go View File

@ -22,7 +22,7 @@ out the server for test code (mock).
*/
type HTTP struct {
remote string
rpc *rpcclient.ClientJSONRPC
rpc *rpcclient.JSONRPCClient
*WSEvents
}
@ -30,7 +30,7 @@ type HTTP struct {
// and the websocket path (which always seems to be "/websocket")
func NewHTTP(remote, wsEndpoint string) *HTTP {
return &HTTP{
rpc: rpcclient.NewClientJSONRPC(remote),
rpc: rpcclient.NewJSONRPCClient(remote),
remote: remote,
WSEvents: newWSEvents(remote, wsEndpoint),
}
@ -50,7 +50,7 @@ func (c *HTTP) _assertIsEventSwitch() types.EventSwitch {
func (c *HTTP) Status() (*ctypes.ResultStatus, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("status", []interface{}{}, tmResult)
_, err := c.rpc.Call("status", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Status")
}
@ -60,7 +60,7 @@ func (c *HTTP) Status() (*ctypes.ResultStatus, error) {
func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("abci_info", []interface{}{}, tmResult)
_, err := c.rpc.Call("abci_info", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "ABCIInfo")
}
@ -69,7 +69,9 @@ func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
func (c *HTTP) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("abci_query", []interface{}{path, data, prove}, tmResult)
_, err := c.rpc.Call("abci_query",
map[string]interface{}{"path": path, "data": data, "prove": prove},
tmResult)
if err != nil {
return nil, errors.Wrap(err, "ABCIQuery")
}
@ -78,7 +80,7 @@ func (c *HTTP) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultAB
func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
_, err := c.rpc.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "broadcast_tx_commit")
}
@ -95,7 +97,7 @@ func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call(route, []interface{}{tx}, tmResult)
_, err := c.rpc.Call(route, map[string]interface{}{"tx": tx}, tmResult)
if err != nil {
return nil, errors.Wrap(err, route)
}
@ -104,7 +106,7 @@ func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx
func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("net_info", nil, tmResult)
_, err := c.rpc.Call("net_info", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "NetInfo")
}
@ -113,7 +115,7 @@ func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) {
func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("dump_consensus_state", nil, tmResult)
_, err := c.rpc.Call("dump_consensus_state", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "DumpConsensusState")
}
@ -122,7 +124,9 @@ func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
func (c *HTTP) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("blockchain", []interface{}{minHeight, maxHeight}, tmResult)
_, err := c.rpc.Call("blockchain",
map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight},
tmResult)
if err != nil {
return nil, errors.Wrap(err, "BlockchainInfo")
}
@ -131,7 +135,7 @@ func (c *HTTP) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchai
func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("genesis", nil, tmResult)
_, err := c.rpc.Call("genesis", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Genesis")
}
@ -140,7 +144,7 @@ func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) {
func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("block", []interface{}{height}, tmResult)
_, err := c.rpc.Call("block", map[string]interface{}{"height": height}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Block")
}
@ -149,16 +153,29 @@ func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
func (c *HTTP) Commit(height int) (*ctypes.ResultCommit, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("commit", []interface{}{height}, tmResult)
_, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Commit")
}
return (*tmResult).(*ctypes.ResultCommit), nil
}
func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
tmResult := new(ctypes.TMResult)
query := map[string]interface{}{
"hash": hash,
"prove": prove,
}
_, err := c.rpc.Call("tx", query, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Tx")
}
return (*tmResult).(*ctypes.ResultTx), nil
}
func (c *HTTP) Validators() (*ctypes.ResultValidators, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("validators", nil, tmResult)
_, err := c.rpc.Call("validators", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Validators")
}
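All HTTP client calls above switch from positional parameter arrays to named parameters. A generic JSON-RPC 2.0 sketch of what such a request looks like on the wire (this is not the go-rpc client's internals; the endpoint and port are only examples):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type rpcRequest struct {
	JSONRPC string                 `json:"jsonrpc"`
	ID      string                 `json:"id"`
	Method  string                 `json:"method"`
	Params  map[string]interface{} `json:"params"` // named, not positional
}

func call(remote, method string, params map[string]interface{}) (*http.Response, error) {
	body, err := json.Marshal(rpcRequest{JSONRPC: "2.0", ID: "1", Method: method, Params: params})
	if err != nil {
		return nil, err
	}
	return http.Post(remote, "application/json", bytes.NewReader(body))
}

func main() {
	// ask for a block by height using a named argument, as the client now does
	resp, err := call("http://localhost:46657", "block", map[string]interface{}{"height": 1})
	if err != nil {
		fmt.Println("call failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```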


+ 1
- 0
rpc/client/interface.go View File

@ -44,6 +44,7 @@ type SignClient interface {
Block(height int) (*ctypes.ResultBlock, error)
Commit(height int) (*ctypes.ResultCommit, error)
Validators() (*ctypes.ResultValidators, error)
Tx(hash []byte, prove bool) (*ctypes.ResultTx, error)
}
// HistoryClient shows us data from genesis to now in large chunks.


+ 4
- 0
rpc/client/localclient.go View File

@ -103,3 +103,7 @@ func (c Local) Commit(height int) (*ctypes.ResultCommit, error) {
func (c Local) Validators() (*ctypes.ResultValidators, error) {
return core.Validators()
}
func (c Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
return core.Tx(hash, prove)
}

+ 2
- 2
rpc/client/mock/abci.go View File

@ -45,7 +45,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log}, nil
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}
func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
@ -54,7 +54,7 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log}, nil
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}
// ABCIMock will send all abci related request to the named app,


+ 22
- 13
rpc/client/rpc_test.go View File

@ -3,7 +3,6 @@ package client_test
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -11,6 +10,7 @@ import (
merktest "github.com/tendermint/merkleeyes/testutil" merktest "github.com/tendermint/merkleeyes/testutil"
"github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test" rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
func getHTTPClient() *client.HTTP {
@ -117,49 +117,58 @@ func TestAppCalls(t *testing.T) {
// write something
k, v, tx := merktest.MakeTxKV()
_, err = c.BroadcastTxCommit(tx)
bres, err := c.BroadcastTxCommit(tx)
require.Nil(err, "%d: %+v", i, err) require.Nil(err, "%d: %+v", i, err)
require.True(bres.DeliverTx.GetCode().IsOK())
txh := bres.Height
apph := txh + 1 // this is where the tx will be applied to the state
// wait before querying
time.Sleep(time.Second * 1)
client.WaitForHeight(c, apph, nil)
qres, err := c.ABCIQuery("/key", k, false)
if assert.Nil(err) && assert.True(qres.Response.Code.IsOK()) {
data := qres.Response
// assert.Equal(k, data.GetKey()) // only returned for proofs
assert.Equal(v, data.GetValue())
}
// +/- 1 making my head hurt
h := int(qres.Response.Height) - 1
// make sure we can lookup the tx with proof
// ptx, err := c.Tx(bres.Hash, true)
ptx, err := c.Tx(bres.Hash, true)
require.Nil(err, "%d: %+v", i, err)
assert.Equal(txh, ptx.Height)
assert.Equal(types.Tx(tx), ptx.Tx)
// and we can even check the block is added
block, err := c.Block(h)
block, err := c.Block(apph)
require.Nil(err, "%d: %+v", i, err)
appHash := block.BlockMeta.Header.AppHash
assert.True(len(appHash) > 0)
assert.EqualValues(h, block.BlockMeta.Header.Height)
assert.EqualValues(apph, block.BlockMeta.Header.Height)
// check blockchain info, now that we know there is info
// TODO: is this commented somewhere that they are returned
// in order of descending height???
info, err := c.BlockchainInfo(h-2, h)
info, err := c.BlockchainInfo(apph, apph)
require.Nil(err, "%d: %+v", i, err)
assert.True(info.LastHeight > 2)
if assert.Equal(3, len(info.BlockMetas)) {
assert.True(info.LastHeight >= apph)
if assert.Equal(1, len(info.BlockMetas)) {
lastMeta := info.BlockMetas[0]
assert.EqualValues(h, lastMeta.Header.Height)
assert.EqualValues(apph, lastMeta.Header.Height)
bMeta := block.BlockMeta
assert.Equal(bMeta.Header.AppHash, lastMeta.Header.AppHash)
assert.Equal(bMeta.BlockID, lastMeta.BlockID)
}
// and get the corresponding commit with the same apphash
commit, err := c.Commit(h)
commit, err := c.Commit(apph)
require.Nil(err, "%d: %+v", i, err)
cappHash := commit.Header.AppHash
assert.Equal(appHash, cappHash)
assert.NotNil(commit.Commit)
// compare the commits (note Commit(2) has commit from Block(3))
commit2, err := c.Commit(h - 1)
commit2, err := c.Commit(apph - 1)
require.Nil(err, "%d: %+v", i, err)
assert.Equal(block.Block.LastCommit, commit2.Commit)
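The test now waits for the height at which the transaction was applied instead of sleeping for a fixed second. A simplified polling loop in the spirit of client.WaitForHeight (an assumption: the real helper also supports waiters and custom timeouts; this sketch only polls Status):

```go
package waitutil

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/rpc/client"
)

// waitForHeight polls Status until the node reports at least height h.
func waitForHeight(c *client.HTTP, h int) error {
	for i := 0; i < 100; i++ {
		s, err := c.Status()
		if err != nil {
			return err
		}
		if s.LatestBlockHeight >= h {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for height %d", h)
}
```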


+ 10
- 5
rpc/core/mempool.go View File

@ -4,9 +4,9 @@ import (
"fmt" "fmt"
"time" "time"
abci "github.com/tendermint/abci/types"
ctypes "github.com/tendermint/tendermint/rpc/core/types" ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
abci "github.com/tendermint/abci/types"
)
//-----------------------------------------------------------------------------
@ -18,7 +18,7 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
if err != nil {
return nil, fmt.Errorf("Error broadcasting transaction: %v", err)
}
return &ctypes.ResultBroadcastTx{}, nil
return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil
}
// Returns with the response from CheckTx
@ -36,6 +36,7 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
Code: r.Code,
Data: r.Data,
Log: r.Log,
Hash: tx.Hash(),
}, nil
}
@ -65,8 +66,9 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
if checkTxR.Code != abci.CodeType_OK {
// CheckTx failed!
return &ctypes.ResultBroadcastTxCommit{
CheckTx: checkTxR,
CheckTx: checkTxR,
DeliverTx: nil,
Hash: tx.Hash(),
}, nil
}
@ -84,14 +86,17 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
}
log.Notice("DeliverTx passed ", "tx", []byte(tx), "response", deliverTxR)
return &ctypes.ResultBroadcastTxCommit{
CheckTx: checkTxR,
CheckTx: checkTxR,
DeliverTx: deliverTxR,
Hash: tx.Hash(),
Height: deliverTxRes.Height,
}, nil
case <-timer.C:
log.Error("failed to include tx")
return &ctypes.ResultBroadcastTxCommit{
CheckTx: checkTxR,
CheckTx: checkTxR,
DeliverTx: nil,
Hash: tx.Hash(),
}, fmt.Errorf("Timed out waiting for transaction to be included in a block") }, fmt.Errorf("Timed out waiting for transaction to be included in a block")
} }


+ 10
- 1
rpc/core/net.go View File

@ -1,6 +1,8 @@
package core
import (
"fmt"
ctypes "github.com/tendermint/tendermint/rpc/core/types" ctypes "github.com/tendermint/tendermint/rpc/core/types"
) )
@ -31,10 +33,17 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
// Dial given list of seeds
func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
if len(seeds) == 0 {
return &ctypes.ResultDialSeeds{}, fmt.Errorf("No seeds provided")
}
// starts go routines to dial each seed after random delays
log.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds)
err := p2pSwitch.DialSeeds(addrBook, seeds)
return &ctypes.ResultDialSeeds{}, err
if err != nil {
return &ctypes.ResultDialSeeds{}, err
}
return &ctypes.ResultDialSeeds{"Dialing seeds in progress. See /net_info for details"}, nil
}
//-----------------------------------------------------------------------------


+ 11
- 5
rpc/core/pipe.go View File

@ -2,11 +2,12 @@ package core
import (
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
"github.com/tendermint/go-p2p"
crypto "github.com/tendermint/go-crypto"
p2p "github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -42,9 +43,10 @@ var (
p2pSwitch P2P
// objects
pubKey crypto.PubKey
genDoc *types.GenesisDoc // cache the genesis structure
addrBook *p2p.AddrBook
pubKey crypto.PubKey
genDoc *types.GenesisDoc // cache the genesis structure
addrBook *p2p.AddrBook
txIndexer txindex.TxIndexer
)
func SetConfig(c cfg.Config) {
@ -86,3 +88,7 @@ func SetAddrBook(book *p2p.AddrBook) {
func SetProxyAppQuery(appConn proxy.AppConnQuery) {
proxyAppQuery = appConn
}
func SetTxIndexer(indexer txindex.TxIndexer) {
txIndexer = indexer
}

+ 31
- 115
rpc/core/routes.go View File

@ -19,6 +19,7 @@ var Routes = map[string]*rpc.RPCFunc{
"genesis": rpc.NewRPCFunc(GenesisResult, ""), "genesis": rpc.NewRPCFunc(GenesisResult, ""),
"block": rpc.NewRPCFunc(BlockResult, "height"), "block": rpc.NewRPCFunc(BlockResult, "height"),
"commit": rpc.NewRPCFunc(CommitResult, "height"), "commit": rpc.NewRPCFunc(CommitResult, "height"),
"tx": rpc.NewRPCFunc(TxResult, "hash,prove"),
"validators": rpc.NewRPCFunc(ValidatorsResult, ""), "validators": rpc.NewRPCFunc(ValidatorsResult, ""),
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusStateResult, ""), "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusStateResult, ""),
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxsResult, ""), "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxsResult, ""),
@ -45,185 +46,100 @@ var Routes = map[string]*rpc.RPCFunc{
}
func SubscribeResult(wsCtx rpctypes.WSRPCContext, event string) (ctypes.TMResult, error) {
if r, err := Subscribe(wsCtx, event); err != nil {
return nil, err
} else {
return r, nil
}
return Subscribe(wsCtx, event)
}
func UnsubscribeResult(wsCtx rpctypes.WSRPCContext, event string) (ctypes.TMResult, error) {
if r, err := Unsubscribe(wsCtx, event); err != nil {
return nil, err
} else {
return r, nil
}
return Unsubscribe(wsCtx, event)
}
func StatusResult() (ctypes.TMResult, error) {
if r, err := Status(); err != nil {
return nil, err
} else {
return r, nil
}
return Status()
}
func NetInfoResult() (ctypes.TMResult, error) {
if r, err := NetInfo(); err != nil {
return nil, err
} else {
return r, nil
}
return NetInfo()
}
func UnsafeDialSeedsResult(seeds []string) (ctypes.TMResult, error) {
if r, err := UnsafeDialSeeds(seeds); err != nil {
return nil, err
} else {
return r, nil
}
return UnsafeDialSeeds(seeds)
}
func BlockchainInfoResult(min, max int) (ctypes.TMResult, error) {
if r, err := BlockchainInfo(min, max); err != nil {
return nil, err
} else {
return r, nil
}
return BlockchainInfo(min, max)
}
func GenesisResult() (ctypes.TMResult, error) {
if r, err := Genesis(); err != nil {
return nil, err
} else {
return r, nil
}
return Genesis()
}
func BlockResult(height int) (ctypes.TMResult, error) {
if r, err := Block(height); err != nil {
return nil, err
} else {
return r, nil
}
return Block(height)
}
func CommitResult(height int) (ctypes.TMResult, error) {
if r, err := Commit(height); err != nil {
return nil, err
} else {
return r, nil
}
return Commit(height)
}
func ValidatorsResult() (ctypes.TMResult, error) {
if r, err := Validators(); err != nil {
return nil, err
} else {
return r, nil
}
return Validators()
}
func DumpConsensusStateResult() (ctypes.TMResult, error) {
if r, err := DumpConsensusState(); err != nil {
return nil, err
} else {
return r, nil
}
return DumpConsensusState()
}
func UnconfirmedTxsResult() (ctypes.TMResult, error) {
if r, err := UnconfirmedTxs(); err != nil {
return nil, err
} else {
return r, nil
}
return UnconfirmedTxs()
}
func NumUnconfirmedTxsResult() (ctypes.TMResult, error) {
if r, err := NumUnconfirmedTxs(); err != nil {
return nil, err
} else {
return r, nil
}
return NumUnconfirmedTxs()
}
// Tx allow user to query the transaction results. `nil` could mean the
// transaction is in the mempool, invalidated, or was not send in the first
// place.
func TxResult(hash []byte, prove bool) (ctypes.TMResult, error) {
return Tx(hash, prove)
} }
func BroadcastTxCommitResult(tx []byte) (ctypes.TMResult, error) { func BroadcastTxCommitResult(tx []byte) (ctypes.TMResult, error) {
if r, err := BroadcastTxCommit(tx); err != nil {
return nil, err
} else {
return r, nil
}
return BroadcastTxCommit(tx)
} }
func BroadcastTxSyncResult(tx []byte) (ctypes.TMResult, error) { func BroadcastTxSyncResult(tx []byte) (ctypes.TMResult, error) {
if r, err := BroadcastTxSync(tx); err != nil {
return nil, err
} else {
return r, nil
}
return BroadcastTxSync(tx)
} }
func BroadcastTxAsyncResult(tx []byte) (ctypes.TMResult, error) { func BroadcastTxAsyncResult(tx []byte) (ctypes.TMResult, error) {
if r, err := BroadcastTxAsync(tx); err != nil {
return nil, err
} else {
return r, nil
}
return BroadcastTxAsync(tx)
} }
func ABCIQueryResult(path string, data []byte, prove bool) (ctypes.TMResult, error) { func ABCIQueryResult(path string, data []byte, prove bool) (ctypes.TMResult, error) {
if r, err := ABCIQuery(path, data, prove); err != nil {
return nil, err
} else {
return r, nil
}
return ABCIQuery(path, data, prove)
} }
func ABCIInfoResult() (ctypes.TMResult, error) { func ABCIInfoResult() (ctypes.TMResult, error) {
if r, err := ABCIInfo(); err != nil {
return nil, err
} else {
return r, nil
}
return ABCIInfo()
} }
func UnsafeFlushMempoolResult() (ctypes.TMResult, error) { func UnsafeFlushMempoolResult() (ctypes.TMResult, error) {
if r, err := UnsafeFlushMempool(); err != nil {
return nil, err
} else {
return r, nil
}
return UnsafeFlushMempool()
} }
func UnsafeSetConfigResult(typ, key, value string) (ctypes.TMResult, error) { func UnsafeSetConfigResult(typ, key, value string) (ctypes.TMResult, error) {
if r, err := UnsafeSetConfig(typ, key, value); err != nil {
return nil, err
} else {
return r, nil
}
return UnsafeSetConfig(typ, key, value)
} }
func UnsafeStartCPUProfilerResult(filename string) (ctypes.TMResult, error) { func UnsafeStartCPUProfilerResult(filename string) (ctypes.TMResult, error) {
if r, err := UnsafeStartCPUProfiler(filename); err != nil {
return nil, err
} else {
return r, nil
}
return UnsafeStartCPUProfiler(filename)
} }
func UnsafeStopCPUProfilerResult() (ctypes.TMResult, error) { func UnsafeStopCPUProfilerResult() (ctypes.TMResult, error) {
if r, err := UnsafeStopCPUProfiler(); err != nil {
return nil, err
} else {
return r, nil
}
return UnsafeStopCPUProfiler()
} }
func UnsafeWriteHeapProfileResult(filename string) (ctypes.TMResult, error) { func UnsafeWriteHeapProfileResult(filename string) (ctypes.TMResult, error) {
if r, err := UnsafeWriteHeapProfile(filename); err != nil {
return nil, err
} else {
return r, nil
}
return UnsafeWriteHeapProfile(filename)
} }

+ 43
- 0
rpc/core/tx.go View File

@ -0,0 +1,43 @@
package core
import (
"fmt"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types"
)
func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
// if index is disabled, return error
if _, ok := txIndexer.(*null.TxIndex); ok {
return nil, fmt.Errorf("Transaction indexing is disabled.")
}
r, err := txIndexer.Get(hash)
if err != nil {
return nil, err
}
if r == nil {
return nil, fmt.Errorf("Tx (%X) not found", hash)
}
height := int(r.Height) // XXX
index := int(r.Index)
var proof types.TxProof
if prove {
block := blockStore.LoadBlock(height)
proof = block.Data.Txs.Proof(index)
}
return &ctypes.ResultTx{
Height: height,
Index: index,
TxResult: r.Result,
Tx: r.Tx,
Proof: proof,
}, nil
}
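For orientation, here is a minimal client-side sketch of the new endpoint (hand-written, not part of this diff), following the Call conventions used in rpc/test/client_test.go; the function name queryTx and the dataHash argument are assumptions, with dataHash expected to come from a separate block query at the returned height.
package main

import (
    "fmt"

    rpc "github.com/tendermint/go-rpc/client"
    ctypes "github.com/tendermint/tendermint/rpc/core/types"
)

// queryTx asks a node for an indexed transaction by hash and checks the
// returned Merkle proof against the block's data hash.
func queryTx(client rpc.HTTPClient, hash, dataHash []byte) error {
    tmResult := new(ctypes.TMResult)
    params := map[string]interface{}{"hash": hash, "prove": true}
    if _, err := client.Call("tx", params, tmResult); err != nil {
        return err // indexing disabled, or no tx with this hash
    }
    res := (*tmResult).(*ctypes.ResultTx)
    fmt.Printf("tx found at height %d, index %d\n", res.Height, res.Index)
    // Validate checks both the root hash and the internal consistency of the proof.
    return res.Proof.Validate(dataHash)
}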

+ 30
- 0
rpc/core/types/responses.go View File

@ -1,6 +1,8 @@
package core_types package core_types
import ( import (
"strings"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-crypto" "github.com/tendermint/go-crypto"
"github.com/tendermint/go-p2p" "github.com/tendermint/go-p2p"
@ -38,6 +40,19 @@ type ResultStatus struct {
LatestBlockTime int64 `json:"latest_block_time"` // nano LatestBlockTime int64 `json:"latest_block_time"` // nano
} }
func (s *ResultStatus) TxIndexEnabled() bool {
if s == nil || s.NodeInfo == nil {
return false
}
for _, s := range s.NodeInfo.Other {
info := strings.Split(s, "=")
if len(info) == 2 && info[0] == "tx_index" {
return info[1] == "on"
}
}
return false
}
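As a quick illustration (a hand-written sketch, not from this diff; exampleTxIndexEnabled is a hypothetical name), the node is expected to advertise its indexer in NodeInfo.Other as a "tx_index=..." entry, which a client can check before calling the tx endpoint.
package core_types

import "github.com/tendermint/go-p2p"

// exampleTxIndexEnabled sketches the "tx_index=on" convention parsed by
// TxIndexEnabled; any other value (or a missing entry) reads as disabled.
func exampleTxIndexEnabled() bool {
    status := &ResultStatus{NodeInfo: &p2p.NodeInfo{
        Other: []string{"tx_index=on"},
    }}
    return status.TxIndexEnabled() // true
}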
type ResultNetInfo struct { type ResultNetInfo struct {
Listening bool `json:"listening"` Listening bool `json:"listening"`
Listeners []string `json:"listeners"` Listeners []string `json:"listeners"`
@ -45,6 +60,7 @@ type ResultNetInfo struct {
} }
type ResultDialSeeds struct { type ResultDialSeeds struct {
Log string `json:"log"`
} }
type Peer struct { type Peer struct {
@ -67,11 +83,23 @@ type ResultBroadcastTx struct {
Code abci.CodeType `json:"code"` Code abci.CodeType `json:"code"`
Data []byte `json:"data"` Data []byte `json:"data"`
Log string `json:"log"` Log string `json:"log"`
Hash []byte `json:"hash"`
} }
type ResultBroadcastTxCommit struct { type ResultBroadcastTxCommit struct {
CheckTx *abci.ResponseCheckTx `json:"check_tx"` CheckTx *abci.ResponseCheckTx `json:"check_tx"`
DeliverTx *abci.ResponseDeliverTx `json:"deliver_tx"` DeliverTx *abci.ResponseDeliverTx `json:"deliver_tx"`
Hash []byte `json:"hash"`
Height int `json:"height"`
}
type ResultTx struct {
Height int `json:"height"`
Index int `json:"index"`
TxResult abci.ResponseDeliverTx `json:"tx_result"`
Tx types.Tx `json:"tx"`
Proof types.TxProof `json:"proof,omitempty"`
} }
type ResultUnconfirmedTxs struct { type ResultUnconfirmedTxs struct {
@ -127,6 +155,7 @@ const (
ResultTypeBroadcastTx = byte(0x60) ResultTypeBroadcastTx = byte(0x60)
ResultTypeUnconfirmedTxs = byte(0x61) ResultTypeUnconfirmedTxs = byte(0x61)
ResultTypeBroadcastTxCommit = byte(0x62) ResultTypeBroadcastTxCommit = byte(0x62)
ResultTypeTx = byte(0x63)
// 0x7 bytes are for querying the application // 0x7 bytes are for querying the application
ResultTypeABCIQuery = byte(0x70) ResultTypeABCIQuery = byte(0x70)
@ -163,6 +192,7 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&ResultDumpConsensusState{}, ResultTypeDumpConsensusState}, wire.ConcreteType{&ResultDumpConsensusState{}, ResultTypeDumpConsensusState},
wire.ConcreteType{&ResultBroadcastTx{}, ResultTypeBroadcastTx}, wire.ConcreteType{&ResultBroadcastTx{}, ResultTypeBroadcastTx},
wire.ConcreteType{&ResultBroadcastTxCommit{}, ResultTypeBroadcastTxCommit}, wire.ConcreteType{&ResultBroadcastTxCommit{}, ResultTypeBroadcastTxCommit},
wire.ConcreteType{&ResultTx{}, ResultTypeTx},
wire.ConcreteType{&ResultUnconfirmedTxs{}, ResultTypeUnconfirmedTxs}, wire.ConcreteType{&ResultUnconfirmedTxs{}, ResultTypeUnconfirmedTxs},
wire.ConcreteType{&ResultSubscribe{}, ResultTypeSubscribe}, wire.ConcreteType{&ResultSubscribe{}, ResultTypeSubscribe},
wire.ConcreteType{&ResultUnsubscribe{}, ResultTypeUnsubscribe}, wire.ConcreteType{&ResultUnsubscribe{}, ResultTypeUnsubscribe},


+ 38
- 0
rpc/core/types/responses_test.go View File

@ -0,0 +1,38 @@
package core_types
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/go-p2p"
)
func TestStatusIndexer(t *testing.T) {
assert := assert.New(t)
var status *ResultStatus
assert.False(status.TxIndexEnabled())
status = &ResultStatus{}
assert.False(status.TxIndexEnabled())
status.NodeInfo = &p2p.NodeInfo{}
assert.False(status.TxIndexEnabled())
cases := []struct {
expected bool
other []string
}{
{false, nil},
{false, []string{}},
{false, []string{"a=b"}},
{false, []string{"tx_indexiskv", "some=dood"}},
{true, []string{"tx_index=on", "tx_index=other"}},
{true, []string{"^(*^(", "tx_index=on", "a=n=b=d="}},
}
for _, tc := range cases {
status.NodeInfo.Other = tc.other
assert.Equal(tc.expected, status.TxIndexEnabled())
}
}

+ 128
- 54
rpc/test/client_test.go View File

@ -12,7 +12,10 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common" . "github.com/tendermint/go-common"
rpc "github.com/tendermint/go-rpc/client"
"github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types" ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
@ -25,24 +28,20 @@ import (
// status // status
func TestURIStatus(t *testing.T) { func TestURIStatus(t *testing.T) {
tmResult := new(ctypes.TMResult)
_, err := GetURIClient().Call("status", map[string]interface{}{}, tmResult)
require.Nil(t, err)
testStatus(t, tmResult)
testStatus(t, GetURIClient())
} }
func TestJSONStatus(t *testing.T) { func TestJSONStatus(t *testing.T) {
tmResult := new(ctypes.TMResult)
_, err := GetJSONClient().Call("status", []interface{}{}, tmResult)
require.Nil(t, err)
testStatus(t, tmResult)
testStatus(t, GetJSONClient())
} }
func testStatus(t *testing.T, statusI interface{}) {
func testStatus(t *testing.T, client rpc.HTTPClient) {
chainID := GetConfig().GetString("chain_id") chainID := GetConfig().GetString("chain_id")
tmResult := new(ctypes.TMResult)
_, err := client.Call("status", map[string]interface{}{}, tmResult)
require.Nil(t, err)
tmRes := statusI.(*ctypes.TMResult)
status := (*tmRes).(*ctypes.ResultStatus)
status := (*tmResult).(*ctypes.ResultStatus)
assert.Equal(t, chainID, status.NodeInfo.Network) assert.Equal(t, chainID, status.NodeInfo.Network)
} }
@ -59,28 +58,22 @@ func randBytes(t *testing.T) []byte {
} }
func TestURIBroadcastTxSync(t *testing.T) { func TestURIBroadcastTxSync(t *testing.T) {
config.Set("block_size", 0)
defer config.Set("block_size", -1)
tmResult := new(ctypes.TMResult)
tx := randBytes(t)
_, err := GetURIClient().Call("broadcast_tx_sync", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(t, err)
testBroadcastTxSync(t, tmResult, tx)
testBroadcastTxSync(t, GetURIClient())
} }
func TestJSONBroadcastTxSync(t *testing.T) { func TestJSONBroadcastTxSync(t *testing.T) {
testBroadcastTxSync(t, GetJSONClient())
}
func testBroadcastTxSync(t *testing.T, client rpc.HTTPClient) {
config.Set("block_size", 0) config.Set("block_size", 0)
defer config.Set("block_size", -1) defer config.Set("block_size", -1)
tmResult := new(ctypes.TMResult) tmResult := new(ctypes.TMResult)
tx := randBytes(t) tx := randBytes(t)
_, err := GetJSONClient().Call("broadcast_tx_sync", []interface{}{tx}, tmResult)
_, err := client.Call("broadcast_tx_sync", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(t, err) require.Nil(t, err)
testBroadcastTxSync(t, tmResult, tx)
}
func testBroadcastTxSync(t *testing.T, resI interface{}, tx []byte) {
tmRes := resI.(*ctypes.TMResult)
res := (*tmRes).(*ctypes.ResultBroadcastTx)
res := (*tmResult).(*ctypes.ResultBroadcastTx)
require.Equal(t, abci.CodeType_OK, res.Code) require.Equal(t, abci.CodeType_OK, res.Code)
mem := node.MempoolReactor().Mempool mem := node.MempoolReactor().Mempool
require.Equal(t, 1, mem.Size()) require.Equal(t, 1, mem.Size())
@ -98,34 +91,31 @@ func testTxKV(t *testing.T) ([]byte, []byte, []byte) {
return k, v, []byte(Fmt("%s=%s", k, v)) return k, v, []byte(Fmt("%s=%s", k, v))
} }
func sendTx(t *testing.T) ([]byte, []byte) {
func sendTx(t *testing.T, client rpc.HTTPClient) ([]byte, []byte) {
tmResult := new(ctypes.TMResult) tmResult := new(ctypes.TMResult)
k, v, tx := testTxKV(t) k, v, tx := testTxKV(t)
_, err := GetJSONClient().Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
_, err := client.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(t, err) require.Nil(t, err)
return k, v return k, v
} }
func TestURIABCIQuery(t *testing.T) { func TestURIABCIQuery(t *testing.T) {
k, v := sendTx(t)
time.Sleep(time.Second)
tmResult := new(ctypes.TMResult)
_, err := GetURIClient().Call("abci_query", map[string]interface{}{"path": "", "data": k, "prove": false}, tmResult)
require.Nil(t, err)
testABCIQuery(t, tmResult, v)
testABCIQuery(t, GetURIClient())
} }
func TestJSONABCIQuery(t *testing.T) { func TestJSONABCIQuery(t *testing.T) {
k, v := sendTx(t)
testABCIQuery(t, GetURIClient())
}
func testABCIQuery(t *testing.T, client rpc.HTTPClient) {
k, _ := sendTx(t, client)
time.Sleep(time.Millisecond * 100)
tmResult := new(ctypes.TMResult) tmResult := new(ctypes.TMResult)
_, err := GetJSONClient().Call("abci_query", []interface{}{"", k, false}, tmResult)
_, err := client.Call("abci_query",
map[string]interface{}{"path": "", "data": k, "prove": false}, tmResult)
require.Nil(t, err) require.Nil(t, err)
testABCIQuery(t, tmResult, v)
}
func testABCIQuery(t *testing.T, statusI interface{}, value []byte) {
tmRes := statusI.(*ctypes.TMResult)
resQuery := (*tmRes).(*ctypes.ResultABCIQuery)
resQuery := (*tmResult).(*ctypes.ResultABCIQuery)
require.EqualValues(t, 0, resQuery.Response.Code) require.EqualValues(t, 0, resQuery.Response.Code)
// XXX: specific to value returned by the dummy // XXX: specific to value returned by the dummy
@ -136,25 +126,22 @@ func testABCIQuery(t *testing.T, statusI interface{}, value []byte) {
// broadcast tx commit // broadcast tx commit
func TestURIBroadcastTxCommit(t *testing.T) { func TestURIBroadcastTxCommit(t *testing.T) {
tmResult := new(ctypes.TMResult)
tx := randBytes(t)
_, err := GetURIClient().Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(t, err)
testBroadcastTxCommit(t, tmResult, tx)
testBroadcastTxCommit(t, GetURIClient())
} }
func TestJSONBroadcastTxCommit(t *testing.T) { func TestJSONBroadcastTxCommit(t *testing.T) {
tmResult := new(ctypes.TMResult)
tx := randBytes(t)
_, err := GetJSONClient().Call("broadcast_tx_commit", []interface{}{tx}, tmResult)
require.Nil(t, err)
testBroadcastTxCommit(t, tmResult, tx)
testBroadcastTxCommit(t, GetJSONClient())
} }
func testBroadcastTxCommit(t *testing.T, resI interface{}, tx []byte) {
func testBroadcastTxCommit(t *testing.T, client rpc.HTTPClient) {
require := require.New(t) require := require.New(t)
tmRes := resI.(*ctypes.TMResult)
res := (*tmRes).(*ctypes.ResultBroadcastTxCommit)
tmResult := new(ctypes.TMResult)
tx := randBytes(t)
_, err := client.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(err)
res := (*tmResult).(*ctypes.ResultBroadcastTxCommit)
checkTx := res.CheckTx checkTx := res.CheckTx
require.Equal(abci.CodeType_OK, checkTx.Code) require.Equal(abci.CodeType_OK, checkTx.Code)
deliverTx := res.DeliverTx deliverTx := res.DeliverTx
@ -164,6 +151,91 @@ func testBroadcastTxCommit(t *testing.T, resI interface{}, tx []byte) {
// TODO: find tx in block // TODO: find tx in block
} }
//--------------------------------------------------------------------------------
// query tx
func TestURITx(t *testing.T) {
testTx(t, GetURIClient(), true)
core.SetTxIndexer(&null.TxIndex{})
testTx(t, GetJSONClient(), false)
core.SetTxIndexer(node.ConsensusState().GetState().TxIndexer)
}
func TestJSONTx(t *testing.T) {
testTx(t, GetJSONClient(), true)
core.SetTxIndexer(&null.TxIndex{})
testTx(t, GetJSONClient(), false)
core.SetTxIndexer(node.ConsensusState().GetState().TxIndexer)
}
func testTx(t *testing.T, client rpc.HTTPClient, withIndexer bool) {
assert, require := assert.New(t), require.New(t)
// first we broadcast a tx
tmResult := new(ctypes.TMResult)
txBytes := randBytes(t)
tx := types.Tx(txBytes)
_, err := client.Call("broadcast_tx_commit", map[string]interface{}{"tx": txBytes}, tmResult)
require.Nil(err)
res := (*tmResult).(*ctypes.ResultBroadcastTxCommit)
checkTx := res.CheckTx
require.Equal(abci.CodeType_OK, checkTx.Code)
deliverTx := res.DeliverTx
require.Equal(abci.CodeType_OK, deliverTx.Code)
mem := node.MempoolReactor().Mempool
require.Equal(0, mem.Size())
txHash := tx.Hash()
txHash2 := types.Tx("a different tx").Hash()
cases := []struct {
valid bool
hash []byte
prove bool
}{
// only valid if correct hash provided
{true, txHash, false},
{true, txHash, true},
{false, txHash2, false},
{false, txHash2, true},
{false, nil, false},
{false, nil, true},
}
for i, tc := range cases {
idx := fmt.Sprintf("%d", i)
// now we query for the tx.
// since there's only one tx, we know index=0.
tmResult = new(ctypes.TMResult)
query := map[string]interface{}{
"hash": tc.hash,
"prove": tc.prove,
}
_, err = client.Call("tx", query, tmResult)
valid := (withIndexer && tc.valid)
if !valid {
require.NotNil(err, idx)
} else {
require.Nil(err, idx)
res2 := (*tmResult).(*ctypes.ResultTx)
assert.Equal(tx, res2.Tx, idx)
assert.Equal(res.Height, res2.Height, idx)
assert.Equal(0, res2.Index, idx)
assert.Equal(abci.CodeType_OK, res2.TxResult.Code, idx)
// time to verify the proof
proof := res2.Proof
if tc.prove && assert.Equal(tx, proof.Data, idx) {
assert.True(proof.Proof.Verify(proof.Index, proof.Total, tx.Hash(), proof.RootHash), idx)
}
}
}
}
//-------------------------------------------------------------------------------- //--------------------------------------------------------------------------------
// Test the websocket service // Test the websocket service
@ -240,7 +312,7 @@ func TestWSTxEvent(t *testing.T) {
// send a tx // send a tx
tmResult := new(ctypes.TMResult) tmResult := new(ctypes.TMResult)
_, err := GetJSONClient().Call("broadcast_tx_sync", []interface{}{tx}, tmResult)
_, err := GetJSONClient().Call("broadcast_tx_sync", map[string]interface{}{"tx": tx}, tmResult)
require.Nil(err) require.Nil(err)
waitForEvent(t, wsc, eid, true, func() {}, func(eid string, b interface{}) error { waitForEvent(t, wsc, eid, true, func() {}, func(eid string, b interface{}) error {
@ -310,7 +382,9 @@ func TestURIUnsafeSetConfig(t *testing.T) {
func TestJSONUnsafeSetConfig(t *testing.T) { func TestJSONUnsafeSetConfig(t *testing.T) {
for _, testCase := range testCasesUnsafeSetConfig { for _, testCase := range testCasesUnsafeSetConfig {
tmResult := new(ctypes.TMResult) tmResult := new(ctypes.TMResult)
_, err := GetJSONClient().Call("unsafe_set_config", []interface{}{testCase[0], testCase[1], testCase[2]}, tmResult)
_, err := GetJSONClient().Call("unsafe_set_config",
map[string]interface{}{"type": testCase[0], "key": testCase[1], "value": testCase[2]},
tmResult)
require.Nil(t, err) require.Nil(t, err)
} }
testUnsafeSetConfig(t) testUnsafeSetConfig(t)


+ 4
- 4
rpc/test/helpers.go View File

@ -72,15 +72,15 @@ func GetConfig() cfg.Config {
} }
// GetURIClient gets a uri client pointing to the test tendermint rpc // GetURIClient gets a uri client pointing to the test tendermint rpc
func GetURIClient() *client.ClientURI {
func GetURIClient() *client.URIClient {
rpcAddr := GetConfig().GetString("rpc_laddr") rpcAddr := GetConfig().GetString("rpc_laddr")
return client.NewClientURI(rpcAddr)
return client.NewURIClient(rpcAddr)
} }
// GetJSONClient gets a http/json client pointing to the test tendermint rpc // GetJSONClient gets a http/json client pointing to the test tendermint rpc
func GetJSONClient() *client.ClientJSONRPC {
func GetJSONClient() *client.JSONRPCClient {
rpcAddr := GetConfig().GetString("rpc_laddr") rpcAddr := GetConfig().GetString("rpc_laddr")
return client.NewClientJSONRPC(rpcAddr)
return client.NewJSONRPCClient(rpcAddr)
} }
func GetGRPCClient() core_grpc.BroadcastAPIClient { func GetGRPCClient() core_grpc.BroadcastAPIClient {


+ 1
- 1
scripts/install_abci_apps.sh View File

@ -1,6 +1,6 @@
#! /bin/bash #! /bin/bash
go get github.com/tendermint/abci/...
go get -d github.com/tendermint/abci
# get the abci commit used by tendermint # get the abci commit used by tendermint
COMMIT=`bash scripts/glide/parse.sh abci` COMMIT=`bash scripts/glide/parse.sh abci`


+ 20
- 0
scripts/publish.sh View File

@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -eu
VERSION=$1
DIST_DIR=$2 # ./build/dist
# Get the version from the first argument, or try to figure it out from version/version.go.
if [ -z "$VERSION" ]; then
VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go)
fi
if [ -z "$VERSION" ]; then
echo "Please specify a version."
exit 1
fi
# copy to s3
aws s3 cp --recursive ${DIST_DIR} s3://tendermint/${VERSION} --acl public-read --exclude "*" --include "*.zip"
aws s3 cp ${DIST_DIR}/tendermint_${VERSION}_SHA256SUMS s3://tendermint/${VERSION} --acl public-read
exit 0

+ 83
- 67
state/execution.go View File

@ -2,68 +2,49 @@ package state
import ( import (
"errors" "errors"
"fmt"
"github.com/ebuchman/fail-test"
fail "github.com/ebuchman/fail-test"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common" . "github.com/tendermint/go-common"
"github.com/tendermint/go-crypto"
crypto "github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
//-------------------------------------------------- //--------------------------------------------------
// Execute the block // Execute the block
// Execute the block to mutate State.
// Validates block and then executes Data.Txs in the block.
func (s *State) ExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block, blockPartsHeader types.PartSetHeader) error {
// ValExecBlock executes the block, but does NOT mutate State.
// + validates the block
// + executes block.Txs on the proxyAppConn
func (s *State) ValExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) {
// Validate the block. // Validate the block.
if err := s.validateBlock(block); err != nil { if err := s.validateBlock(block); err != nil {
return ErrInvalidBlock(err)
return nil, ErrInvalidBlock(err)
} }
// compute bitarray of validators that signed
signed := commitBitArrayFromBlock(block)
_ = signed // TODO send on begin block
// copy the valset
valSet := s.Validators.Copy()
nextValSet := valSet.Copy()
// Execute the block txs // Execute the block txs
changedValidators, err := execBlockOnProxyApp(eventCache, proxyAppConn, block)
abciResponses, err := execBlockOnProxyApp(eventCache, proxyAppConn, block)
if err != nil { if err != nil {
// There was some error in proxyApp // There was some error in proxyApp
// TODO Report error and wait for proxyApp to be available. // TODO Report error and wait for proxyApp to be available.
return ErrProxyAppConn(err)
return nil, ErrProxyAppConn(err)
} }
// update the validator set
err = updateValidators(nextValSet, changedValidators)
if err != nil {
log.Warn("Error changing validator set", "error", err)
// TODO: err or carry on?
}
// All good!
// Update validator accums and set state variables
nextValSet.IncrementAccum(1)
s.SetBlockAndValidators(block.Header, blockPartsHeader, valSet, nextValSet)
fail.Fail() // XXX
return nil
return abciResponses, nil
} }
// Executes block's transactions on proxyAppConn. // Executes block's transactions on proxyAppConn.
// Returns a list of updates to the validator set
// Returns a list of transaction results and updates to the validator set
// TODO: Generate a bitmap or otherwise store tx validity in state. // TODO: Generate a bitmap or otherwise store tx validity in state.
func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) ([]*abci.Validator, error) {
func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) {
var validTxs, invalidTxs = 0, 0 var validTxs, invalidTxs = 0, 0
txIndex := 0
abciResponses := NewABCIResponses(block)
// Execute transactions and get hash // Execute transactions and get hash
proxyCb := func(req *abci.Request, res *abci.Response) { proxyCb := func(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) { switch r := res.Value.(type) {
@ -73,22 +54,27 @@ func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnCo
// Blocks may include invalid txs. // Blocks may include invalid txs.
// reqDeliverTx := req.(abci.RequestDeliverTx) // reqDeliverTx := req.(abci.RequestDeliverTx)
txError := "" txError := ""
apTx := r.DeliverTx
if apTx.Code == abci.CodeType_OK {
validTxs += 1
txResult := r.DeliverTx
if txResult.Code == abci.CodeType_OK {
validTxs++
} else { } else {
log.Debug("Invalid tx", "code", r.DeliverTx.Code, "log", r.DeliverTx.Log)
invalidTxs += 1
txError = apTx.Code.String()
log.Debug("Invalid tx", "code", txResult.Code, "log", txResult.Log)
invalidTxs++
txError = txResult.Code.String()
} }
abciResponses.DeliverTx[txIndex] = txResult
txIndex++
// NOTE: if we count we can access the tx from the block instead of // NOTE: if we count we can access the tx from the block instead of
// pulling it from the req // pulling it from the req
event := types.EventDataTx{ event := types.EventDataTx{
Tx: req.GetDeliverTx().Tx,
Data: apTx.Data,
Code: apTx.Code,
Log: apTx.Log,
Error: txError,
Height: block.Height,
Tx: types.Tx(req.GetDeliverTx().Tx),
Data: txResult.Data,
Code: txResult.Code,
Log: txResult.Log,
Error: txError,
} }
types.FireEventTx(eventCache, event) types.FireEventTx(eventCache, event)
} }
@ -102,33 +88,29 @@ func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnCo
return nil, err return nil, err
} }
fail.Fail() // XXX
// Run txs of block // Run txs of block
for _, tx := range block.Txs { for _, tx := range block.Txs {
fail.FailRand(len(block.Txs)) // XXX
proxyAppConn.DeliverTxAsync(tx) proxyAppConn.DeliverTxAsync(tx)
if err := proxyAppConn.Error(); err != nil { if err := proxyAppConn.Error(); err != nil {
return nil, err return nil, err
} }
} }
fail.Fail() // XXX
// End block // End block
respEndBlock, err := proxyAppConn.EndBlockSync(uint64(block.Height))
abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(uint64(block.Height))
if err != nil { if err != nil {
log.Warn("Error in proxyAppConn.EndBlock", "error", err) log.Warn("Error in proxyAppConn.EndBlock", "error", err)
return nil, err return nil, err
} }
fail.Fail() // XXX
valDiff := abciResponses.EndBlock.Diffs
log.Info("Executed block", "height", block.Height, "valid txs", validTxs, "invalid txs", invalidTxs) log.Info("Executed block", "height", block.Height, "valid txs", validTxs, "invalid txs", invalidTxs)
if len(respEndBlock.Diffs) > 0 {
log.Info("Update to validator set", "updates", abci.ValidatorsString(respEndBlock.Diffs))
if len(valDiff) > 0 {
log.Info("Update to validator set", "updates", abci.ValidatorsString(valDiff))
} }
return respEndBlock.Diffs, nil
return abciResponses, nil
} }
func updateValidators(validators *types.ValidatorSet, changedValidators []*abci.Validator) error { func updateValidators(validators *types.ValidatorSet, changedValidators []*abci.Validator) error {
@ -219,25 +201,43 @@ func (s *State) validateBlock(block *types.Block) error {
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// ApplyBlock executes the block, then commits and updates the mempool atomically
// ApplyBlock validates & executes the block, updates state w/ ABCI responses,
// then commits and updates the mempool atomically, then saves state.
// Transaction results are optionally indexed.
// Execute and commit block against app, save block and state
// Validate, execute, and commit block against app, save block and state
func (s *State) ApplyBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, func (s *State) ApplyBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus,
block *types.Block, partsHeader types.PartSetHeader, mempool types.Mempool) error { block *types.Block, partsHeader types.PartSetHeader, mempool types.Mempool) error {
// Run the block on the State:
// + update validator sets
// + run txs on the proxyAppConn
err := s.ExecBlock(eventCache, proxyAppConn, block, partsHeader)
abciResponses, err := s.ValExecBlock(eventCache, proxyAppConn, block)
if err != nil { if err != nil {
return errors.New(Fmt("Exec failed for application: %v", err))
return fmt.Errorf("Exec failed for application: %v", err)
} }
fail.Fail() // XXX
// index txs. This could run in the background
s.indexTxs(abciResponses)
// save the results before we commit
s.SaveABCIResponses(abciResponses)
fail.Fail() // XXX
// now update the block and validators
s.SetBlockAndValidators(block.Header, partsHeader, abciResponses)
// lock mempool, commit state, update mempool // lock mempool, commit state, update mempool
err = s.CommitStateUpdateMempool(proxyAppConn, block, mempool) err = s.CommitStateUpdateMempool(proxyAppConn, block, mempool)
if err != nil { if err != nil {
return errors.New(Fmt("Commit failed for application: %v", err))
return fmt.Errorf("Commit failed for application: %v", err)
} }
fail.Fail() // XXX
// save the state
s.Save()
return nil return nil
} }
@ -268,9 +268,25 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl
return nil return nil
} }
// Apply and commit a block, but without all the state validation.
func (s *State) indexTxs(abciResponses *ABCIResponses) {
// save the tx results using the TxIndexer
// NOTE: these writes may overwrite existing entries, but the values should be the same.
batch := txindex.NewBatch(len(abciResponses.DeliverTx))
for i, d := range abciResponses.DeliverTx {
tx := abciResponses.txs[i]
batch.Add(types.TxResult{
Height: uint64(abciResponses.Height),
Index: uint32(i),
Tx: tx,
Result: *d,
})
}
s.TxIndexer.AddBatch(batch)
}
// Exec and commit a block on the proxyApp without validating or mutating the state
// Returns the application root hash (result of abci.Commit) // Returns the application root hash (result of abci.Commit)
func ApplyBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block) ([]byte, error) {
func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block) ([]byte, error) {
var eventCache types.Fireable // nil var eventCache types.Fireable // nil
_, err := execBlockOnProxyApp(eventCache, appConnConsensus, block) _, err := execBlockOnProxyApp(eventCache, appConnConsensus, block)
if err != nil { if err != nil {


+ 90
- 0
state/execution_test.go View File

@ -0,0 +1,90 @@
package state
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/abci/example/dummy"
crypto "github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
cfg "github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
)
var (
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("execution_test"))
chainID = "execution_chain"
testPartSize = 65536
nTxsPerBlock = 10
)
func TestApplyBlock(t *testing.T) {
cc := proxy.NewLocalClientCreator(dummy.NewDummyApplication())
config := cfg.ResetConfig("execution_test_")
proxyApp := proxy.NewAppConns(config, cc, nil)
_, err := proxyApp.Start()
require.Nil(t, err)
defer proxyApp.Stop()
mempool := mempool.NewMempool(config, proxyApp.Mempool())
state := state()
indexer := &dummyIndexer{0}
state.TxIndexer = indexer
// make block
block := makeBlock(1, state)
err = state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
require.Nil(t, err)
assert.Equal(t, nTxsPerBlock, indexer.Indexed) // test indexing works
// TODO check state and mempool
}
//----------------------------------------------------------------------------
// make some bogus txs
func makeTxs(blockNum int) (txs []types.Tx) {
for i := 0; i < nTxsPerBlock; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
}
return txs
}
func state() *State {
return MakeGenesisState(dbm.NewMemDB(), &types.GenesisDoc{
ChainID: chainID,
Validators: []types.GenesisValidator{
types.GenesisValidator{privKey.PubKey(), 10000, "test"},
},
AppHash: nil,
})
}
func makeBlock(num int, state *State) *types.Block {
prevHash := state.LastBlockID.Hash
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
block, _ := types.MakeBlock(num, chainID, makeTxs(num), new(types.Commit),
prevBlockID, valHash, state.AppHash, testPartSize)
return block
}
// dummyIndexer increments a counter every time we index a transaction.
type dummyIndexer struct {
Indexed int
}
func (indexer *dummyIndexer) Get(hash []byte) (*types.TxResult, error) {
return nil, nil
}
func (indexer *dummyIndexer) AddBatch(batch *txindex.Batch) error {
indexer.Indexed += batch.Size()
return nil
}

+ 92
- 5
state/state.go View File

@ -6,15 +6,19 @@ import (
"sync" "sync"
"time" "time"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common" . "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config" cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db" dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-wire" "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
var ( var (
stateKey = []byte("stateKey")
stateKey = []byte("stateKey")
abciResponsesKey = []byte("abciResponsesKey")
) )
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
@ -29,7 +33,7 @@ type State struct {
GenesisDoc *types.GenesisDoc GenesisDoc *types.GenesisDoc
ChainID string ChainID string
// updated at end of ExecBlock
// updated at end of SetBlockAndValidators
LastBlockHeight int // Genesis state has this set to 0. So, Block(H=0) does not exist. LastBlockHeight int // Genesis state has this set to 0. So, Block(H=0) does not exist.
LastBlockID types.BlockID LastBlockID types.BlockID
LastBlockTime time.Time LastBlockTime time.Time
@ -38,6 +42,12 @@ type State struct {
// AppHash is updated after Commit // AppHash is updated after Commit
AppHash []byte AppHash []byte
TxIndexer txindex.TxIndexer `json:"-"` // Transaction indexer.
// Intermediate results from processing
// Persisted separately from the state
abciResponses *ABCIResponses
} }
func LoadState(db dbm.DB) *State { func LoadState(db dbm.DB) *State {
@ -45,7 +55,7 @@ func LoadState(db dbm.DB) *State {
} }
func loadState(db dbm.DB, key []byte) *State { func loadState(db dbm.DB, key []byte) *State {
s := &State{db: db}
s := &State{db: db, TxIndexer: &null.TxIndex{}}
buf := db.Get(key) buf := db.Get(key)
if len(buf) == 0 { if len(buf) == 0 {
return nil return nil
@ -54,7 +64,7 @@ func loadState(db dbm.DB, key []byte) *State {
wire.ReadBinaryPtr(&s, r, 0, n, err) wire.ReadBinaryPtr(&s, r, 0, n, err)
if *err != nil { if *err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
Exit(Fmt("Data has been corrupted or its spec has changed: %v\n", *err))
Exit(Fmt("LoadState: Data has been corrupted or its spec has changed: %v\n", *err))
} }
// TODO: ensure that buf is completely read. // TODO: ensure that buf is completely read.
} }
@ -72,6 +82,7 @@ func (s *State) Copy() *State {
Validators: s.Validators.Copy(), Validators: s.Validators.Copy(),
LastValidators: s.LastValidators.Copy(), LastValidators: s.LastValidators.Copy(),
AppHash: s.AppHash, AppHash: s.AppHash,
TxIndexer: s.TxIndexer, // pointer here, not value
} }
} }
@ -81,6 +92,29 @@ func (s *State) Save() {
s.db.SetSync(stateKey, s.Bytes()) s.db.SetSync(stateKey, s.Bytes())
} }
// SaveABCIResponses persists the ABCIResponses to disk so they can be
// recovered if we crash after app.Commit and before s.Save()
func (s *State) SaveABCIResponses(abciResponses *ABCIResponses) {
// save the ABCI responses to the db
s.db.SetSync(abciResponsesKey, abciResponses.Bytes())
}
func (s *State) LoadABCIResponses() *ABCIResponses {
abciResponses := new(ABCIResponses)
buf := s.db.Get(abciResponsesKey)
if len(buf) != 0 {
r, n, err := bytes.NewReader(buf), new(int), new(error)
wire.ReadBinaryPtr(abciResponses, r, 0, n, err)
if *err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
Exit(Fmt("LoadABCIResponses: Data has been corrupted or its spec has changed: %v\n", *err))
}
// TODO: ensure that buf is completely read.
}
return abciResponses
}
func (s *State) Equals(s2 *State) bool { func (s *State) Equals(s2 *State) bool {
return bytes.Equal(s.Bytes(), s2.Bytes()) return bytes.Equal(s.Bytes(), s2.Bytes())
} }
@ -96,7 +130,22 @@ func (s *State) Bytes() []byte {
// Mutate state variables to match block and validators // Mutate state variables to match block and validators
// after running EndBlock // after running EndBlock
func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, prevValSet, nextValSet *types.ValidatorSet) {
func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, abciResponses *ABCIResponses) {
// copy the valset so we can apply changes from EndBlock
// and update s.LastValidators and s.Validators
prevValSet := s.Validators.Copy()
nextValSet := prevValSet.Copy()
// update the validator set with the latest abciResponses
err := updateValidators(nextValSet, abciResponses.EndBlock.Diffs)
if err != nil {
log.Warn("Error changing validator set", "error", err)
// TODO: err or carry on?
}
// Update validator accums and set state variables
nextValSet.IncrementAccum(1)
s.setBlockAndValidators(header.Height, s.setBlockAndValidators(header.Height,
types.BlockID{header.Hash(), blockPartsHeader}, header.Time, types.BlockID{header.Hash(), blockPartsHeader}, header.Time,
prevValSet, nextValSet) prevValSet, nextValSet)
@ -125,12 +174,46 @@ func GetState(config cfg.Config, stateDB dbm.DB) *State {
state = MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file")) state = MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
state.Save() state.Save()
} }
return state return state
} }
//--------------------------------------------------
// ABCIResponses holds intermediate state during block processing
type ABCIResponses struct {
Height int
DeliverTx []*abci.ResponseDeliverTx
EndBlock abci.ResponseEndBlock
txs types.Txs // reference for indexing results by hash
}
func NewABCIResponses(block *types.Block) *ABCIResponses {
return &ABCIResponses{
Height: block.Height,
DeliverTx: make([]*abci.ResponseDeliverTx, block.NumTxs),
txs: block.Data.Txs,
}
}
// Serialize the ABCIResponses
func (a *ABCIResponses) Bytes() []byte {
buf, n, err := new(bytes.Buffer), new(int), new(error)
wire.WriteBinary(*a, buf, n, err)
if *err != nil {
PanicCrisis(*err)
}
return buf.Bytes()
}
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Genesis // Genesis
// MakeGenesisStateFromFile reads and unmarshals state from the given file.
//
// Used during replay and in tests.
func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) *State { func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) *State {
genDocJSON, err := ioutil.ReadFile(genDocFile) genDocJSON, err := ioutil.ReadFile(genDocFile)
if err != nil { if err != nil {
@ -143,6 +226,9 @@ func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) *State {
return MakeGenesisState(db, genDoc) return MakeGenesisState(db, genDoc)
} }
// MakeGenesisState creates state from types.GenesisDoc.
//
// Used in tests.
func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State { func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State {
if len(genDoc.Validators) == 0 { if len(genDoc.Validators) == 0 {
Exit(Fmt("The genesis file has no validators")) Exit(Fmt("The genesis file has no validators"))
@ -176,5 +262,6 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State {
Validators: types.NewValidatorSet(validators), Validators: types.NewValidatorSet(validators),
LastValidators: types.NewValidatorSet(nil), LastValidators: types.NewValidatorSet(nil),
AppHash: genDoc.AppHash, AppHash: genDoc.AppHash,
TxIndexer: &null.TxIndex{}, // we do not need indexer during replay and in tests
} }
} }

+ 31
- 0
state/state_test.go View File

@ -1,8 +1,12 @@
package state package state
import ( import (
"fmt"
"testing" "testing"
"github.com/stretchr/testify/assert"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db" dbm "github.com/tendermint/go-db"
"github.com/tendermint/tendermint/config/tendermint_test" "github.com/tendermint/tendermint/config/tendermint_test"
) )
@ -40,3 +44,30 @@ func TestStateSaveLoad(t *testing.T) {
t.Fatal("expected state and its copy to be identical. got %v\n expected %v\n", loadedState, state) t.Fatal("expected state and its copy to be identical. got %v\n expected %v\n", loadedState, state)
} }
} }
func TestABCIResponsesSaveLoad(t *testing.T) {
assert := assert.New(t)
config := tendermint_test.ResetConfig("state_")
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := GetState(config, stateDB)
state.LastBlockHeight += 1
// build mock responses
block := makeBlock(2, state)
abciResponses := NewABCIResponses(block)
abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo")}
abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok"}
abciResponses.EndBlock = abci.ResponseEndBlock{Diffs: []*abci.Validator{
{
PubKey: crypto.GenPrivKeyEd25519().PubKey().Bytes(),
Power: 10,
},
}}
abciResponses.txs = nil
state.SaveABCIResponses(abciResponses)
abciResponses2 := state.LoadABCIResponses()
assert.Equal(abciResponses, abciResponses2, fmt.Sprintf("ABCIResponses don't match: Got %v, Expected %v", abciResponses2, abciResponses))
}

+ 57
- 0
state/txindex/indexer.go View File

@ -0,0 +1,57 @@
package txindex
import (
"errors"
"github.com/tendermint/tendermint/types"
)
// TxIndexer interface defines methods to index and search transactions.
type TxIndexer interface {
// AddBatch analyzes, indexes, or stores a batch of transactions.
//
// NOTE: We do not specify an Index method for analyzing a single transaction
// here because it would incur a heavy performance loss. Almost all advanced
// indexers support batching.
AddBatch(b *Batch) error
// Get returns the specified transaction, or nil if the transaction is not
// indexed or stored.
Get(hash []byte) (*types.TxResult, error)
}
//----------------------------------------------------
// Txs are written as a batch
// A Batch groups together multiple Index operations you would like performed
// at the same time. The Batch structure is NOT thread-safe. You should only
// perform operations on a batch from a single thread at a time. Once batch
// execution has started, you may not modify it.
type Batch struct {
Ops []types.TxResult
}
// NewBatch creates a new Batch.
func NewBatch(n int) *Batch {
return &Batch{
Ops: make([]types.TxResult, n),
}
}
// Add adds or updates the entry at the given result.Index.
func (b *Batch) Add(result types.TxResult) error {
b.Ops[result.Index] = result
return nil
}
// Size returns the total number of operations inside the batch.
func (b *Batch) Size() int {
return len(b.Ops)
}
//----------------------------------------------------
// Errors
// ErrorEmptyHash indicates empty hash
var ErrorEmptyHash = errors.New("Transaction hash cannot be empty")
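To make the batching contract concrete, here is a hand-written usage sketch (not part of this diff; indexBlockTxs is a hypothetical helper): one Batch per block, one Add per DeliverTx result, then a single AddBatch call on whichever TxIndexer is configured — the kv indexer used below is just one choice.
package main

import (
    abci "github.com/tendermint/abci/types"
    db "github.com/tendermint/go-db"
    "github.com/tendermint/tendermint/state/txindex"
    "github.com/tendermint/tendermint/state/txindex/kv"
    "github.com/tendermint/tendermint/types"
)

// indexBlockTxs indexes every transaction of one block in a single batch.
func indexBlockTxs(height uint64, txs types.Txs, results []*abci.ResponseDeliverTx) error {
    var indexer txindex.TxIndexer = kv.NewTxIndex(db.NewMemDB())
    batch := txindex.NewBatch(len(txs))
    for i, tx := range txs {
        batch.Add(types.TxResult{
            Height: height,
            Index:  uint32(i),
            Tx:     tx,
            Result: *results[i],
        })
    }
    return indexer.AddBatch(batch)
}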

+ 56
- 0
state/txindex/kv/kv.go View File

@ -0,0 +1,56 @@
package kv
import (
"bytes"
"fmt"
db "github.com/tendermint/go-db"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
)
// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB).
// It can only index a transaction by its hash.
type TxIndex struct {
store db.DB
}
// NewTxIndex returns a new instance of TxIndex.
func NewTxIndex(store db.DB) *TxIndex {
return &TxIndex{store: store}
}
// Get gets a transaction from the TxIndex storage and returns it, or nil if the
// transaction is not found.
func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) {
if len(hash) == 0 {
return nil, txindex.ErrorEmptyHash
}
rawBytes := txi.store.Get(hash)
if rawBytes == nil {
return nil, nil
}
r := bytes.NewReader(rawBytes)
var n int
var err error
txResult := wire.ReadBinary(&types.TxResult{}, r, 0, &n, &err).(*types.TxResult)
if err != nil {
return nil, fmt.Errorf("Error reading TxResult: %v", err)
}
return txResult, nil
}
// AddBatch writes a batch of transactions into the TxIndex storage.
func (txi *TxIndex) AddBatch(b *txindex.Batch) error {
storeBatch := txi.store.NewBatch()
for _, result := range b.Ops {
rawBytes := wire.BinaryBytes(&result)
storeBatch.Set(result.Tx.Hash(), rawBytes)
}
storeBatch.Write()
return nil
}

+ 63
- 0
state/txindex/kv/kv_test.go View File

@ -0,0 +1,63 @@
package kv
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
db "github.com/tendermint/go-db"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
)
func TestTxIndex(t *testing.T) {
indexer := &TxIndex{store: db.NewMemDB()}
tx := types.Tx("HELLO WORLD")
txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: ""}}
hash := tx.Hash()
batch := txindex.NewBatch(1)
batch.Add(*txResult)
err := indexer.AddBatch(batch)
require.Nil(t, err)
loadedTxResult, err := indexer.Get(hash)
require.Nil(t, err)
assert.Equal(t, txResult, loadedTxResult)
}
func benchmarkTxIndex(txsCount int, b *testing.B) {
tx := types.Tx("HELLO WORLD")
txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: ""}}
dir, err := ioutil.TempDir("", "tx_index_db")
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(dir)
store := db.NewDB("tx_index", "leveldb", dir)
indexer := &TxIndex{store: store}
batch := txindex.NewBatch(txsCount)
for i := 0; i < txsCount; i++ {
txResult.Index += 1
batch.Add(*txResult)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
err = indexer.AddBatch(batch)
}
}
func BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) }
func BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) }
func BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) }
func BenchmarkTxIndex2000(b *testing.B) { benchmarkTxIndex(2000, b) }
func BenchmarkTxIndex10000(b *testing.B) { benchmarkTxIndex(10000, b) }

+ 21
- 0
state/txindex/null/null.go View File

@ -0,0 +1,21 @@
package null
import (
"errors"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
)
// TxIndex acts as a /dev/null.
type TxIndex struct{}
// Get always returns an error, signalling that indexing is disabled.
func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) {
return nil, errors.New(`Indexing is disabled (set 'tx_index = "kv"' in config)`)
}
// AddBatch is a no-op and returns nil.
func (txi *TxIndex) AddBatch(batch *txindex.Batch) error {
return nil
}

+ 6
- 6
test/app/test.sh View File

@ -8,10 +8,10 @@ set -e
# TODO: install everything # TODO: install everything
export TMROOT=$HOME/.tendermint_app
export TMHOME=$HOME/.tendermint_app
function dummy_over_socket(){ function dummy_over_socket(){
rm -rf $TMROOT
rm -rf $TMHOME
tendermint init tendermint init
echo "Starting dummy_over_socket" echo "Starting dummy_over_socket"
dummy > /dev/null & dummy > /dev/null &
@ -28,7 +28,7 @@ function dummy_over_socket(){
# start tendermint first # start tendermint first
function dummy_over_socket_reorder(){ function dummy_over_socket_reorder(){
rm -rf $TMROOT
rm -rf $TMHOME
tendermint init tendermint init
echo "Starting dummy_over_socket_reorder (ie. start tendermint first)" echo "Starting dummy_over_socket_reorder (ie. start tendermint first)"
tendermint node > tendermint.log & tendermint node > tendermint.log &
@ -46,7 +46,7 @@ function dummy_over_socket_reorder(){
function counter_over_socket() { function counter_over_socket() {
rm -rf $TMROOT
rm -rf $TMHOME
tendermint init tendermint init
echo "Starting counter_over_socket" echo "Starting counter_over_socket"
counter --serial > /dev/null & counter --serial > /dev/null &
@ -62,7 +62,7 @@ function counter_over_socket() {
} }
function counter_over_grpc() { function counter_over_grpc() {
rm -rf $TMROOT
rm -rf $TMHOME
tendermint init tendermint init
echo "Starting counter_over_grpc" echo "Starting counter_over_grpc"
counter --serial --abci grpc > /dev/null & counter --serial --abci grpc > /dev/null &
@ -78,7 +78,7 @@ function counter_over_grpc() {
} }
function counter_over_grpc_grpc() { function counter_over_grpc_grpc() {
rm -rf $TMROOT
rm -rf $TMHOME
tendermint init tendermint init
echo "Starting counter_over_grpc_grpc (ie. with grpc broadcast_tx)" echo "Starting counter_over_grpc_grpc (ie. with grpc broadcast_tx)"
counter --serial --abci grpc > /dev/null & counter --serial --abci grpc > /dev/null &


+ 1
- 1
test/p2p/README.md View File

@ -37,7 +37,7 @@ for i in $(seq 1 4); do
--ip="172.57.0.$((100 + $i))" \ --ip="172.57.0.$((100 + $i))" \
--name local_testnet_$i \ --name local_testnet_$i \
--entrypoint tendermint \ --entrypoint tendermint \
-e TMROOT=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \
-e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \
tendermint_tester node --seeds 172.57.0.101:46656,172.57.0.102:46656,172.57.0.103:46656,172.57.0.104:46656 --proxy_app=dummy tendermint_tester node --seeds 172.57.0.101:46656,172.57.0.102:46656,172.57.0.103:46656,172.57.0.104:46656 --proxy_app=dummy
done done
``` ```


+ 3
- 2
test/p2p/peer.sh View File

@ -21,7 +21,7 @@ if [[ "$CIRCLECI" == true ]]; then
--ip=$(test/p2p/ip.sh $ID) \ --ip=$(test/p2p/ip.sh $ID) \
--name "local_testnet_$ID" \ --name "local_testnet_$ID" \
--entrypoint tendermint \ --entrypoint tendermint \
-e TMROOT="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \
-e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \
--log-driver=syslog \ --log-driver=syslog \
--log-opt syslog-address=udp://127.0.0.1:5514 \ --log-opt syslog-address=udp://127.0.0.1:5514 \
--log-opt syslog-facility=daemon \ --log-opt syslog-facility=daemon \
@ -34,6 +34,7 @@ else
--ip=$(test/p2p/ip.sh $ID) \ --ip=$(test/p2p/ip.sh $ID) \
--name "local_testnet_$ID" \ --name "local_testnet_$ID" \
--entrypoint tendermint \ --entrypoint tendermint \
-e TMROOT="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \
-e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \
"$DOCKER_IMAGE" node $NODE_FLAGS --log_level=info --proxy_app="$APP_PROXY" "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=info --proxy_app="$APP_PROXY"
fi fi

+ 3
- 3
test/persist/test_failure_indices.sh View File

@ -1,15 +1,15 @@
#! /bin/bash #! /bin/bash
export TMROOT=$HOME/.tendermint_persist
export TMHOME=$HOME/.tendermint_persist
rm -rf "$TMROOT"
rm -rf "$TMHOME"
tendermint init tendermint init
# use a unix socket so we can remove it # use a unix socket so we can remove it
RPC_ADDR="$(pwd)/rpc.sock" RPC_ADDR="$(pwd)/rpc.sock"
TM_CMD="tendermint node --log_level=debug --rpc_laddr=unix://$RPC_ADDR" # &> tendermint_${name}.log" TM_CMD="tendermint node --log_level=debug --rpc_laddr=unix://$RPC_ADDR" # &> tendermint_${name}.log"
DUMMY_CMD="dummy --persist $TMROOT/dummy" # &> dummy_${name}.log"
DUMMY_CMD="dummy --persist $TMHOME/dummy" # &> dummy_${name}.log"
function start_procs(){ function start_procs(){


+ 3
- 3
test/persist/test_simple.sh View File

@ -1,15 +1,15 @@
#! /bin/bash #! /bin/bash
export TMROOT=$HOME/.tendermint_persist
export TMHOME=$HOME/.tendermint_persist
rm -rf $TMROOT
rm -rf $TMHOME
tendermint init tendermint init
function start_procs(){ function start_procs(){
name=$1 name=$1
echo "Starting persistent dummy and tendermint" echo "Starting persistent dummy and tendermint"
dummy --persist $TMROOT/dummy &> "dummy_${name}.log" &
dummy --persist $TMHOME/dummy &> "dummy_${name}.log" &
PID_DUMMY=$! PID_DUMMY=$!
tendermint node &> tendermint_${name}.log & tendermint node &> tendermint_${name}.log &
PID_TENDERMINT=$! PID_TENDERMINT=$!


+ 2
- 0
test/test_libs.sh View File

@ -12,6 +12,8 @@ fi
# libs we depend on # libs we depend on
#################### ####################
# some libs are tested with go, others with make
# TODO: should be all make (post repo merge)
LIBS_GO_TEST=(go-clist go-common go-config go-crypto go-db go-events go-merkle go-p2p) LIBS_GO_TEST=(go-clist go-common go-config go-crypto go-db go-events go-merkle go-p2p)
LIBS_MAKE_TEST=(go-rpc go-wire abci) LIBS_MAKE_TEST=(go-rpc go-wire abci)


+ 7
- 6
types/events.go View File

@ -2,10 +2,10 @@ package types
import ( import (
// for registering TMEventData as events.EventData // for registering TMEventData as events.EventData
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common" . "github.com/tendermint/go-common"
"github.com/tendermint/go-events" "github.com/tendermint/go-events"
"github.com/tendermint/go-wire" "github.com/tendermint/go-wire"
abci "github.com/tendermint/abci/types"
) )
// Functions to generate eventId strings // Functions to generate eventId strings
@ -73,11 +73,12 @@ type EventDataNewBlockHeader struct {
// All txs fire EventDataTx // All txs fire EventDataTx
type EventDataTx struct { type EventDataTx struct {
Tx Tx `json:"tx"`
Data []byte `json:"data"`
Log string `json:"log"`
Code abci.CodeType `json:"code"`
Error string `json:"error"` // this is redundant information for now
Height int `json:"height"`
Tx Tx `json:"tx"`
Data []byte `json:"data"`
Log string `json:"log"`
Code abci.CodeType `json:"code"`
Error string `json:"error"` // this is redundant information for now
} }
// NOTE: This goes into the replay WAL // NOTE: This goes into the replay WAL


+ 81
- 0
types/tx.go View File

@ -1,6 +1,10 @@
package types package types
import ( import (
"bytes"
"errors"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-merkle" "github.com/tendermint/go-merkle"
) )
@ -30,3 +34,80 @@ func (txs Txs) Hash() []byte {
return merkle.SimpleHashFromTwoHashes(left, right) return merkle.SimpleHashFromTwoHashes(left, right)
} }
} }
// Index returns the index of this transaction in the list, or -1 if not found
func (txs Txs) Index(tx Tx) int {
for i := range txs {
if bytes.Equal(txs[i], tx) {
return i
}
}
return -1
}
// IndexByHash returns the index of the transaction with this hash in the list, or -1 if not found
func (txs Txs) IndexByHash(hash []byte) int {
for i := range txs {
if bytes.Equal(txs[i].Hash(), hash) {
return i
}
}
return -1
}
// Proof returns a simple merkle proof for the transaction at index i.
//
// Panics if i < 0 or i >= len(txs)
//
// TODO: optimize this!
func (txs Txs) Proof(i int) TxProof {
l := len(txs)
hashables := make([]merkle.Hashable, l)
for i := 0; i < l; i++ {
hashables[i] = txs[i]
}
root, proofs := merkle.SimpleProofsFromHashables(hashables)
return TxProof{
Index: i,
Total: l,
RootHash: root,
Data: txs[i],
Proof: *proofs[i],
}
}
type TxProof struct {
Index, Total int
RootHash []byte
Data Tx
Proof merkle.SimpleProof
}
func (tp TxProof) LeafHash() []byte {
return tp.Data.Hash()
}
// Validate returns nil if the proof matches the dataHash and is internally consistent;
// otherwise, it returns a sensible error
func (tp TxProof) Validate(dataHash []byte) error {
if !bytes.Equal(dataHash, tp.RootHash) {
return errors.New("Proof matches different data hash")
}
valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash)
if !valid {
return errors.New("Proof is not internally consistent")
}
return nil
}
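For example, the intended light-client style check is a one-liner against the root carried by a block header as its DataHash (a hand-written sketch, not part of this diff; proveInclusion is a hypothetical helper).
package main

import "github.com/tendermint/tendermint/types"

// proveInclusion builds a Merkle proof that txs[i] is in the set and validates
// it against the root that a block header would carry as its DataHash.
func proveInclusion(txs types.Txs, i int) error {
    proof := txs.Proof(i)             // panics if i is out of range
    return proof.Validate(txs.Hash()) // nil iff the proof matches the root and is consistent
}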
// TxResult contains results of executing the transaction.
//
// One usage is indexing transaction results.
type TxResult struct {
Height uint64 `json:"height"`
Index uint32 `json:"index"`
Tx Tx `json:"tx"`
Result abci.ResponseDeliverTx `json:"result"`
}

+ 122
- 0
types/tx_test.go View File

@ -0,0 +1,122 @@
package types
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/go-common"
ctest "github.com/tendermint/go-common/test"
wire "github.com/tendermint/go-wire"
)
func makeTxs(cnt, size int) Txs {
txs := make(Txs, cnt)
for i := 0; i < cnt; i++ {
txs[i] = cmn.RandBytes(size)
}
return txs
}
func randInt(low, high int) int {
off := cmn.RandInt() % (high - low)
return low + off
}
func TestTxIndex(t *testing.T) {
assert := assert.New(t)
for i := 0; i < 20; i++ {
txs := makeTxs(15, 60)
for j := 0; j < len(txs); j++ {
tx := txs[j]
idx := txs.Index(tx)
assert.Equal(j, idx)
}
assert.Equal(-1, txs.Index(nil))
assert.Equal(-1, txs.Index(Tx("foodnwkf")))
}
}
func TestValidTxProof(t *testing.T) {
assert := assert.New(t)
cases := []struct {
txs Txs
}{
{Txs{{1, 4, 34, 87, 163, 1}}},
{Txs{{5, 56, 165, 2}, {4, 77}}},
{Txs{Tx("foo"), Tx("bar"), Tx("baz")}},
{makeTxs(20, 5)},
{makeTxs(7, 81)},
{makeTxs(61, 15)},
}
for h, tc := range cases {
txs := tc.txs
root := txs.Hash()
// make sure valid proof for every tx
for i := range txs {
leaf := txs[i]
leafHash := leaf.Hash()
proof := txs.Proof(i)
assert.Equal(i, proof.Index, "%d: %d", h, i)
assert.Equal(len(txs), proof.Total, "%d: %d", h, i)
assert.Equal(root, proof.RootHash, "%d: %d", h, i)
assert.Equal(leaf, proof.Data, "%d: %d", h, i)
assert.Equal(leafHash, proof.LeafHash(), "%d: %d", h, i)
assert.Nil(proof.Validate(root), "%d: %d", h, i)
assert.NotNil(proof.Validate([]byte("foobar")), "%d: %d", h, i)
// read-write must also work
var p2 TxProof
bin := wire.BinaryBytes(proof)
err := wire.ReadBinaryBytes(bin, &p2)
if assert.Nil(err, "%d: %d: %+v", h, i, err) {
assert.Nil(p2.Validate(root), "%d: %d", h, i)
}
}
}
}
func TestTxProofUnchangable(t *testing.T) {
// run the other test a bunch...
for i := 0; i < 40; i++ {
testTxProofUnchangable(t)
}
}
func testTxProofUnchangable(t *testing.T) {
assert := assert.New(t)
// make some proof
txs := makeTxs(randInt(2, 100), randInt(16, 128))
root := txs.Hash()
i := randInt(0, len(txs)-1)
proof := txs.Proof(i)
// make sure it is valid to start with
assert.Nil(proof.Validate(root))
bin := wire.BinaryBytes(proof)
// try mutating the data and make sure nothing breaks
for j := 0; j < 500; j++ {
bad := ctest.MutateByteSlice(bin)
if !bytes.Equal(bad, bin) {
assertBadProof(t, root, bad, proof)
}
}
}
// this makes sure the proof doesn't deserialize into something valid
func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) {
var proof TxProof
err := wire.ReadBinaryBytes(bad, &proof)
if err == nil {
err = proof.Validate(root)
if err == nil {
// okay, this can happen if we have a slightly different total
// (where the path ends up the same); if it is something else, we have
// a real problem
assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good)
}
}
}

+ 2
- 2
version/version.go View File

@ -2,6 +2,6 @@ package version
const Maj = "0" const Maj = "0"
const Min = "9" const Min = "9"
const Fix = "0"
const Fix = "1"
const Version = "0.9.0"
const Version = "0.9.1"
