
Merge pull request #1587 from tendermint/release/v0.19.4

Release/v0.19.4
Ethan Buchman 7 years ago
committed by GitHub
commit 545990f845
92 changed files with 1572 additions and 660 deletions
1. CHANGELOG.md (+13 / -0)
2. Makefile (+1 / -1)
3. ROADMAP.md (+23 / -0)
4. Vagrantfile (+20 / -14)
5. blockchain/store_test.go (+1 / -1)
6. consensus/replay.go (+34 / -26)
7. consensus/replay_test.go (+1 / -1)
8. consensus/state.go (+68 / -40)
9. docs/app-architecture.rst (+24 / -16)
10. docs/assets/a_plus_t.png (+0 / -0)
11. docs/assets/tm-application-example.png (BIN)
12. docs/conf.py (+1 / -5)
13. docs/deploy-testnets.rst (+20 / -42)
14. docs/examples/getting-started.md (+8 / -8)
15. docs/examples/init_testnet.sh (+69 / -0)
16. docs/examples/install_tendermint.sh (+1 / -1)
17. docs/examples/node0/config/config.toml (+169 / -0)
18. docs/examples/node0/config/genesis.json (+39 / -0)
19. docs/examples/node0/config/node_key.json (+1 / -0)
20. docs/examples/node0/config/priv_validator.json (+14 / -0)
21. docs/examples/node1/config.toml (+0 / -15)
22. docs/examples/node1/config/config.toml (+169 / -0)
23. docs/examples/node1/config/genesis.json (+39 / -0)
24. docs/examples/node1/config/node_key.json (+1 / -0)
25. docs/examples/node1/config/priv_validator.json (+14 / -0)
26. docs/examples/node1/genesis.json (+0 / -42)
27. docs/examples/node1/node_key.json (+0 / -6)
28. docs/examples/node1/priv_validator.json (+0 / -15)
29. docs/examples/node2/config.toml (+0 / -15)
30. docs/examples/node2/config/config.toml (+169 / -0)
31. docs/examples/node2/config/genesis.json (+39 / -0)
32. docs/examples/node2/config/node_key.json (+1 / -0)
33. docs/examples/node2/config/priv_validator.json (+14 / -0)
34. docs/examples/node2/genesis.json (+0 / -42)
35. docs/examples/node2/node_key.json (+0 / -6)
36. docs/examples/node2/priv_validator.json (+0 / -15)
37. docs/examples/node3/config.toml (+0 / -15)
38. docs/examples/node3/config/config.toml (+169 / -0)
39. docs/examples/node3/config/genesis.json (+39 / -0)
40. docs/examples/node3/config/node_key.json (+1 / -0)
41. docs/examples/node3/config/priv_validator.json (+14 / -0)
42. docs/examples/node3/genesis.json (+0 / -42)
43. docs/examples/node3/node_key.json (+0 / -6)
44. docs/examples/node3/priv_validator.json (+0 / -15)
45. docs/examples/node4/config.toml (+0 / -15)
46. docs/examples/node4/genesis.json (+0 / -42)
47. docs/examples/node4/node_key.json (+0 / -6)
48. docs/examples/node4/priv_validator.json (+0 / -15)
49. docs/index.rst (+1 / -2)
50. docs/spec/README.md (+71 / -0)
51. docs/spec/abci.md (+0 / -0)
52. docs/spec/bft-time.md (+0 / -0)
53. docs/spec/blockchain.md (+0 / -0)
54. docs/spec/encoding.md (+0 / -0)
55. docs/spec/light-client.md (+0 / -0)
56. docs/spec/p2p/config.md (+0 / -0)
57. docs/spec/p2p/connection.md (+0 / -0)
58. docs/spec/p2p/node.md (+0 / -0)
59. docs/spec/p2p/peer.md (+0 / -0)
60. docs/spec/pre-amino.md (+0 / -0)
61. docs/spec/reactors/block_sync/impl.md (+0 / -0)
62. docs/spec/reactors/block_sync/reactor.md (+0 / -0)
63. docs/spec/reactors/consensus/consensus-reactor.md (+0 / -0)
64. docs/spec/reactors/consensus/consensus.md (+0 / -0)
65. docs/spec/reactors/consensus/proposer-selection.md (+0 / -0)
66. docs/spec/reactors/mempool/README.md (+0 / -0)
67. docs/spec/reactors/mempool/concurrency.md (+0 / -0)
68. docs/spec/reactors/mempool/config.md (+0 / -0)
69. docs/spec/reactors/mempool/functionality.md (+0 / -0)
70. docs/spec/reactors/mempool/messages.md (+0 / -0)
71. docs/spec/reactors/pex/pex.md (+0 / -0)
72. docs/spec/scripts/crypto.go (+0 / -0)
73. docs/spec/state.md (+0 / -0)
74. docs/specification/corruption.rst (+1 / -0)
75. docs/specification/new-spec/README.md (+1 / -71)
76. docs/terraform-and-ansible.rst (+138 / -0)
77. mempool/mempool.go (+3 / -2)
78. networks/remote/ansible/README.rst (+0 / -52)
79. networks/remote/ansible/roles/install/templates/systemd.service.j2 (+1 / -1)
80. networks/remote/integration.sh (+132 / -0)
81. networks/remote/terraform/README.rst (+0 / -33)
82. networks/remote/terraform/cluster/variables.tf (+1 / -1)
83. node/node.go (+1 / -0)
84. p2p/pex/addrbook.go (+1 / -1)
85. p2p/switch.go (+1 / -1)
86. state/execution.go (+4 / -10)
87. state/txindex/indexer.go (+1 / -1)
88. state/txindex/indexer_service.go (+29 / -6)
89. state/txindex/kv/kv_test.go (+2 / -2)
90. types/part_set.go (+3 / -5)
91. types/part_set_test.go (+3 / -3)
92. version/version.go (+2 / -2)

CHANGELOG.md (+13 / -0)

@ -1,5 +1,18 @@
# Changelog
## 0.19.4 (May 17th, 2018)
IMPROVEMENTS
- [state] Improve tx indexing by using batches
- [consensus, state] Improve logging (more consensus logs, fewer tx logs)
- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...)
BUG FIXES
- [consensus] Fix issue #1575 where a late proposer can get stuck
## 0.19.3 (May 14th, 2018)
FEATURES
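
The tx-indexing improvement noted in the CHANGELOG ("[state] Improve tx indexing by using batches", implemented in state/txindex/indexer_service.go, +29/-6) buffers all results for a block and writes them in one go. A minimal, self-contained Go sketch of that pattern follows; the type and function names here are illustrative assumptions, not the actual state/txindex API:

```
package txindexsketch

// TxResult is a stand-in for the per-transaction result that gets indexed.
type TxResult struct {
	Height int64
	Index  uint32
	Hash   []byte
}

// Batch buffers all results for one block so they can be written at once.
type Batch struct {
	ops []TxResult
}

func NewBatch(n int) *Batch       { return &Batch{ops: make([]TxResult, 0, n)} }
func (b *Batch) Add(res TxResult) { b.ops = append(b.ops, res) }

// TxIndexer persists a whole batch in a single storage write.
type TxIndexer interface {
	AddBatch(b *Batch) error
}

// IndexBlock collects every result for the block and hands the indexer one
// batch, instead of issuing one write per transaction.
func IndexBlock(indexer TxIndexer, results []TxResult) error {
	batch := NewBatch(len(results))
	for _, res := range results {
		batch.Add(res)
	}
	return indexer.AddBatch(batch)
}
```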


Makefile (+1 / -1)

@ -194,7 +194,7 @@ build-linux:
GOOS=linux GOARCH=amd64 $(MAKE) build
# Run a 4-node testnet locally
localnet-start:
localnet-start: localnet-stop
@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
docker-compose up


ROADMAP.md (+23 / -0)

@ -0,0 +1,23 @@
# Roadmap
BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence
FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limiting to the RPC
IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
- cache block parts for faster agreement after round changes
- propagate block parts rarest first
- Better testing of the consensus state machine (ie. use a DSL)
- Auto compiled serialization/deserialization code instead of go-wire reflection
BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness

Vagrantfile (+20 / -14)

@ -10,31 +10,37 @@ Vagrant.configure("2") do |config|
end
config.vm.provision "shell", inline: <<-SHELL
# add docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable"
# and golang 1.9 support
# official repo doesn't have race detection runtime...
# add-apt-repository ppa:gophers/archive
add-apt-repository ppa:longsleep/golang-backports
apt-get update
# install base requirements
apt-get update
apt-get install -y --no-install-recommends wget curl jq zip \
make shellcheck bsdmainutils psmisc
apt-get install -y docker-ce golang-1.9-go
apt-get install -y language-pack-en
# install docker
apt-get install -y --no-install-recommends apt-transport-https \
ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
apt-get install -y docker-ce
usermod -a -G docker vagrant
# install go
wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz
tar -xvf go1.10.1.linux-amd64.tar.gz
mv go /usr/local
rm -f go1.10.1.linux-amd64.tar.gz
# cleanup
apt-get autoremove -y
# needed for docker
usermod -a -G docker vagrant
# set env variables
echo 'export PATH=$PATH:/usr/lib/go-1.9/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile


blockchain/store_test.go (+1 / -1)

@ -97,7 +97,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
uncontiguousPartSet.AddPart(part2, false)
uncontiguousPartSet.AddPart(part2)
header1 := types.Header{
Height: 1,


consensus/replay.go (+34 / -26)

@ -26,20 +26,24 @@ import (
var crc32c = crc32.MakeTable(crc32.Castagnoli)
// Functionality to replay blocks and messages on recovery from a crash.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// The former is handled by the WAL, the latter by the proxyApp Handshake on restart,
// which ultimately hands off the work to the WAL.
// There are two general failure scenarios:
//
// 1. failure during consensus
// 2. failure while applying the block
//
// The former is handled by the WAL, the latter by the proxyApp Handshake on
// restart, which ultimately hands off the work to the WAL.
//-----------------------------------------
// recover from failure during consensus
// by replaying messages from the WAL
// 1. Recover from failure during consensus
// (by replaying messages from the WAL)
//-----------------------------------------
// Unmarshal and apply a single message to the consensus state
// as if it were received in receiveRoutine
// Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running
// Unmarshal and apply a single message to the consensus state as if it were
// received in receiveRoutine. Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running.
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error {
// skip meta messages
// Skip meta messages which exist for demarcating boundaries.
if _, ok := msg.Msg.(EndHeightMessage); ok {
return nil
}
@ -89,17 +93,18 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
return nil
}
// replay only those messages since the last block.
// timeoutRoutine should run concurrently to read off tickChan
// Replay only those messages since the last block. `timeoutRoutine` should
// run concurrently to read off tickChan.
func (cs *ConsensusState) catchupReplay(csHeight int64) error {
// set replayMode
// Set replayMode to true so we don't log signing errors.
cs.replayMode = true
defer func() { cs.replayMode = false }()
// Ensure that ENDHEIGHT for this height doesn't exist.
// Ensure that #ENDHEIGHT for this height doesn't exist.
// NOTE: This is just a sanity check. As far as we know things work fine
// without it, and Handshake could reuse ConsensusState if it weren't for
// this check (since we can crash after writing ENDHEIGHT).
// this check (since we can crash after writing #ENDHEIGHT).
//
// Ignore data corruption errors since this is a sanity check.
gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
@ -115,7 +120,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
}
// Search for last height marker
// Search for last height marker.
//
// Ignore data corruption errors in previous heights because we only care about last height
gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
@ -182,10 +187,11 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc {
}
}*/
//----------------------------------------------
// Recover from failure during block processing
// by handshaking with the app to figure out where
// we were last and using the WAL to recover there
//---------------------------------------------------
// 2. Recover from failure while applying the block.
// (by handshaking with the app to figure out where
// we were last, and using the WAL to recover there.)
//---------------------------------------------------
type Handshaker struct {
stateDB dbm.DB
@ -220,7 +226,8 @@ func (h *Handshaker) NBlocks() int {
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// handshake is done via info request on the query conn
// Handshake is done via ABCI Info on the query conn.
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
if err != nil {
return fmt.Errorf("Error calling Info: %v", err)
@ -234,15 +241,16 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
// TODO: check version
// TODO: check app version.
// replay blocks up to the latest in the blockstore
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
if err != nil {
return fmt.Errorf("Error on replay: %v", err)
}
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
"appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
// TODO: (on restart) replay mempool
@ -250,7 +258,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
}
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
// Returns the final AppHash or an error
// Returns the final AppHash or an error.
func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {
storeBlockHeight := h.store.Height()
@ -314,7 +322,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
// We haven't run Commit (both the state and app are one block behind),
// so replayBlock with the real app.
// NOTE: We could instead use the cs.WAL on cs.Start,
// but we'd have to allow the WAL to replay a block that wrote it's ENDHEIGHT
// but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT
h.logger.Info("Replay last block using real app")
state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
return state.AppHash, err
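
The reworked comments above describe the recovery flow: the app reports its last height via ABCI Info, and any blocks the blockstore holds beyond that are replayed before consensus resumes. A rough Go sketch of that loop, using assumed interfaces rather than the real proxy/state types:

```
package replaysketch

import "fmt"

// App is a stand-in for the ABCI connection used during the handshake.
type App interface {
	Info() (height int64, appHash []byte, err error) // where the app left off
	ApplyBlock(height int64) (appHash []byte, err error)
}

// BlockStore is a stand-in for the blockstore's height lookup.
type BlockStore interface {
	Height() int64
}

// Handshake replays stored blocks until the app has caught up to the store.
func Handshake(app App, store BlockStore) ([]byte, error) {
	appHeight, appHash, err := app.Info()
	if err != nil {
		return nil, fmt.Errorf("error calling Info: %v", err)
	}
	for h := appHeight + 1; h <= store.Height(); h++ {
		if appHash, err = app.ApplyBlock(h); err != nil {
			return nil, fmt.Errorf("error on replay at height %d: %v", h, err)
		}
	}
	return appHash, nil // final AppHash once Tendermint and the app are synced
}
```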


consensus/replay_test.go (+1 / -1)

@ -538,7 +538,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
case *types.PartSetHeader:
thisBlockParts = types.NewPartSetFromHeader(*p)
case *types.Part:
_, err := thisBlockParts.AddPart(p, false)
_, err := thisBlockParts.AddPart(p)
if err != nil {
return nil, nil, err
}


consensus/state.go (+68 / -40)

@ -584,8 +584,9 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
err = cs.setProposal(msg.Proposal)
case *BlockPartMessage:
// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
_, err = cs.addProposalBlockPart(msg.Height, msg.Part, peerID != "")
_, err = cs.addProposalBlockPart(msg.Height, msg.Part)
if err != nil && msg.Round != cs.Round {
cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
err = nil
}
case *VoteMessage:
@ -610,7 +611,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg))
}
if err != nil {
cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
}
}
@ -667,16 +668,18 @@ func (cs *ConsensusState) handleTxsAvailable(height int64) {
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
// NOTE: cs.StartTime was already set for height.
func (cs *ConsensusState) enterNewRound(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if now := time.Now(); cs.StartTime.After(now) {
cs.Logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
}
cs.Logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Increment validators if necessary
validators := cs.Validators
@ -695,6 +698,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
// and meanwhile we might have received a proposal
// for round 0.
} else {
logger.Info("Resetting Proposal info")
cs.Proposal = nil
cs.ProposalBlock = nil
cs.ProposalBlockParts = nil
@ -757,11 +761,13 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *ConsensusState) enterPropose(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
cs.Logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPropose:
@ -781,22 +787,22 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
// Nothing more to do if we're not a validator
if cs.privValidator == nil {
cs.Logger.Debug("This node is not a validator")
logger.Debug("This node is not a validator")
return
}
// if not a validator, we're done
if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
cs.Logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
return
}
cs.Logger.Debug("This node is a validator")
logger.Debug("This node is a validator")
if cs.isProposer() {
cs.Logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
cs.decideProposal(height, round)
} else {
cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
}
}
@ -959,14 +965,16 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
// Enter: any +2/3 prevotes at next round.
func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
}
cs.Logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrevoteWait:
@ -985,12 +993,14 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
// else, precommit nil otherwise.
func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
cs.Logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommit:
@ -1004,9 +1014,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// If we don't have a polka, we must precommit nil.
if !ok {
if cs.LockedBlock != nil {
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
} else {
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
}
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
return
@ -1024,9 +1034,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// +2/3 prevoted nil. Unlock and precommit nil.
if len(blockID.Hash) == 0 {
if cs.LockedBlock == nil {
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil.")
logger.Info("enterPrecommit: +2/3 prevoted for nil.")
} else {
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
@ -1040,7 +1050,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// If we're already locked on that block, precommit it, and update the LockedRound
if cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
cs.LockedRound = round
cs.eventBus.PublishEventRelock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
@ -1049,7 +1059,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// If +2/3 prevoted for proposal block, stage and precommit it
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
@ -1079,14 +1089,16 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// Enter: any +2/3 precommits for next round.
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
}
cs.Logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommitWait:
@ -1101,11 +1113,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
// Enter: +2/3 precommits for block
func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
logger := cs.Logger.With("height", height, "commitRound", commitRound)
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
return
}
cs.Logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterCommit:
@ -1128,6 +1142,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
// Move them over to ProposalBlock if they match the commit hash,
// otherwise they'll be cleared in updateToState.
if cs.LockedBlock.HashesTo(blockID.Hash) {
logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash)
cs.ProposalBlock = cs.LockedBlock
cs.ProposalBlockParts = cs.LockedBlockParts
}
@ -1135,6 +1150,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
// If we don't have the block being committed, set up to get it.
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash)
// We're getting the wrong block.
// Set up ProposalBlockParts and keep waiting.
cs.ProposalBlock = nil
@ -1147,19 +1163,21 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
// If we have the block AND +2/3 commits for it, finalize.
func (cs *ConsensusState) tryFinalizeCommit(height int64) {
logger := cs.Logger.With("height", height)
if cs.Height != height {
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
if !ok || len(blockID.Hash) == 0 {
cs.Logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.", "height", height)
logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
return
}
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
// TODO: this happens every time if we're not a validator (ugly logs)
// TODO: ^^ wait, why does it matter that we're a validator?
cs.Logger.Info("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
return
}
@ -1210,23 +1228,28 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
fail.Fail() // XXX
// Finish writing to the WAL for this height.
// NOTE: If we fail before writing this, we'll never write it,
// and just recover by running ApplyBlock in the Handshake.
// If we moved it before persisting the block, we'd have to allow
// WAL replay for blocks with an #ENDHEIGHT
// As is, ConsensusState should not be started again
// until we successfully call ApplyBlock (ie. here or in Handshake after restart)
// Write EndHeightMessage{} for this height, implying that the blockstore
// has saved the block.
//
// If we crash before writing this EndHeightMessage{}, we will recover by
// running ApplyBlock during the ABCI handshake when we restart. If we
// didn't save the block to the blockstore before writing
// EndHeightMessage{}, we'd have to change WAL replay -- currently it
// complains about replaying for heights where an #ENDHEIGHT entry already
// exists.
//
// Either way, the ConsensusState should not be resumed until we
// successfully call ApplyBlock (ie. later here, or in Handshake after
// restart).
cs.wal.Save(EndHeightMessage{height})
fail.Fail() // XXX
// Create a copy of the state for staging
// and an event cache for txs
// Create a copy of the state for staging and an event cache for txs.
stateCopy := cs.state.Copy()
// Execute and commit the block, update and save the state, and update the mempool.
// NOTE: the block.AppHash wont reflect these txs until the next block
// NOTE The block.AppHash wont reflect these txs until the next block.
var err error
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
if err != nil {
@ -1293,18 +1316,20 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, verify bool) (added bool, err error) {
func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) {
// Blocks might be reused, so round mismatch is OK
if cs.Height != height {
cs.Logger.Debug("Received block part from wrong height", "height", height)
return false, nil
}
// We're not expecting a block part.
if cs.ProposalBlockParts == nil {
cs.Logger.Info("Received a block part when we're not expecting any", "height", height)
return false, nil // TODO: bad peer? Return error?
}
added, err = cs.ProposalBlockParts.AddPart(part, verify)
added, err = cs.ProposalBlockParts.AddPart(part)
if err != nil {
return added, err
}
@ -1322,6 +1347,8 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("Updating valid block to new proposal block",
"valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash())
cs.ValidRound = cs.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
@ -1453,6 +1480,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
(vote.Round <= cs.Round) &&
cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round)
cs.ValidRound = vote.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
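
Most of the state.go changes above replace bare cs.Logger calls with a logger pre-scoped via With("height", ..., "round", ...), so every message in a step is tagged automatically. A tiny self-contained Go illustration of the pattern (a toy logger, not the tmlibs log API):

```
package main

import "fmt"

// Logger is a toy structured logger that carries a set of key/value pairs.
type Logger struct {
	kvs []interface{}
}

// With returns a child logger that always includes the extra key/value pairs.
func (l Logger) With(kvs ...interface{}) Logger {
	merged := append(append([]interface{}{}, l.kvs...), kvs...)
	return Logger{kvs: merged}
}

func (l Logger) Info(msg string, kvs ...interface{}) {
	fmt.Println(append(append([]interface{}{msg}, l.kvs...), kvs...)...)
}

func main() {
	var base Logger
	// Every message from this logger is tagged with the current height/round,
	// so individual call sites don't have to repeat them.
	logger := base.With("height", int64(10), "round", 2)
	logger.Info("enterPropose: Our turn to propose", "proposer", "ABCD1234")
}
```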


docs/app-architecture.rst (+24 / -16)

@ -66,15 +66,14 @@ and possibly await a response). And one method to query app-specific
data from the ABCI application.
Pros:
* Server code already written
* Access to block headers to validate merkle proofs (nice for light clients)
* Basic read/write functionality is supported
- Server code already written
- Access to block headers to validate merkle proofs (nice for light clients)
- Basic read/write functionality is supported
Cons:
* Limited interface to app. All queries must be serialized into
[]byte (less expressive than JSON over HTTP) and there is no way to push
data from ABCI app to the client (eg. notify me if account X receives a
transaction)
- Limited interface to app. All queries must be serialized into []byte (less expressive than JSON over HTTP) and there is no way to push data from ABCI app to the client (eg. notify me if account X receives a transaction)
Custom ABCI server
~~~~~~~~~~~~~~~~~~
@ -92,14 +91,19 @@ store. For "reads", we can do any queries we wish that are supported by
our architecture, using any web technology that is useful. The general
architecture is shown in the following diagram:
Pros: \* Separates application logic from blockchain logic \* Allows
much richer, more flexible client-facing API \* Allows pub-sub, watching
certain fields, etc.
.. figure:: assets/tm-application-example.png
Pros:
- Separates application logic from blockchain logic
- Allows much richer, more flexible client-facing API
- Allows pub-sub, watching certain fields, etc.
Cons: \* Access to ABCI app can be dangerous (be VERY careful not to
write unless it comes from the validator node) \* No direct access to
the blockchain headers to verify tx \* You must write your own API (but
maybe that's a pro...)
Cons:
- Access to ABCI app can be dangerous (be VERY careful not to write unless it comes from the validator node)
- No direct access to the blockchain headers to verify tx
- You must write your own API (but maybe that's a pro...)
Hybrid solutions
~~~~~~~~~~~~~~~~
@ -108,9 +112,13 @@ Likely the least secure but most versatile. The client can access both
the tendermint node for all blockchain info, as well as a custom app
server, for complex queries and pub-sub on the abci app.
Pros: All from both above solutions
Pros:
- All from both above solutions
Cons:
Cons: Even more complexity; even more attack vectors (less
- Even more complexity; even more attack vectors (less
security)
Scalability


networks/remote/ansible/assets/a_plus_t.png → docs/assets/a_plus_t.png


docs/assets/tm-application-example.png (BIN)


docs/conf.py (+1 / -5)

@ -71,7 +71,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'specification/new-spec', 'examples']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
@ -184,9 +184,6 @@ if os.path.isdir(tools_dir) != True:
if os.path.isdir(assets_dir) != True:
os.mkdir(assets_dir)
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/README.rst', filename=tools_dir+'/ansible.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/assets/a_plus_t.png', filename=assets_dir+'/a_plus_t.png')
urllib.urlretrieve(tools_repo+tools_branch+'/docker/README.rst', filename=tools_dir+'/docker.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/README.rst', filename=tools_dir+'/mintnet-kubernetes.rst')
@ -195,7 +192,6 @@ urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce2.png'
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets_dir+'/statefulset.png')
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')
urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst')
urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/monitoring.rst')


docs/deploy-testnets.rst (+20 / -42)

@ -3,8 +3,7 @@ Deploy a Testnet
Now that we've seen how ABCI works, and even played with a few
applications on a single validator node, it's time to deploy a test
network to four validator nodes. For this deployment, we'll use the
``basecoin`` application.
network to four validator nodes.
Manual Deployments
------------------
@ -24,67 +23,46 @@ Here are the steps to setting up a testnet manually:
``tendermint init``
4) Compile a list of public keys for each validator into a
``genesis.json`` file and replace the existing file with it.
5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
5) Run ``tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >`` on each node,
where ``< peer addresses >`` is a comma separated list of the IP:PORT
combination for each node. The default port for Tendermint is
``46656``. Thus, if the IP addresses of your nodes were
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
would look like:
``tendermint node --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``.
``tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``.
After a few seconds, all the nodes should connect to each other and start
making blocks! For more information, see the Tendermint Networks section
of `the guide to using Tendermint <using-tendermint.html>`__.
Automated Deployments
---------------------
While the manual deployment is easy enough, an automated deployment is
usually quicker. The below examples show different tools that can be used
for automated deployments.
Automated Deployment using Kubernetes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The `mintnet-kubernetes tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__
allows automating the deployment of a Tendermint network on an already
provisioned Kubernetes cluster. For simple provisioning of a Kubernetes
cluster, check out the `Google Cloud Platform <https://cloud.google.com/>`__.
Automated Deployment using Terraform and Ansible
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The `terraform-digitalocean tool <https://github.com/tendermint/tools/tree/master/terraform-digitalocean>`__
allows creating a set of servers on the DigitalOcean cloud.
The `ansible playbooks <https://github.com/tendermint/tools/tree/master/ansible>`__
allow creating and managing a ``basecoin`` or ``ethermint`` testnet on provisioned servers.
Automated Deployments
---------------------
Package Deployment on Linux for developers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Local
^^^^^
The ``tendermint`` and ``basecoin`` applications can be installed from RPM or DEB packages on
Linux machines for development purposes. The packages are configured to be validators on the
one-node network that the machine represents. The services are not started after installation,
this way giving an opportunity to reconfigure the applications before starting.
With ``docker`` installed, run the command:
The Ansible playbooks in the previous section use this repository to install ``basecoin``.
After installation, additional steps are executed to make sure that the multi-node testnet has
the right configuration before start.
::
Install from the CentOS/RedHat repository:
make localnet-start
::
from the root of the tendermint repository. This will spin up a 4-node local testnet.
rpm --import https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint
wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo
yum install basecoin
Cloud Deployment using Kubernetes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Install from the Debian/Ubuntu repository:
The `mintnet-kubernetes tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__
allows automating the deployment of a Tendermint network on an already
provisioned Kubernetes cluster. For simple provisioning of a Kubernetes
cluster, check out the `Google Cloud Platform <https://cloud.google.com/>`__.
::
Cloud Deployment using Terraform and Ansible
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add -
wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list
apt-get update && apt-get install basecoin
See the `next section <./terraform-and-ansible.html>`__ for details.

docs/examples/getting-started.md (+8 / -8)

@ -10,10 +10,10 @@ documentation](http://tendermint.readthedocs.io/en/master/).
### Quick Install
On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vNLfY), like so:
On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vpgEI), like so:
```
curl -L https://git.io/vxWlX | bash
curl -L https://git.io/vpgEI | bash
source ~/.profile
```
@ -24,7 +24,7 @@ The script is also used to facilitate cluster deployment below.
### Manual Install
Requires:
- `go` minimum version 1.9
- `go` minimum version 1.10
- `$GOPATH` environment variable must be set
- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
@ -125,7 +125,7 @@ addresses below as IP1, IP2, IP3, IP4.
Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
```
curl -L https://git.io/vNLfY | bash
curl -L https://git.io/vpgEI | bash
source ~/.profile
```
@ -134,10 +134,10 @@ This will install `go` and other dependencies, get the Tendermint source code, t
Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
```
tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node4 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node0 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
```
Note that after the third node is started, blocks will start to stream in


docs/examples/init_testnet.sh (+69 / -0)

@ -0,0 +1,69 @@
#!/usr/bin/env bash
# make all the files
tendermint init --home ./tester/node0
tendermint init --home ./tester/node1
tendermint init --home ./tester/node2
tendermint init --home ./tester/node3
file0=./tester/node0/config/genesis.json
file1=./tester/node1/config/genesis.json
file2=./tester/node2/config/genesis.json
file3=./tester/node3/config/genesis.json
genesis_time=`cat $file0 | jq '.genesis_time'`
chain_id=`cat $file0 | jq '.chain_id'`
value0=`cat $file0 | jq '.validators[0].pub_key.value'`
value1=`cat $file1 | jq '.validators[0].pub_key.value'`
value2=`cat $file2 | jq '.validators[0].pub_key.value'`
value3=`cat $file3 | jq '.validators[0].pub_key.value'`
rm $file0
rm $file1
rm $file2
rm $file3
echo "{
\"genesis_time\": $genesis_time,
\"chain_id\": $chain_id,
\"validators\": [
{
\"pub_key\": {
\"type\": \"AC26791624DE60\",
\"value\": $value0
},
\"power:\": 10,
\"name\":, \"\"
},
{
\"pub_key\": {
\"type\": \"AC26791624DE60\",
\"value\": $value1
},
\"power:\": 10,
\"name\":, \"\"
},
{
\"pub_key\": {
\"type\": \"AC26791624DE60\",
\"value\": $value2
},
\"power:\": 10,
\"name\":, \"\"
},
{
\"pub_key\": {
\"type\": \"AC26791624DE60\",
\"value\": $value3
},
\"power:\": 10,
\"name\":, \"\"
}
],
\"app_hash\": \"\"
}" >> $file0
cp $file0 $file1
cp $file0 $file2
cp $file2 $file3

docs/examples/install_tendermint.sh (+1 / -1)

@ -26,7 +26,7 @@ go get $REPO
cd $GOPATH/src/$REPO
## build
git checkout v0.18.0
git checkout master
make get_tools
make get_vendor_deps
make install

docs/examples/node0/config/config.toml (+169 / -0)

@ -0,0 +1,169 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:46658"
# A custom human readable name for this node
moniker = "alpha"
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb
db_backend = "leveldb"
# Database directory
db_path = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "config/priv_validator.json"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:46657"
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:46656"
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = ""
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
addr_book_strict = true
# Time to wait before flushing messages out on the connection, in ms
flush_throttle_timeout = 100
# Maximum number of peers to connect to
max_num_peers = 50
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 512000
# Rate at which packets can be received, in bytes/second
recv_rate = 512000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Authenticated encryption
auth_enc = true
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
##### mempool configuration options #####
[mempool]
recheck = true
recheck_empty = true
broadcast = true
wal_dir = "data/mempool.wal"
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
# All timeouts are in milliseconds
timeout_propose = 3000
timeout_propose_delta = 500
timeout_prevote = 1000
timeout_prevote_delta = 500
timeout_precommit = 1000
timeout_precommit_delta = 500
timeout_commit = 1000
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# BlockSize
max_block_size_txs = 10000
max_block_size_bytes = 1
# EmptyBlocks mode and possible interval between empty blocks in seconds
create_empty_blocks = true
create_empty_blocks_interval = 0
# Reactor sleep duration parameters are in milliseconds
peer_gossip_sleep_duration = 100
peer_query_maj23_sleep_duration = 2000
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null" (default)
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is tx hash)
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This is, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags. Note this may be not
# desirable (see the comment above). IndexTags has a precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
index_all_tags = false

docs/examples/node0/config/genesis.json (+39 / -0)

@ -0,0 +1,39 @@
{
"genesis_time": "0001-01-01T00:00:00Z",
"chain_id": "test-chain-A2i3OZ",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
},
"power": 10,
"name": ""
}
],
"app_hash": ""
}

docs/examples/node0/config/node_key.json (+1 / -0)

@ -0,0 +1 @@
{"priv_key":{"type":"954568A3288910","value":"7lY+k6EDllG8Q9gVbF5313t/ag2YGkBVKdVa0YHJ9xO5k0w3Q/hke0Z7UFT1KgVDGRUEKzwAwwjwFQUvgF0ZWg=="}}

docs/examples/node0/config/priv_validator.json (+14 / -0)

@ -0,0 +1,14 @@
{
"address": "122A9414774A2FCAD026201DA477EF3F41970EF0",
"pub_key": {
"type": "AC26791624DE60",
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "954568A3288910",
"value": "YLxp3ho+kySgAnzjBptbxDzSGw2ntGZLsIHQsaVxY/cP6TgB2Odg9ZsH3CZp3XfsF2mj+QC6U6hNFCsvL9BziQ=="
}
}

docs/examples/node1/config.toml (+0 / -15)

@ -1,15 +0,0 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""

docs/examples/node1/config/config.toml (+169 / -0)

@ -0,0 +1,169 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:46658"
# A custom human readable name for this node
moniker = "bravo"
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb
db_backend = "leveldb"
# Database directory
db_path = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "config/priv_validator.json"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:46657"
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:46656"
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = ""
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
addr_book_strict = true
# Time to wait before flushing messages out on the connection, in ms
flush_throttle_timeout = 100
# Maximum number of peers to connect to
max_num_peers = 50
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 512000
# Rate at which packets can be received, in bytes/second
recv_rate = 512000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Authenticated encryption
auth_enc = true
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
##### mempool configuration options #####
[mempool]
recheck = true
recheck_empty = true
broadcast = true
wal_dir = "data/mempool.wal"
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
# All timeouts are in milliseconds
timeout_propose = 3000
timeout_propose_delta = 500
timeout_prevote = 1000
timeout_prevote_delta = 500
timeout_precommit = 1000
timeout_precommit_delta = 500
timeout_commit = 1000
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# BlockSize
max_block_size_txs = 10000
max_block_size_bytes = 1
# EmptyBlocks mode and possible interval between empty blocks in seconds
create_empty_blocks = true
create_empty_blocks_interval = 0
# Reactor sleep duration parameters are in milliseconds
peer_gossip_sleep_duration = 100
peer_query_maj23_sleep_duration = 2000
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null" (default)
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is tx hash)
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when both are given, only the tags listed in IndexTags are indexed).
index_all_tags = false
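The tag-selection comments above (here and in the identical node2/node3 configs below) describe a precedence rule: a non-empty index_tags list wins over index_all_tags. A minimal Go sketch of that rule, using illustrative names only and not Tendermint's actual indexer code:

package main

import (
    "fmt"
    "strings"
)

// shouldIndexTag applies the documented precedence: a non-empty index_tags
// list decides on its own; otherwise index_all_tags decides.
func shouldIndexTag(tag, indexTags string, indexAllTags bool) bool {
    if indexTags != "" {
        for _, t := range strings.Split(indexTags, ",") {
            if strings.TrimSpace(t) == tag {
                return true
            }
        }
        return false // the explicit list takes precedence over index_all_tags
    }
    return indexAllTags
}

func main() {
    fmt.Println(shouldIndexTag("tx.height", "tx.height,tx.hash", false)) // true
    fmt.Println(shouldIndexTag("account.owner", "tx.height", true))      // false: the list wins
    fmt.Println(shouldIndexTag("account.owner", "", true))               // true
}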

+ 39
- 0
docs/examples/node1/config/genesis.json View File

@ -0,0 +1,39 @@
{
"genesis_time": "0001-01-01T00:00:00Z",
"chain_id": "test-chain-A2i3OZ",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
},
"power": 10,
"name": ""
}
],
"app_hash": ""
}

+ 1
- 0
docs/examples/node1/config/node_key.json View File

@ -0,0 +1 @@
{"priv_key":{"type":"954568A3288910","value":"H71dc/TIG7nTselfa9nG0WRArXLKYnm7P5eFCk2lk8ASKQ3sIHpbdxCSHQD/RcdHe7TiabJeuOssNPvPWiyQEQ=="}}

+ 14
- 0
docs/examples/node1/config/priv_validator.json View File

@ -0,0 +1,14 @@
{
"address": "BEA1B57F5806CF9AC4D54C8CF806DED5C0F102E1",
"pub_key": {
"type": "AC26791624DE60",
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "954568A3288910",
"value": "o0IqrHSPtd5YqGefodWxpJuRzvuVBjgbH785vbMgk7Vvno3kYJHVp1xVG4Q2N8rD+aubZ2SFPvA1ldX9IOwqxQ=="
}
}

+ 0
- 42
docs/examples/node1/genesis.json View File

@ -1,42 +0,0 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}

+ 0
- 6
docs/examples/node1/node_key.json View File

@ -1,6 +0,0 @@
{
"priv_key" : {
"data" : "DA9BAABEA7211A6D93D9A1986B4279EAB3021FAA1653D459D53E6AB4D1CFB4C69BF7D52E48CF00AC5779AA0A6D3C368955D5636A677F72370B8ED19989714CFC",
"type" : "ed25519"
}
}

+ 0
- 15
docs/examples/node1/priv_validator.json View File

@ -1,15 +0,0 @@
{
"address":"4DC2756029CE0D8F8C6C3E4C3CE6EE8C30AF352F",
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"last_height":0,
"last_round":0,
"last_step":0,
"last_signature":null,
"priv_key":{
"type":"ed25519",
"data":"4D3648E1D93C8703E436BFF814728B6BD270CFDFD686DF5385E8ACBEB7BE2D7DF08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
}
}

+ 0
- 15
docs/examples/node2/config.toml View File

@ -1,15 +0,0 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""

+ 169
- 0
docs/examples/node2/config/config.toml View File

@ -0,0 +1,169 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:46658"
# A custom human readable name for this node
moniker = "charlie"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb
db_backend = "leveldb"
# Database directory
db_path = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "config/priv_validator.json"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:46657"
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:46656"
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = ""
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
addr_book_strict = true
# Time to wait before flushing messages out on the connection, in ms
flush_throttle_timeout = 100
# Maximum number of peers to connect to
max_num_peers = 50
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 512000
# Rate at which packets can be received, in bytes/second
recv_rate = 512000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Authenticated encryption
auth_enc = true
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
##### mempool configuration options #####
[mempool]
recheck = true
recheck_empty = true
broadcast = true
wal_dir = "data/mempool.wal"
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
# All timeouts are in milliseconds
timeout_propose = 3000
timeout_propose_delta = 500
timeout_prevote = 1000
timeout_prevote_delta = 500
timeout_precommit = 1000
timeout_precommit_delta = 500
timeout_commit = 1000
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# BlockSize
max_block_size_txs = 10000
max_block_size_bytes = 1
# EmptyBlocks mode and possible interval between empty blocks in seconds
create_empty_blocks = true
create_empty_blocks_interval = 0
# Reactor sleep duration parameters are in milliseconds
peer_gossip_sleep_duration = 100
peer_query_maj23_sleep_duration = 2000
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null" (default)
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is tx hash)
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when both are given, only the tags listed in IndexTags are indexed).
index_all_tags = false

+ 39
- 0
docs/examples/node2/config/genesis.json View File

@ -0,0 +1,39 @@
{
"genesis_time": "0001-01-01T00:00:00Z",
"chain_id": "test-chain-A2i3OZ",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
},
"power": 10,
"name": ""
}
],
"app_hash": ""
}

+ 1
- 0
docs/examples/node2/config/node_key.json View File

@ -0,0 +1 @@
{"priv_key":{"type":"954568A3288910","value":"COHZ/Y2cWGWxJNkRwtpQBt5sYvOnb6Gpz0lO46XERRJFBIdSWD5x1UMGRSTmnvW1ec5G4bMdg6zUZKOZD+vVPg=="}}

+ 14
- 0
docs/examples/node2/config/priv_validator.json View File

@ -0,0 +1,14 @@
{
"address": "F0AA266949FB29ADA0B679C27889ED930BD1BDA1",
"pub_key": {
"type": "AC26791624DE60",
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "954568A3288910",
"value": "khADeZ5K/8u/L99DFaZNRq8V5g+EHWbwfqFjhCrppaAiBkOkm8YDRMBqaJwDyKtzL5Ff8GRSWPoNfAzv3XLAhQ=="
}
}

+ 0
- 42
docs/examples/node2/genesis.json View File

@ -1,42 +0,0 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}

+ 0
- 6
docs/examples/node2/node_key.json View File

@ -1,6 +0,0 @@
{
"priv_key" : {
"data" : "F7BCABA165DFC0DDD50AE563EFB285BAA236EA805D35612504238A36EFA105958756442B1D9F942D7ABD259F2D59671657B6378E9C7194342A7AAA47A66D1E95",
"type" : "ed25519"
}
}

+ 0
- 15
docs/examples/node2/priv_validator.json View File

@ -1,15 +0,0 @@
{
"address": "DD6C63A762608A9DDD4A845657743777F63121D6",
"pub_key": {
"type": "ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"last_signature": null,
"priv_key": {
"type": "ed25519",
"data": "7B0DE666FF5E9B437D284BCE767F612381890C018B93B0A105D2E829A568DA6FA8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
}
}

+ 0
- 15
docs/examples/node3/config.toml View File

@ -1,15 +0,0 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""

+ 169
- 0
docs/examples/node3/config/config.toml View File

@ -0,0 +1,169 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:46658"
# A custom human readable name for this node
moniker = "delta"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb
db_backend = "leveldb"
# Database directory
db_path = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "config/priv_validator.json"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:46657"
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:46656"
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = ""
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
addr_book_strict = true
# Time to wait before flushing messages out on the connection, in ms
flush_throttle_timeout = 100
# Maximum number of peers to connect to
max_num_peers = 50
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 512000
# Rate at which packets can be received, in bytes/second
recv_rate = 512000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Authenticated encryption
auth_enc = true
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
##### mempool configuration options #####
[mempool]
recheck = true
recheck_empty = true
broadcast = true
wal_dir = "data/mempool.wal"
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
# All timeouts are in milliseconds
timeout_propose = 3000
timeout_propose_delta = 500
timeout_prevote = 1000
timeout_prevote_delta = 500
timeout_precommit = 1000
timeout_precommit_delta = 500
timeout_commit = 1000
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# BlockSize
max_block_size_txs = 10000
max_block_size_bytes = 1
# EmptyBlocks mode and possible interval between empty blocks in seconds
create_empty_blocks = true
create_empty_blocks_interval = 0
# Reactor sleep duration parameters are in milliseconds
peer_gossip_sleep_duration = 100
peer_query_maj23_sleep_duration = 2000
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null" (default)
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is tx hash)
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when both are given, only the tags listed in IndexTags are indexed).
index_all_tags = false

+ 39
- 0
docs/examples/node3/config/genesis.json View File

@ -0,0 +1,39 @@
{
"genesis_time": "0001-01-01T00:00:00Z",
"chain_id": "test-chain-A2i3OZ",
"validators": [
{
"pub_key": {
"type": "AC26791624DE60",
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
},
"power": 10,
"name": ""
},
{
"pub_key": {
"type": "AC26791624DE60",
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
},
"power": 10,
"name": ""
}
],
"app_hash": ""
}

+ 1
- 0
docs/examples/node3/config/node_key.json View File

@ -0,0 +1 @@
{"priv_key":{"type":"954568A3288910","value":"9Y9xp/tUJJ6pHTF5SUV0bGKYSdVbFtMHu+Lr8S0JBSZAwneaejnfOEU1LMKOnQ07skrDUaJcj5di3jAyjxJzqg=="}}

+ 14
- 0
docs/examples/node3/config/priv_validator.json View File

@ -0,0 +1,14 @@
{
"address": "9A1A6914EB5F4FF0269C7EEEE627C27310CC64F9",
"pub_key": {
"type": "AC26791624DE60",
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"priv_key": {
"type": "954568A3288910",
"value": "jb52LZ5gp+eQ8nJlFK1z06nBMp1gD8ICmyzdM1icGOgoYBl/Fm8hntptt4hDzlTUQIbr4jrYpJ1ofy6VzT46JQ=="
}
}

+ 0
- 42
docs/examples/node3/genesis.json View File

@ -1,42 +0,0 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}

+ 0
- 6
docs/examples/node3/node_key.json View File

@ -1,6 +0,0 @@
{
"priv_key" : {
"data" : "95136FCC97E4446B3141EDF9841078107ECE755E99925D79CCBF91085492680B3CA1034D9917DF1DED4E4AB2D9BC225919F6CB2176F210D2368697CC339DF4E7",
"type" : "ed25519"
}
}

+ 0
- 15
docs/examples/node3/priv_validator.json View File

@ -1,15 +0,0 @@
{
"address": "6D6A1E313B407B5474106CA8759C976B777AB659",
"pub_key": {
"type": "ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"last_signature": null,
"priv_key": {
"type": "ed25519",
"data": "622432A370111A5C25CFE121E163FE709C9D5C95F551EDBD7A2C69A8545C9B76E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
}
}

+ 0
- 15
docs/examples/node4/config.toml View File

@ -1,15 +0,0 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""

+ 0
- 42
docs/examples/node4/genesis.json View File

@ -1,42 +0,0 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}

+ 0
- 6
docs/examples/node4/node_key.json View File

@ -1,6 +0,0 @@
{
"priv_key" : {
"data" : "8895D6C9A1B46AB83A8E2BAE2121B8C3E245B9E9126EBD797FEAC5058285F2F64FDE2E8182C88AD5185A49D837C581465D57BD478C41865A66D7D9742D8AEF57",
"type" : "ed25519"
}
}

+ 0
- 15
docs/examples/node4/priv_validator.json View File

@ -1,15 +0,0 @@
{
"address": "829A9663611D3DD88A3D84EA0249679D650A0755",
"pub_key": {
"type": "ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"last_signature": null,
"priv_key": {
"type": "ed25519",
"data": "0A604D1C9AE94A50150BF39E603239092F9392E4773F4D8F4AC1D86E6438E89E2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
}
}

+ 1
- 2
docs/index.rst View File

@ -40,10 +40,9 @@ Tendermint Tools
:maxdepth: 2
deploy-testnets.rst
tools/ansible.rst
terraform-and-ansible.rst
tools/docker.rst
tools/mintnet-kubernetes.rst
tools/terraform-digitalocean.rst
tools/benchmarking.rst
tools/monitoring.rst


+ 71
- 0
docs/spec/README.md View File

@ -0,0 +1,71 @@
# Tendermint Specification
This is a markdown specification of the Tendermint blockchain.
It defines the base data structures, how they are validated,
and how they are communicated over the network.
If you find discrepancies between the spec and the code that
do not have an associated issue or pull request on github,
please submit them to our [bug bounty](https://tendermint.com/security)!
## Contents
### Data Structures
- [Overview](#overview)
- [Encoding and Digests](encoding.md)
- [Blockchain](blockchain.md)
- [State](state.md)
### P2P and Network Protocols
- [The Base P2P Layer](p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections
- [Peer Exchange (PEX)](reactors/pex): gossip known peer addresses so peers can find each other
- [Block Sync](reactors/block_sync): gossip blocks so peers can catch up quickly
- [Consensus](reactors/consensus): gossip votes and block parts so new blocks can be committed
- [Mempool](reactors/mempool): gossip transactions so they get included in blocks
- Evidence: TODO
### More
- Light Client: TODO
- Persistence: TODO
## Overview
Tendermint provides Byzantine Fault Tolerant State Machine Replication using
hash-linked batches of transactions. Such transaction batches are called "blocks".
Hence, Tendermint defines a "blockchain".
Each block in Tendermint has a unique index - its Height.
A block at `Height == H` can only be committed *after* the
block at `Height == H-1`.
Each block is committed by a known set of weighted Validators.
Membership and weighting within this set may change over time.
Tendermint guarantees the safety and liveness of the blockchain
so long as less than 1/3 of the total weight of the Validator set
is malicious or faulty.
A commit in Tendermint is a set of signed messages from more than 2/3 of
the total weight of the current Validator set. Validators take turns proposing
blocks and voting on them. Once enough votes are received, the block is considered
committed. These votes are included in the *next* block as proof that the previous block
was committed - they cannot be included in the current block, as that block has already been
created.
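A minimal sketch of the "more than 2/3 of the total voting power" rule just described, with illustrative validator and vote types (this is not the actual consensus code):

package main

import "fmt"

// Validator is an illustrative type; Power stands in for voting weight.
type Validator struct {
    Address string
    Power   int64
}

// hasQuorum reports whether the power behind the signed votes strictly exceeds
// 2/3 of the validator set's total power.
func hasQuorum(vals []Validator, signedBy map[string]bool) bool {
    var total, signed int64
    for _, v := range vals {
        total += v.Power
        if signedBy[v.Address] {
            signed += v.Power
        }
    }
    return signed*3 > total*2
}

func main() {
    vals := []Validator{{"a", 10}, {"b", 10}, {"c", 10}, {"d", 10}}
    fmt.Println(hasQuorum(vals, map[string]bool{"a": true, "b": true, "c": true})) // true  (30 of 40)
    fmt.Println(hasQuorum(vals, map[string]bool{"a": true, "b": true}))            // false (20 of 40)
}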
Once a block is committed, it can be executed against an application.
The application returns results for each of the transactions in the block.
The application can also return changes to be made to the validator set,
as well as a cryptographic digest of its latest state.
Tendermint is designed to enable efficient verification and authentication
of the latest state of the blockchain. To achieve this, it embeds
cryptographic commitments to certain information in the block "header".
This information includes the contents of the block (e.g. the transactions),
the validator set committing the block, as well as the various results returned by the application.
Note, however, that block execution only occurs *after* a block is committed.
Thus, application results can only be included in the *next* block.
Also note that data such as the transaction results and the validator set are never
directly included in the block - only their cryptographic digests (Merkle roots) are.
Hence, verification of a block requires a separate data structure to store this information.
We call this the `State`. Block verification also requires access to the previous block.
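To illustrate the idea of committing to a list (transactions, validators, results) via a single digest, here is a plain pairwise SHA-256 Merkle root in Go. It only illustrates the concept; it does not reproduce Tendermint's exact tree shape or encoding:

package main

import (
    "crypto/sha256"
    "fmt"
)

// merkleRoot hashes each item, then repeatedly hashes adjacent pairs until one
// root remains. An odd node is carried up unchanged.
func merkleRoot(items [][]byte) []byte {
    if len(items) == 0 {
        return nil
    }
    level := make([][]byte, len(items))
    for i, it := range items {
        h := sha256.Sum256(it)
        level[i] = h[:]
    }
    for len(level) > 1 {
        var next [][]byte
        for i := 0; i < len(level); i += 2 {
            if i+1 == len(level) {
                next = append(next, level[i])
                continue
            }
            h := sha256.Sum256(append(level[i], level[i+1]...))
            next = append(next, h[:])
        }
        level = next
    }
    return level[0]
}

func main() {
    txs := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")}
    fmt.Printf("%X\n", merkleRoot(txs))
}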

docs/specification/new-spec/abci.md → docs/spec/abci.md View File


docs/specification/new-spec/bft-time.md → docs/spec/bft-time.md View File


docs/specification/new-spec/blockchain.md → docs/spec/blockchain.md View File


docs/specification/new-spec/encoding.md → docs/spec/encoding.md View File


docs/specification/new-spec/light-client.md → docs/spec/light-client.md View File


docs/specification/new-spec/p2p/config.md → docs/spec/p2p/config.md View File


docs/specification/new-spec/p2p/connection.md → docs/spec/p2p/connection.md View File


docs/specification/new-spec/p2p/node.md → docs/spec/p2p/node.md View File


docs/specification/new-spec/p2p/peer.md → docs/spec/p2p/peer.md View File


docs/specification/new-spec/pre-amino.md → docs/spec/pre-amino.md View File


docs/specification/new-spec/reactors/block_sync/impl.md → docs/spec/reactors/block_sync/impl.md View File


docs/specification/new-spec/reactors/block_sync/reactor.md → docs/spec/reactors/block_sync/reactor.md View File


docs/specification/new-spec/reactors/consensus/consensus-reactor.md → docs/spec/reactors/consensus/consensus-reactor.md View File


docs/specification/new-spec/reactors/consensus/consensus.md → docs/spec/reactors/consensus/consensus.md View File


docs/specification/new-spec/reactors/consensus/proposer-selection.md → docs/spec/reactors/consensus/proposer-selection.md View File


docs/specification/new-spec/reactors/mempool/README.md → docs/spec/reactors/mempool/README.md View File


docs/specification/new-spec/reactors/mempool/concurrency.md → docs/spec/reactors/mempool/concurrency.md View File


docs/specification/new-spec/reactors/mempool/config.md → docs/spec/reactors/mempool/config.md View File


docs/specification/new-spec/reactors/mempool/functionality.md → docs/spec/reactors/mempool/functionality.md View File


docs/specification/new-spec/reactors/mempool/messages.md → docs/spec/reactors/mempool/messages.md View File


docs/specification/new-spec/reactors/pex/pex.md → docs/spec/reactors/pex/pex.md View File


docs/specification/new-spec/scripts/crypto.go → docs/spec/scripts/crypto.go View File


docs/specification/new-spec/state.md → docs/spec/state.md View File


+ 1
- 0
docs/specification/corruption.rst View File

@ -38,6 +38,7 @@ Recovering from data corruption can be hard and time-consuming. Here are two app
1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers.
2) Try to repair the WAL file manually:
1. Create a backup of the corrupted WAL file:
.. code:: bash


+ 1
- 71
docs/specification/new-spec/README.md View File

@ -1,71 +1 @@
# Tendermint Specification
This is a markdown specification of the Tendermint blockchain.
It defines the base data structures, how they are validated,
and how they are communicated over the network.
If you find discrepancies between the spec and the code that
do not have an associated issue or pull request on github,
please submit them to our [bug bounty](https://tendermint.com/security)!
## Contents
### Data Structures
- [Overview](#overview)
- [Encoding and Digests](encoding.md)
- [Blockchain](blockchain.md)
- [State](state.md)
### P2P and Network Protocols
- [The Base P2P Layer](p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections
- [Peer Exchange (PEX)](reactors/pex): gossip known peer addresses so peers can find each other
- [Block Sync](reactors/block_sync): gossip blocks so peers can catch up quickly
- [Consensus](reactors/consensus): gossip votes and block parts so new blocks can be committed
- [Mempool](reactors/mempool): gossip transactions so they get included in blocks
- Evidence: TODO
### More
- Light Client: TODO
- Persistence: TODO
## Overview
Tendermint provides Byzantine Fault Tolerant State Machine Replication using
hash-linked batches of transactions. Such transaction batches are called "blocks".
Hence, Tendermint defines a "blockchain".
Each block in Tendermint has a unique index - its Height.
A block at `Height == H` can only be committed *after* the
block at `Height == H-1`.
Each block is committed by a known set of weighted Validators.
Membership and weighting within this set may change over time.
Tendermint guarantees the safety and liveness of the blockchain
so long as less than 1/3 of the total weight of the Validator set
is malicious or faulty.
A commit in Tendermint is a set of signed messages from more than 2/3 of
the total weight of the current Validator set. Validators take turns proposing
blocks and voting on them. Once enough votes are received, the block is considered
committed. These votes are included in the *next* block as proof that the previous block
was committed - they cannot be included in the current block, as that block has already been
created.
Once a block is committed, it can be executed against an application.
The application returns results for each of the transactions in the block.
The application can also return changes to be made to the validator set,
as well as a cryptographic digest of its latest state.
Tendermint is designed to enable efficient verification and authentication
of the latest state of the blockchain. To achieve this, it embeds
cryptographic commitments to certain information in the block "header".
This information includes the contents of the block (eg. the transactions),
the validator set committing the block, as well as the various results returned by the application.
Note, however, that block execution only occurs *after* a block is committed.
Thus, application results can only be included in the *next* block.
Also note that information like the transaction results and the validator set are never
directly included in the block - only their cryptographic digests (Merkle roots) are.
Hence, verification of a block requires a separate data structure to store this information.
We call this the `State`. Block verification also requires access to the previous block.
Spec moved to [docs/spec](./docs/spec).

+ 138
- 0
docs/terraform-and-ansible.rst View File

@ -0,0 +1,138 @@
Terraform & Ansible
===================
Automated deployments are done using `Terraform <https://www.terraform.io/>`__ to create servers on Digital Ocean, then
`Ansible <http://www.ansible.com/>`__ to create and manage testnets on those servers.
Install
-------
NOTE: see the `integration bash script <https://github.com/tendermint/tendermint/blob/zach/ansible/networks/remote/integration.sh>`__ that can be run on a fresh DO droplet and will automatically spin up a 4 node testnet. The script more or less does everything described below.
- Install `Terraform <https://www.terraform.io/downloads.html>`__ and `Ansible <http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html>`__ on a Linux machine.
- Create a `DigitalOcean API token <https://cloud.digitalocean.com/settings/api/tokens>`__ with read and write capability.
- Install the python dopy package (``pip install dopy``)
- Create SSH keys (``ssh-keygen``)
- Set environment variables:
::
export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub"
These will be used by both ``terraform`` and ``ansible``.
Terraform
---------
This step will create four Digital Ocean droplets. First, go to the correct directory:
::
cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform
then:
::
terraform init
terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE"
and you will get a list of IP addresses that belong to your droplets.
With the droplets created and running, let's set up Ansible.
Using Ansible
-------------
The playbooks in `the ansible directory <https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible>`__
run ansible roles to configure the sentry node architecture. You must switch to this directory to run ansible (``cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible``).
There are several roles that are self-explanatory:
First, we configure our droplets by specifying the paths for tendermint (``BINARY``) and the node files (``CONFIGDIR``). The latter expects any number of directories named ``node0, node1, ...`` and so on (equal to the number of droplets created). For this example, we use pre-created files from `this directory <https://github.com/tendermint/tendermint/tree/master/docs/examples>`__. To create your own files, use either the ``tendermint testnet`` command or review `manual deployments <./deploy-testnets.html>`__.
Here's the command to run:
::
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
Voila! All your droplets now have the ``tendermint`` binary and required configuration files to run a testnet.
Next, we run the install role:
::
ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
which, as you'll see below, executes ``tendermint node --proxy_app=kvstore`` on all droplets. Although we'll soon be modifying this role and running it again, this first execution allows us to get each ``node_info.id`` that corresponds to each ``node_info.listen_addr``. (This part will be automated in the future.) In your browser (or using ``curl``), for every droplet, go to IP:46657/status and note the two ``node_info`` fields just mentioned; a short Go sketch that scripts this step appears at the end of this file. Notice that blocks aren't being created (``latest_block_height`` should be zero and not increasing).
Next, open ``roles/install/templates/systemd.service.j2`` and look for the line ``ExecStart`` which should look something like:
::
ExecStart=/usr/bin/tendermint node --proxy_app=kvstore
and add the ``--p2p.persistent_peers`` flag with the relevant information for each node. The resulting file should look something like:
::
[Unit]
Description={{service}}
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
User={{service}}
Group={{service}}
PermissionsStartOnly=true
ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=167b80242c300bf0ccfb3ced3dec60dc2a81776e@165.227.41.206:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@165.227.43.146:46656,303a1a4312c30525c99ba66522dd81cca56a361a@159.89.115.32:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@159.89.119.125:46656
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
[Install]
WantedBy=multi-user.target
Then, stop the nodes:
::
ansible-playbook -i inventory/digital_ocean.py -l sentrynet stop.yml
Finally, we run the install role again:
::
ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
to re-run ``tendermint node`` with the new flag, on all droplets. The ``latest_block_hash`` should now be changing and ``latest_block_height`` increasing. Your testnet is now up and running :)
Peek at the logs with the status role:
::
ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml
Logging
-------
The crudest way to inspect logs is the status role described above. You can also ship logs to Logz.io, an Elastic Stack (Elasticsearch, Logstash and Kibana) service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on `this page <https://app.logz.io/#/dashboard/data-sources/Filebeat>`__, then:
::
yum install systemd-devel || echo "This will only work on RHEL-based systems."
apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
go get github.com/mheese/journalbeat
ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
Cleanup
-------
To remove your droplets, run:
::
terraform destroy -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE"
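For readers who prefer to script the "note each node_info.id" step above, here is a hedged Go sketch that queries each droplet's :46657/status endpoint and assembles the ``--p2p.persistent_peers`` value. It assumes the response shape used by the integration script elsewhere in this release (result.node_info.id) and the default ports 46657 (RPC) and 46656 (p2p); persistentPeers and statusResp are illustrative names, and the IPs in main are the example droplet addresses shown earlier.

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "strings"
)

// statusResp captures only the field we need from /status.
type statusResp struct {
    Result struct {
        NodeInfo struct {
            ID string `json:"id"`
        } `json:"node_info"`
    } `json:"result"`
}

// persistentPeers fetches each node's ID and formats id@ip:46656 entries.
func persistentPeers(ips []string) (string, error) {
    peers := make([]string, 0, len(ips))
    for _, ip := range ips {
        resp, err := http.Get(fmt.Sprintf("http://%s:46657/status", ip))
        if err != nil {
            return "", err
        }
        var s statusResp
        if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
            resp.Body.Close()
            return "", err
        }
        resp.Body.Close()
        peers = append(peers, fmt.Sprintf("%s@%s:46656", s.Result.NodeInfo.ID, ip))
    }
    return strings.Join(peers, ","), nil
}

func main() {
    out, err := persistentPeers([]string{"165.227.41.206", "159.89.115.32"})
    if err != nil {
        panic(err)
    }
    fmt.Println("--p2p.persistent_peers=" + out)
}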

+ 3
- 2
mempool/mempool.go View File

@ -3,6 +3,7 @@ package mempool
import (
"bytes"
"container/list"
"fmt"
"sync"
"sync/atomic"
"time"
@ -255,11 +256,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
tx: tx,
}
mem.txs.PushBack(memTx)
mem.logger.Info("Added good transaction", "tx", tx, "res", r)
mem.logger.Info("Added good transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r)
mem.notifyTxsAvailable()
} else {
// ignore bad transaction
mem.logger.Info("Rejected bad transaction", "tx", tx, "res", r)
mem.logger.Info("Rejected bad transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r)
// remove from cache (it might be good later)
mem.cache.Remove(tx)


+ 0
- 52
networks/remote/ansible/README.rst View File

@ -1,52 +0,0 @@
Using Ansible
=============
.. figure:: assets/a_plus_t.png
:alt: Ansible plus Tendermint
Ansible plus Tendermint
The playbooks in `the ansible directory <https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible>`__
run ansible `roles <http://www.ansible.com/>`__ to configure the sentry node architecture.
Prerequisites
-------------
- Install `Ansible 2.0 or higher <https://www.ansible.com>`__ on a linux machine.
- Create a `DigitalOcean API token <https://cloud.digitalocean.com/settings/api/tokens>`__ with read and write capability.
- Create SSH keys
- Install the python dopy package (for the digital_ocean.py script)
Build
-----
::
export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub"
ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
# The scripts assume that you have your validator set up already.
# You can create the folder structure for the sentry nodes using `tendermint testnet`.
# For example: tendermint testnet --v 0 --n 4 --o build/
# Then copy your genesis.json and modify the config.toml as you see fit.
# Reconfig the sentry nodes with a new BINARY and the configuration files from the build folder:
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=`pwd`/build/tendermint -e CONFIGDIR=`pwd`/build
Shipping logs to logz.io
------------------------
Logz.io is an Elastic stack (Elastic search, Logstash and Kibana) service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on `this page <https://app.logz.io/#/dashboard/data-sources/Filebeat>`__.
::
yum install systemd-devel || echo "This will only work on RHEL-based systems."
apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
go get github.com/mheese/journalbeat
ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345

+ 1
- 1
networks/remote/ansible/roles/install/templates/systemd.service.j2 View File

@ -8,7 +8,7 @@ Restart=on-failure
User={{service}}
Group={{service}}
PermissionsStartOnly=true
ExecStart=/usr/bin/tendermint node --proxy_app=dummy
ExecStart=/usr/bin/tendermint node --proxy_app=kvstore
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM


+ 132
- 0
networks/remote/integration.sh View File

@ -0,0 +1,132 @@
#!/usr/bin/env bash
# XXX: this script is intended to be run from a fresh Digital Ocean droplet
# NOTE: you must set this manually now
echo "export DO_API_TOKEN=\"yourToken\"" >> ~/.profile
sudo apt-get update -y
sudo apt-get upgrade -y
sudo apt-get install -y jq unzip python-pip software-properties-common make
# get and unpack golang
curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz
tar -xvf go1.10.linux-amd64.tar.gz
## move binary and add to path
mv go /usr/local
echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile
## create the goApps directory, set GOPATH, and put it on PATH
mkdir goApps
echo "export GOPATH=/root/goApps" >> ~/.profile
echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile
source ~/.profile
## get the code and move into repo
REPO=github.com/tendermint/tendermint
go get $REPO
cd $GOPATH/src/$REPO
## build
git checkout zach/ansible
make get_tools
make get_vendor_deps
make build
# generate an ssh key
ssh-keygen -f $HOME/.ssh/id_rsa -t rsa -N ''
echo "export SSH_KEY_FILE=\"\$HOME/.ssh/id_rsa.pub\"" >> ~/.profile
source ~/.profile
# install terraform
wget https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip
unzip terraform_0.11.7_linux_amd64.zip -d /usr/bin/
# install ansible
sudo apt-get update -y
sudo apt-add-repository ppa:ansible/ansible -y
sudo apt-get update -y
sudo apt-get install ansible -y
# required by ansible
pip install dopy
# the next two commands are directory sensitive
cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform
terraform init
terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" -auto-approve
# let the droplets boot
sleep 60
# get the IPs
ip0=`terraform output -json public_ips | jq '.value[0]'`
ip1=`terraform output -json public_ips | jq '.value[1]'`
ip2=`terraform output -json public_ips | jq '.value[2]'`
ip3=`terraform output -json public_ips | jq '.value[3]'`
# to remove quotes
strip() {
opt=$1
temp="${opt%\"}"
temp="${temp#\"}"
echo $temp
}
ip0=$(strip $ip0)
ip1=$(strip $ip1)
ip2=$(strip $ip2)
ip3=$(strip $ip3)
# all the ansible commands are also directory specific
cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible
ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
sleep 10
# get each node's ID, then populate the ansible file
id0=`curl $ip0:46657/status | jq .result.node_info.id`
id1=`curl $ip1:46657/status | jq .result.node_info.id`
id2=`curl $ip2:46657/status | jq .result.node_info.id`
id3=`curl $ip3:46657/status | jq .result.node_info.id`
id0=$(strip $id0)
id1=$(strip $id1)
id2=$(strip $id2)
id3=$(strip $id3)
# remove file we'll re-write to with new info
old_ansible_file=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/roles/install/templates/systemd.service.j2
rm $old_ansible_file
# need to populate the `--p2p.persistent_peers` flag
echo "[Unit]
Description={{service}}
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
User={{service}}
Group={{service}}
PermissionsStartOnly=true
ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=$id0@$ip0:46656,$id1@$ip1:46656,$id2@$ip2:46656,$id3@$ip3:46656
ExecReload=/bin/kill -HUP \$MAINPID
KillSignal=SIGTERM
[Install]
WantedBy=multi-user.target
" >> $old_ansible_file
# now, we can re-run the install command
ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
# and finally restart it all
ansible-playbook -i inventory/digital_ocean.py -l sentrynet restart.yml
echo "congratulations, your testnet is now running :)"

+ 0
- 33
networks/remote/terraform/README.rst View File

@ -1,33 +0,0 @@
Using Terraform
===============
This is a `Terraform <https://www.terraform.io/>`__ configuration that sets up DigitalOcean droplets.
Prerequisites
-------------
- Install `HashiCorp Terraform <https://www.terraform.io>`__ on a linux machine.
- Create a `DigitalOcean API token <https://cloud.digitalocean.com/settings/api/tokens>`__ with read and write capability.
- Create SSH keys
Build
-----
::
export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub"
terraform init
terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE"
At the end you will get a list of IP addresses that belongs to your new droplets.
Destroy
-------
Run the below:
::
terraform destroy

+ 1
- 1
networks/remote/terraform/cluster/variables.tf View File

@ -5,7 +5,7 @@ variable "name" {
variable "regions" {
description = "Regions to launch in"
type = "list"
default = ["AMS2", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"]
default = ["AMS3", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"]
}
variable "ssh_key" {


+ 1
- 0
node/node.go View File

@ -343,6 +343,7 @@ func NewNode(config *cfg.Config,
}
indexerService := txindex.NewIndexerService(txIndexer, eventBus)
indexerService.SetLogger(logger.With("module", "txindex"))
// run the profile server
profileHost := config.ProfListenAddress


+ 1
- 1
p2p/pex/addrbook.go View File

@ -186,7 +186,7 @@ func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
if ka == nil {
return
}
a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID)
a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID())
a.removeFromAllBuckets(ka)
}


+ 1
- 1
p2p/switch.go View File

@ -565,7 +565,7 @@ func (sw *Switch) addPeer(pc peerConn) error {
if sw.nodeKey.ID() == peerID {
addr := peerNodeInfo.NetAddress()
// remove the given address from the address book if we're added it earlier
// remove the given address from the address book if we added it earlier
sw.addrBook.RemoveAddress(addr)
// add the given address to the address book to avoid dialing ourselves


+ 4
- 10
state/execution.go View File

@ -341,23 +341,17 @@ func updateState(s State, blockID types.BlockID, header *types.Header,
// Fire TxEvent for every tx.
// NOTE: if Tendermint crashes before commit, some or all of these events may be published again.
func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses) {
// NOTE: do we still need this buffer ?
txEventBuffer := types.NewTxEventBuffer(eventBus, int(block.NumTxs))
eventBus.PublishEventNewBlock(types.EventDataNewBlock{block})
eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header})
for i, tx := range block.Data.Txs {
txEventBuffer.PublishEventTx(types.EventDataTx{types.TxResult{
eventBus.PublishEventTx(types.EventDataTx{types.TxResult{
Height: block.Height,
Index: uint32(i),
Tx: tx,
Result: *(abciResponses.DeliverTx[i]),
}})
}
eventBus.PublishEventNewBlock(types.EventDataNewBlock{block})
eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header})
err := txEventBuffer.Flush()
if err != nil {
logger.Error("Failed to flush event buffer", "err", err)
}
}
//----------------------------------------------------------------------------------------------------


+ 1
- 1
state/txindex/indexer.go View File

@ -34,7 +34,7 @@ type Batch struct {
}
// NewBatch creates a new Batch.
func NewBatch(n int) *Batch {
func NewBatch(n int64) *Batch {
return &Batch{
Ops: make([]*types.TxResult, n),
}


+ 29
- 6
state/txindex/indexer_service.go View File

@ -11,6 +11,8 @@ const (
subscriber = "IndexerService"
)
// IndexerService connects event bus and transaction indexer together in order
// to index transactions coming from event bus.
type IndexerService struct {
cmn.BaseService
@ -18,6 +20,7 @@ type IndexerService struct {
eventBus *types.EventBus
}
// NewIndexerService returns a new service instance.
func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService {
is := &IndexerService{idr: idr, eventBus: eventBus}
is.BaseService = *cmn.NewBaseService(nil, "IndexerService", is)
@ -27,15 +30,35 @@ func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService
// OnStart implements cmn.Service by subscribing for all transactions
// and indexing them by tags.
func (is *IndexerService) OnStart() error {
ch := make(chan interface{})
if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryTx, ch); err != nil {
blockHeadersCh := make(chan interface{})
if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryNewBlockHeader, blockHeadersCh); err != nil {
return err
}
txsCh := make(chan interface{})
if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryTx, txsCh); err != nil {
return err
}
go func() {
for event := range ch {
// TODO: may be not perfomant to write one event at a time
txResult := event.(types.EventDataTx).TxResult
is.idr.Index(&txResult)
for {
e, ok := <-blockHeadersCh
if !ok {
return
}
header := e.(types.EventDataNewBlockHeader).Header
batch := NewBatch(header.NumTxs)
for i := int64(0); i < header.NumTxs; i++ {
e, ok := <-txsCh
if !ok {
is.Logger.Error("Failed to index all transactions due to closed transactions channel", "height", header.Height, "numTxs", header.NumTxs, "numProcessed", i)
return
}
txResult := e.(types.EventDataTx).TxResult
batch.Add(&txResult)
}
is.idr.AddBatch(batch)
is.Logger.Info("Indexed block", "height", header.Height)
}
}()
return nil


+ 2
- 2
state/txindex/kv/kv_test.go View File

@ -190,7 +190,7 @@ func txResultWithTags(tags []cmn.KVPair) *types.TxResult {
}
}
func benchmarkTxIndex(txsCount int, b *testing.B) {
func benchmarkTxIndex(txsCount int64, b *testing.B) {
tx := types.Tx("HELLO WORLD")
txResult := &types.TxResult{
Height: 1,
@ -215,7 +215,7 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
indexer := NewTxIndex(store)
batch := txindex.NewBatch(txsCount)
for i := 0; i < txsCount; i++ {
for i := int64(0); i < txsCount; i++ {
if err := batch.Add(txResult); err != nil {
b.Fatal(err)
}


+ 3
- 5
types/part_set.go View File

@ -176,7 +176,7 @@ func (ps *PartSet) Total() int {
return ps.total
}
func (ps *PartSet) AddPart(part *Part, verify bool) (bool, error) {
func (ps *PartSet) AddPart(part *Part) (bool, error) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@ -191,10 +191,8 @@ func (ps *PartSet) AddPart(part *Part, verify bool) (bool, error) {
}
// Check hash proof
if verify {
if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) {
return false, ErrPartSetInvalidProof
}
if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) {
return false, ErrPartSetInvalidProof
}
// Add part


+ 3
- 3
types/part_set_test.go View File

@ -34,7 +34,7 @@ func TestBasicPartSet(t *testing.T) {
for i := 0; i < partSet.Total(); i++ {
part := partSet.GetPart(i)
//t.Logf("\n%v", part)
added, err := partSet2.AddPart(part, true)
added, err := partSet2.AddPart(part)
if !added || err != nil {
t.Errorf("Failed to add part %v, error: %v", i, err)
}
@ -74,7 +74,7 @@ func TestWrongProof(t *testing.T) {
// Test adding a part with wrong trail.
part := partSet.GetPart(0)
part.Proof.Aunts[0][0] += byte(0x01)
added, err := partSet2.AddPart(part, true)
added, err := partSet2.AddPart(part)
if added || err == nil {
t.Errorf("Expected to fail adding a part with bad trail.")
}
@ -82,7 +82,7 @@ func TestWrongProof(t *testing.T) {
// Test adding a part with wrong bytes.
part = partSet.GetPart(1)
part.Bytes[0] += byte(0x01)
added, err = partSet2.AddPart(part, true)
added, err = partSet2.AddPart(part)
if added || err == nil {
t.Errorf("Expected to fail adding a part with bad bytes.")
}


+ 2
- 2
version/version.go View File

@ -4,13 +4,13 @@ package version
const (
Maj = "0"
Min = "19"
Fix = "3"
Fix = "4"
)
var (
// Version is the current version of Tendermint
// Must be a string because scripts like dist.sh read this file.
Version = "0.19.3"
Version = "0.19.4"
// GitCommit is the current HEAD set using ldflags.
GitCommit string

