From e4ee34cfbc923ce902118c7ff503cb2b8f542478 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Sep 2018 22:32:44 -0400 Subject: [PATCH 001/113] version: types --- version/version.go | 58 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/version/version.go b/version/version.go index d8bab5772..5a089141f 100644 --- a/version/version.go +++ b/version/version.go @@ -1,26 +1,56 @@ package version -// Version components -const ( - Maj = "0" - Min = "25" - Fix = "0" -) - var ( - // Version is the current version of Tendermint - // Must be a string because scripts like dist.sh read this file. - Version = "0.25.0" - // GitCommit is the current HEAD set using ldflags. GitCommit string -) -// ABCIVersion is the version of the ABCI library -const ABCIVersion = "0.14.0" + // Version is the built softwares version. + Version string = TMCoreSemVer +) func init() { if GitCommit != "" { Version += "-" + GitCommit } } + +const ( + // TMCoreSemVer is the current version of Tendermint Core. + // It's the Semantic Version of the software. + // Must be a string because scripts like dist.sh read this file. + TMCoreSemVer = "0.25.0" + + // ABCISemVer is the semantic version of the ABCI library + ABCISemVer = "0.14.0" + ABCIVersion = ABCISemVer +) + +// Protocol is used for implementation agnostic versioning. +type Protocol uint64 + +var ( + // P2PProtocol versions all p2p behaviour and msgs. + P2PProtocol Protocol = 4 + + // BlockProtocol versions all block data structures and processing. + BlockProtocol Protocol = 7 +) + +//------------------------------------------------------------------------ +// Version types + +// App includes the protocol and software version for the application. +// This information is included in ResponseInfo. The App.Protocol can be +// updated in ResponseEndBlock. +type App struct { + Protocol Protocol `json:"protocol"` + Software string `json:"software"` +} + +// Consensus captures the consensus rules for processing a block in the blockchain, +// including all blockchain data structures and the rules of the application's +// state transition machine. 
+type Consensus struct { + Block Protocol `json:"block"` + App Protocol `json:"app"` +} From d12e55c494afec886bc4958e8fec633116a874b7 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 25 Sep 2018 14:24:18 +0400 Subject: [PATCH 002/113] node: Respond always to OS interrupts (#2479) * stop node upon receiving SIGTERM or CTRL-Ceven during genesis sleep by setting up interrupt before starting a node Closes #2434 * call Start, not OnStart when starting a component to avoid: ``` E[09-24|10:13:15.805] Not stopping PubSub -- have not been started yet module=pubsub impl=PubSub ``` being printed on exit --- CHANGELOG_PENDING.md | 2 ++ cmd/tendermint/commands/run_node.go | 21 ++++++++++++++++++--- node/node.go | 8 -------- p2p/transport.go | 6 +++++- types/event_bus.go | 2 +- 5 files changed, 26 insertions(+), 13 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 81c7a3a29..450c6a5df 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -9,9 +9,11 @@ BREAKING CHANGES: * Apps * Go API +- [node] Remove node.RunForever FEATURES: IMPROVEMENTS: BUG FIXES: +- [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 542e5c991..6dabacb1f 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -2,6 +2,9 @@ package commands import ( "fmt" + "os" + "os/signal" + "syscall" "github.com/spf13/cobra" @@ -49,19 +52,31 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command { Use: "node", Short: "Run the tendermint node", RunE: func(cmd *cobra.Command, args []string) error { - // Create & start node n, err := nodeProvider(config, logger) if err != nil { return fmt.Errorf("Failed to create node: %v", err) } + // Stop upon receiving SIGTERM or CTRL-C + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + for sig := range c { + logger.Error(fmt.Sprintf("captured %v, exiting...", sig)) + if n.IsRunning() { + n.Stop() + } + os.Exit(1) + } + }() + if err := n.Start(); err != nil { return fmt.Errorf("Failed to start node: %v", err) } logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo()) - // Trap signal, run forever. - n.RunForever() + // Run forever + select {} return nil }, diff --git a/node/node.go b/node/node.go index 0e5581a56..016ed367b 100644 --- a/node/node.go +++ b/node/node.go @@ -586,14 +586,6 @@ func (n *Node) OnStop() { } } -// RunForever waits for an interrupt signal and stops the node. -func (n *Node) RunForever() { - // Sleep forever and then... - cmn.TrapSignal(func() { - n.Stop() - }) -} - // ConfigureRPC sets all variables in rpccore so they will serve // rpc calls from this node func (n *Node) ConfigureRPC() { diff --git a/p2p/transport.go b/p2p/transport.go index 61cff55d9..903d193d9 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -207,7 +207,11 @@ func (mt *MultiplexTransport) Dial( func (mt *MultiplexTransport) Close() error { close(mt.closec) - return mt.listener.Close() + if mt.listener != nil { + return mt.listener.Close() + } + + return nil } // Listen implements transportLifecycle. 
diff --git a/types/event_bus.go b/types/event_bus.go index d11c65205..466ae7b44 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -45,7 +45,7 @@ func (b *EventBus) SetLogger(l log.Logger) { } func (b *EventBus) OnStart() error { - return b.pubsub.OnStart() + return b.pubsub.Start() } func (b *EventBus) OnStop() { From eb0da7f9cb48eb98cd8f239c1a5cc7ca0ee501cd Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 25 Sep 2018 14:43:28 +0400 Subject: [PATCH 003/113] libs: Handle SIGHUP explicitly inside autofile (#2480) * handle SIGHUP explicitly inside autofile Refs #2260 * libs: Use consistent channel suffix --- libs/autofile/autofile.go | 79 +++++++++++++++++++++------------ libs/autofile/autofile_test.go | 9 +--- libs/autofile/sighup_watcher.go | 69 ---------------------------- 3 files changed, 52 insertions(+), 105 deletions(-) delete mode 100644 libs/autofile/sighup_watcher.go diff --git a/libs/autofile/autofile.go b/libs/autofile/autofile.go index fa1eab20b..6822545e2 100644 --- a/libs/autofile/autofile.go +++ b/libs/autofile/autofile.go @@ -2,7 +2,9 @@ package autofile import ( "os" + "os/signal" "sync" + "syscall" "time" cmn "github.com/tendermint/tendermint/libs/common" @@ -32,50 +34,70 @@ if err != nil { */ const ( - autoFileOpenDuration = 1000 * time.Millisecond - autoFilePerms = os.FileMode(0600) + autoFileClosePeriod = 1000 * time.Millisecond + autoFilePerms = os.FileMode(0600) ) -// Automatically closes and re-opens file for writing. +// AutoFile automatically closes and re-opens file for writing. The file is +// automatically setup to close itself every 1s and upon receiving SIGHUP. +// // This is useful for using a log file with the logrotate tool. type AutoFile struct { - ID string - Path string - ticker *time.Ticker - tickerStopped chan struct{} // closed when ticker is stopped - mtx sync.Mutex - file *os.File + ID string + Path string + + closeTicker *time.Ticker + closeTickerStopc chan struct{} // closed when closeTicker is stopped + hupc chan os.Signal + + mtx sync.Mutex + file *os.File } -func OpenAutoFile(path string) (af *AutoFile, err error) { - af = &AutoFile{ - ID: cmn.RandStr(12) + ":" + path, - Path: path, - ticker: time.NewTicker(autoFileOpenDuration), - tickerStopped: make(chan struct{}), +// OpenAutoFile creates an AutoFile in the path (with random ID). If there is +// an error, it will be of type *PathError or *ErrPermissionsChanged (if file's +// permissions got changed (should be 0600)). +func OpenAutoFile(path string) (*AutoFile, error) { + af := &AutoFile{ + ID: cmn.RandStr(12) + ":" + path, + Path: path, + closeTicker: time.NewTicker(autoFileClosePeriod), + closeTickerStopc: make(chan struct{}), } - if err = af.openFile(); err != nil { - return + if err := af.openFile(); err != nil { + af.Close() + return nil, err } - go af.processTicks() - sighupWatchers.addAutoFile(af) - return + + // Close file on SIGHUP. 
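+	// (Editor's note, a clarifying comment: this is what makes AutoFile work with
+	// logrotate. After the rotation tool renames the file and sends SIGHUP, the
+	// next Write re-opens, and thereby re-creates, the file at Path.)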
+ af.hupc = make(chan os.Signal, 1) + signal.Notify(af.hupc, syscall.SIGHUP) + go func() { + for range af.hupc { + af.closeFile() + } + }() + + go af.closeFileRoutine() + + return af, nil } func (af *AutoFile) Close() error { - af.ticker.Stop() - close(af.tickerStopped) - err := af.closeFile() - sighupWatchers.removeAutoFile(af) - return err + af.closeTicker.Stop() + close(af.closeTickerStopc) + if af.hupc != nil { + close(af.hupc) + } + return af.closeFile() } -func (af *AutoFile) processTicks() { +func (af *AutoFile) closeFileRoutine() { for { select { - case <-af.ticker.C: + case <-af.closeTicker.C: af.closeFile() - case <-af.tickerStopped: + case <-af.closeTickerStopc: return } } @@ -89,6 +111,7 @@ func (af *AutoFile) closeFile() (err error) { if file == nil { return nil } + af.file = nil return file.Close() } diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go index e8a9b3e4d..5408d8204 100644 --- a/libs/autofile/autofile_test.go +++ b/libs/autofile/autofile_test.go @@ -3,7 +3,6 @@ package autofile import ( "io/ioutil" "os" - "sync/atomic" "syscall" "testing" "time" @@ -37,13 +36,10 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, err) // Send SIGHUP to self. - oldSighupCounter := atomic.LoadInt32(&sighupCounter) syscall.Kill(syscall.Getpid(), syscall.SIGHUP) // Wait a bit... signals are not handled synchronously. - for atomic.LoadInt32(&sighupCounter) == oldSighupCounter { - time.Sleep(time.Millisecond * 10) - } + time.Sleep(time.Millisecond * 10) // Write more to the file. _, err = af.Write([]byte("Line 3\n")) @@ -87,7 +83,4 @@ func TestOpenAutoFilePerms(t *testing.T) { } else { t.Errorf("unexpected error %v", e) } - - err = af.Close() - require.NoError(t, err) } diff --git a/libs/autofile/sighup_watcher.go b/libs/autofile/sighup_watcher.go deleted file mode 100644 index f72f12fcd..000000000 --- a/libs/autofile/sighup_watcher.go +++ /dev/null @@ -1,69 +0,0 @@ -package autofile - -import ( - "os" - "os/signal" - "sync" - "sync/atomic" - "syscall" -) - -func init() { - initSighupWatcher() -} - -var sighupWatchers *SighupWatcher -var sighupCounter int32 // For testing - -func initSighupWatcher() { - sighupWatchers = newSighupWatcher() - - hup := make(chan os.Signal, 1) - signal.Notify(hup, syscall.SIGHUP) - - quit := make(chan os.Signal, 1) - signal.Notify(quit, os.Interrupt, syscall.SIGTERM) - - go func() { - select { - case <-hup: - sighupWatchers.closeAll() - atomic.AddInt32(&sighupCounter, 1) - case <-quit: - return - } - }() -} - -// Watchces for SIGHUP events and notifies registered AutoFiles -type SighupWatcher struct { - mtx sync.Mutex - autoFiles map[string]*AutoFile -} - -func newSighupWatcher() *SighupWatcher { - return &SighupWatcher{ - autoFiles: make(map[string]*AutoFile, 10), - } -} - -func (w *SighupWatcher) addAutoFile(af *AutoFile) { - w.mtx.Lock() - w.autoFiles[af.ID] = af - w.mtx.Unlock() -} - -// If AutoFile isn't registered or was already removed, does nothing. -func (w *SighupWatcher) removeAutoFile(af *AutoFile) { - w.mtx.Lock() - delete(w.autoFiles, af.ID) - w.mtx.Unlock() -} - -func (w *SighupWatcher) closeAll() { - w.mtx.Lock() - for _, af := range w.autoFiles { - af.closeFile() - } - w.mtx.Unlock() -} From 587116dae19d09b1d4fa23a1750bf88410429648 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 25 Sep 2018 04:14:38 -0700 Subject: [PATCH 004/113] metrics: Add additional metrics to p2p and consensus (#2425) * Add additional metrics to p2p and consensus Partially addresses https://github.com/cosmos/cosmos-sdk/issues/2169. 
* WIP * Updates from code review * Updates from code review * Add instrumentation namespace to configuration * Fix test failure * Updates from code review * Add quotes * Add atomic load * Use storeint64 * Use addInt64 in writePacketMsgTo --- CHANGELOG_PENDING.md | 2 + config/config.go | 4 ++ config/toml.go | 3 ++ consensus/metrics.go | 64 ++++++++++++++++++------- consensus/reactor.go | 29 +++++++++++- consensus/state.go | 12 +++-- docs/tendermint-core/configuration.md | 3 ++ mempool/metrics.go | 4 +- node/node.go | 6 +-- p2p/conn/connection.go | 8 ++-- p2p/metrics.go | 35 ++++++++++++-- p2p/peer.go | 67 +++++++++++++++++++++++---- p2p/peer_set_test.go | 1 + p2p/switch.go | 2 + p2p/test_util.go | 1 + p2p/transport.go | 4 +- 16 files changed, 202 insertions(+), 43 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 450c6a5df..3893cc4cf 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -14,6 +14,8 @@ BREAKING CHANGES: FEATURES: IMPROVEMENTS: +- [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics +- [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics BUG FIXES: - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time diff --git a/config/config.go b/config/config.go index d0e075477..ebb7a9ac7 100644 --- a/config/config.go +++ b/config/config.go @@ -634,6 +634,9 @@ type InstrumentationConfig struct { // you increase your OS limits. // 0 - unlimited. MaxOpenConnections int `mapstructure:"max_open_connections"` + + // Tendermint instrumentation namespace. + Namespace string `mapstructure:"namespace"` } // DefaultInstrumentationConfig returns a default configuration for metrics @@ -643,6 +646,7 @@ func DefaultInstrumentationConfig() *InstrumentationConfig { Prometheus: false, PrometheusListenAddr: ":26660", MaxOpenConnections: 3, + Namespace: "tendermint", } } diff --git a/config/toml.go b/config/toml.go index 9beb9d799..bc10590c8 100644 --- a/config/toml.go +++ b/config/toml.go @@ -284,6 +284,9 @@ prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}" # you increase your OS limits. # 0 - unlimited. max_open_connections = {{ .Instrumentation.MaxOpenConnections }} + +# Instrumentation namespace +namespace = "{{ .Instrumentation.Namespace }}" ` /****** these are for test settings ***********/ diff --git a/consensus/metrics.go b/consensus/metrics.go index 68d065ec6..39bfd24bd 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -8,6 +8,8 @@ import ( stdprometheus "github.com/prometheus/client_golang/prometheus" ) +const MetricsSubsystem = "consensus" + // Metrics contains metrics exposed by this package. type Metrics struct { // Height of the chain. @@ -38,74 +40,102 @@ type Metrics struct { BlockSizeBytes metrics.Gauge // Total number of transactions. TotalTxs metrics.Gauge + // The latest block height. + CommittedHeight metrics.Gauge + // Whether or not a node is fast syncing. 1 if yes, 0 if no. + FastSyncing metrics.Gauge } // PrometheusMetrics returns Metrics build using Prometheus client library. 
-func PrometheusMetrics() *Metrics { +func PrometheusMetrics(namespace string) *Metrics { return &Metrics{ Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "height", Help: "Height of the chain.", }, []string{}), Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "rounds", Help: "Number of rounds.", }, []string{}), Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "validators", Help: "Number of validators.", }, []string{}), ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "validators_power", Help: "Total power of all validators.", }, []string{}), MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "missing_validators", Help: "Number of validators who did not sign.", }, []string{}), MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "missing_validators_power", Help: "Total power of the missing validators.", }, []string{}), ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "byzantine_validators", Help: "Number of validators who tried to double sign.", }, []string{}), ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "byzantine_validators_power", Help: "Total power of the byzantine validators.", }, []string{}), BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "block_interval_seconds", Help: "Time between this and the last block.", }, []string{}), NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "num_txs", Help: "Number of transactions.", }, []string{}), BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "block_size_bytes", Help: "Size of the block.", }, []string{}), TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "total_txs", Help: "Total number of transactions.", }, []string{}), + CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "latest_block_height", + Help: "The latest block height.", + }, []string{}), + FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "fast_syncing", + Help: "Whether or not a node is fast syncing. 
1 if yes, 0 if no.", + }, []string{}), } } @@ -125,8 +155,10 @@ func NopMetrics() *Metrics { BlockIntervalSeconds: discard.NewGauge(), - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewGauge(), - TotalTxs: discard.NewGauge(), + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewGauge(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + FastSyncing: discard.NewGauge(), } } diff --git a/consensus/reactor.go b/consensus/reactor.go index 4a915ace1..2b4bab135 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -43,16 +43,27 @@ type ConsensusReactor struct { mtx sync.RWMutex fastSync bool eventBus *types.EventBus + + metrics *Metrics } +type ReactorOption func(*ConsensusReactor) + // NewConsensusReactor returns a new ConsensusReactor with the given // consensusState. -func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor { +func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options ...ReactorOption) *ConsensusReactor { conR := &ConsensusReactor{ conS: consensusState, fastSync: fastSync, + metrics: NopMetrics(), } + conR.updateFastSyncingMetric() conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR) + + for _, option := range options { + option(conR) + } + return conR } @@ -98,6 +109,7 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int conR.mtx.Lock() conR.fastSync = false conR.mtx.Unlock() + conR.metrics.FastSyncing.Set(0) if blocksSynced > 0 { // dont bother with the WAL if we fast synced @@ -850,6 +862,21 @@ func (conR *ConsensusReactor) StringIndented(indent string) string { return s } +func (conR *ConsensusReactor) updateFastSyncingMetric() { + var fastSyncing float64 + if conR.fastSync { + fastSyncing = 1 + } else { + fastSyncing = 0 + } + conR.metrics.FastSyncing.Set(fastSyncing) +} + +// ReactorMetrics sets the metrics +func ReactorMetrics(metrics *Metrics) ReactorOption { + return func(conR *ConsensusReactor) { conR.metrics = metrics } +} + //----------------------------------------------------------------------------- var ( diff --git a/consensus/state.go b/consensus/state.go index 3ee1cfbf1..12dfa4edf 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -124,8 +124,8 @@ type ConsensusState struct { metrics *Metrics } -// CSOption sets an optional parameter on the ConsensusState. -type CSOption func(*ConsensusState) +// StateOption sets an optional parameter on the ConsensusState. +type StateOption func(*ConsensusState) // NewConsensusState returns a new ConsensusState. func NewConsensusState( @@ -135,7 +135,7 @@ func NewConsensusState( blockStore sm.BlockStore, mempool sm.Mempool, evpool sm.EvidencePool, - options ...CSOption, + options ...StateOption, ) *ConsensusState { cs := &ConsensusState{ config: config, @@ -185,8 +185,8 @@ func (cs *ConsensusState) SetEventBus(b *types.EventBus) { cs.blockExec.SetEventBus(b) } -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) CSOption { +// StateMetrics sets the metrics. 
+func StateMetrics(metrics *Metrics) StateOption { return func(cs *ConsensusState) { cs.metrics = metrics } } @@ -1397,6 +1397,8 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) { cs.metrics.NumTxs.Set(float64(block.NumTxs)) cs.metrics.BlockSizeBytes.Set(float64(block.Size())) cs.metrics.TotalTxs.Set(float64(block.TotalTxs)) + cs.metrics.CommittedHeight.Set(float64(block.Height)) + } //----------------------------------------------------------------------------- diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index 29db12125..d759ab9fd 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -227,4 +227,7 @@ prometheus_listen_addr = ":26660" # you increase your OS limits. # 0 - unlimited. max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" ``` diff --git a/mempool/metrics.go b/mempool/metrics.go index f381678cb..fc4bb4fbe 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -3,7 +3,6 @@ package mempool import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -16,9 +15,10 @@ type Metrics struct { } // PrometheusMetrics returns Metrics build using Prometheus client library. -func PrometheusMetrics() *Metrics { +func PrometheusMetrics(namespace string) *Metrics { return &Metrics{ Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, Subsystem: "mempool", Name: "size", Help: "Size of the mempool (number of uncommitted transactions).", diff --git a/node/node.go b/node/node.go index 016ed367b..bba4dbda5 100644 --- a/node/node.go +++ b/node/node.go @@ -105,7 +105,7 @@ type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) { if config.Prometheus { - return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics() + return cs.PrometheusMetrics(config.Namespace), p2p.PrometheusMetrics(config.Namespace), mempl.PrometheusMetrics(config.Namespace) } return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics() } @@ -303,13 +303,13 @@ func NewNode(config *cfg.Config, blockStore, mempool, evidencePool, - cs.WithMetrics(csMetrics), + cs.StateMetrics(csMetrics), ) consensusState.SetLogger(consensusLogger) if privValidator != nil { consensusState.SetPrivValidator(privValidator) } - consensusReactor := cs.NewConsensusReactor(consensusState, fastSync) + consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics)) consensusReactor.SetLogger(consensusLogger) eventBus := types.NewEventBus() diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index bb67eab30..2eb210e3c 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -585,9 +585,9 @@ func (c *MConnection) Status() ConnectionStatus { status.Channels[i] = ChannelStatus{ ID: channel.desc.ID, SendQueueCapacity: cap(channel.sendQueue), - SendQueueSize: int(channel.sendQueueSize), // TODO use atomic + SendQueueSize: int(atomic.LoadInt32(&channel.sendQueueSize)), Priority: channel.desc.Priority, - RecentlySent: channel.recentlySent, + RecentlySent: atomic.LoadInt64(&channel.recentlySent), } } return status @@ -724,7 +724,7 @@ func (ch *Channel) nextPacketMsg() PacketMsg { func (ch *Channel) writePacketMsgTo(w io.Writer) 
(n int64, err error) { var packet = ch.nextPacketMsg() n, err = cdc.MarshalBinaryWriter(w, packet) - ch.recentlySent += n + atomic.AddInt64(&ch.recentlySent, n) return } @@ -756,7 +756,7 @@ func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) { func (ch *Channel) updateStats() { // Exponential decay of stats. // TODO: optimize. - ch.recentlySent = int64(float64(ch.recentlySent) * 0.8) + atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent)) * 0.8)) } //---------------------------------------- diff --git a/p2p/metrics.go b/p2p/metrics.go index ab876ee7c..94794dfd9 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -3,25 +3,51 @@ package p2p import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) +const MetricsSubsystem = "p2p" + // Metrics contains metrics exposed by this package. type Metrics struct { // Number of peers. Peers metrics.Gauge + // Number of bytes received from a given peer. + PeerReceiveBytesTotal metrics.Counter + // Number of bytes sent to a given peer. + PeerSendBytesTotal metrics.Counter + // Pending bytes to be sent to a given peer. + PeerPendingSendBytes metrics.Gauge } // PrometheusMetrics returns Metrics build using Prometheus client library. -func PrometheusMetrics() *Metrics { +func PrometheusMetrics(namespace string) *Metrics { return &Metrics{ Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "p2p", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "peers", Help: "Number of peers.", }, []string{}), + PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_receive_bytes_total", + Help: "Number of bytes received from a given peer.", + }, []string{"peer_id"}), + PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_send_bytes_total", + Help: "Number of bytes sent to a given peer.", + }, []string{"peer_id"}), + PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_pending_send_bytes", + Help: "Number of pending bytes to be sent to a given peer.", + }, []string{"peer_id"}), } } @@ -29,5 +55,8 @@ func PrometheusMetrics() *Metrics { func NopMetrics() *Metrics { return &Metrics{ Peers: discard.NewGauge(), + PeerReceiveBytesTotal: discard.NewCounter(), + PeerSendBytesTotal: discard.NewCounter(), + PeerPendingSendBytes: discard.NewGauge(), } } diff --git a/p2p/peer.go b/p2p/peer.go index 5dbc582c0..064f91817 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -13,6 +13,8 @@ import ( tmconn "github.com/tendermint/tendermint/p2p/conn" ) +const metricsTickerDuration = 10 * time.Second + var testIPSuffix uint32 // Peer is an interface representing a peer connected on a reactor. 
@@ -99,8 +101,13 @@ type peer struct { // User data Data *cmn.CMap + + metrics *Metrics + metricsTicker *time.Ticker } +type PeerOption func(*peer) + func newPeer( pc peerConn, mConfig tmconn.MConnConfig, @@ -108,12 +115,15 @@ func newPeer( reactorsByCh map[byte]Reactor, chDescs []*tmconn.ChannelDescriptor, onPeerError func(Peer, interface{}), + options ...PeerOption, ) *peer { p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.Channels, - Data: cmn.NewCMap(), + peerConn: pc, + nodeInfo: nodeInfo, + channels: nodeInfo.Channels, + Data: cmn.NewCMap(), + metricsTicker: time.NewTicker(metricsTickerDuration), + metrics: NopMetrics(), } p.mconn = createMConnection( @@ -125,6 +135,9 @@ func newPeer( mConfig, ) p.BaseService = *cmn.NewBaseService(nil, "Peer", p) + for _, option := range options { + option(p) + } return p } @@ -143,12 +156,18 @@ func (p *peer) OnStart() error { if err := p.BaseService.OnStart(); err != nil { return err } - err := p.mconn.Start() - return err + + if err := p.mconn.Start(); err != nil { + return err + } + + go p.metricsReporter() + return nil } // OnStop implements BaseService. func (p *peer) OnStop() { + p.metricsTicker.Stop() p.BaseService.OnStop() p.mconn.Stop() // stop everything and close the conn } @@ -200,7 +219,11 @@ func (p *peer) Send(chID byte, msgBytes []byte) bool { } else if !p.hasChannel(chID) { return false } - return p.mconn.Send(chID, msgBytes) + res := p.mconn.Send(chID, msgBytes) + if res { + p.metrics.PeerSendBytesTotal.With("peer-id", string(p.ID())).Add(float64(len(msgBytes))) + } + return res } // TrySend msg bytes to the channel identified by chID byte. Immediately returns @@ -211,7 +234,11 @@ func (p *peer) TrySend(chID byte, msgBytes []byte) bool { } else if !p.hasChannel(chID) { return false } - return p.mconn.TrySend(chID, msgBytes) + res := p.mconn.TrySend(chID, msgBytes) + if res { + p.metrics.PeerSendBytesTotal.With("peer-id", string(p.ID())).Add(float64(len(msgBytes))) + } + return res } // Get the data for a given key. @@ -314,6 +341,29 @@ func (p *peer) String() string { return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) } +func PeerMetrics(metrics *Metrics) PeerOption { + return func(p *peer) { + p.metrics = metrics + } +} + +func (p *peer) metricsReporter() { + for { + select { + case <-p.metricsTicker.C: + status := p.mconn.Status() + var sendQueueSize float64 + for _, chStatus := range status.Channels { + sendQueueSize += float64(chStatus.SendQueueSize) + } + + p.metrics.PeerPendingSendBytes.With("peer-id", string(p.ID())).Set(sendQueueSize) + case <-p.Quit(): + return + } + } +} + //------------------------------------------------------------------ // helper funcs @@ -333,6 +383,7 @@ func createMConnection( // which does onPeerError. 
panic(fmt.Sprintf("Unknown channel %X", chID)) } + p.metrics.PeerReceiveBytesTotal.With("peer_id", string(p.ID())).Add(float64(len(msgBytes))) reactor.Receive(chID, p, msgBytes) } diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index a352cce00..ee1c52eab 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -24,6 +24,7 @@ func randPeer(ip net.IP) *peer { ID: nodeKey.ID(), ListenAddr: fmt.Sprintf("%v.%v.%v.%v:26656", cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256), }, + metrics: NopMetrics(), } p.ip = ip diff --git a/p2p/switch.go b/p2p/switch.go index 57077e07d..dbef56ebf 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -463,6 +463,7 @@ func (sw *Switch) acceptRoutine() { chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, reactorsByCh: sw.reactorsByCh, + metrics: sw.metrics, }) if err != nil { switch err.(type) { @@ -549,6 +550,7 @@ func (sw *Switch) addOutboundPeerWithConfig( onPeerError: sw.StopPeerForError, persistent: persistent, reactorsByCh: sw.reactorsByCh, + metrics: sw.metrics, }) if err != nil { switch e := err.(type) { diff --git a/p2p/test_util.go b/p2p/test_util.go index 64b8b215c..3d48aaac4 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -29,6 +29,7 @@ func CreateRandomPeer(outbound bool) *peer { ListenAddr: netAddr.DialString(), }, mconn: &conn.MConnection{}, + metrics: NopMetrics(), } p.SetLogger(log.TestingLogger().With("peer", addr)) return p diff --git a/p2p/transport.go b/p2p/transport.go index 903d193d9..6f097b4f7 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -7,7 +7,7 @@ import ( "time" "github.com/tendermint/tendermint/config" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p/conn" ) @@ -41,6 +41,7 @@ type peerConfig struct { onPeerError func(Peer, interface{}) outbound, persistent bool reactorsByCh map[byte]Reactor + metrics *Metrics } // Transport emits and connects to Peers. The implementation of Peer is left to @@ -411,6 +412,7 @@ func (mt *MultiplexTransport) wrapPeer( cfg.reactorsByCh, cfg.chDescs, cfg.onPeerError, + PeerMetrics(cfg.metrics), ) // Wait for Peer to Stop so we can cleanup. From 110b07fb3fe7d9930f066ce2b46335f53673c11e Mon Sep 17 00:00:00 2001 From: goolAdapter <267310165@qq.com> Date: Tue, 25 Sep 2018 19:22:45 +0800 Subject: [PATCH 005/113] libs: Call Flush() before rename #2428 (#2439) * fix Group.RotateFile need call Flush() before rename. #2428 * fix some review issue. #2428 refactor Group's config: replace setting member with initial option * fix a handwriting mistake * fix a time window error between rename and write. * fix a syntax mistake. 
* change option name Get_ to With_ * fix review issue * fix review issue --- CHANGELOG_PENDING.md | 1 + consensus/wal.go | 4 +- consensus/wal_generator.go | 38 ++++++++----- consensus/wal_test.go | 50 +++++++++++++++++ libs/autofile/cmd/logjack.go | 5 +- libs/autofile/group.go | 105 ++++++++++++++++++++++------------- libs/autofile/group_test.go | 4 +- 7 files changed, 146 insertions(+), 61 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 3893cc4cf..26a31461f 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -18,4 +18,5 @@ IMPROVEMENTS: - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics BUG FIXES: +- [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time diff --git a/consensus/wal.go b/consensus/wal.go index 10bef542b..6472c2573 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -73,13 +73,13 @@ type baseWAL struct { enc *WALEncoder } -func NewWAL(walFile string) (*baseWAL, error) { +func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*baseWAL, error) { err := cmn.EnsureDir(filepath.Dir(walFile), 0700) if err != nil { return nil, errors.Wrap(err, "failed to ensure WAL directory is in place") } - group, err := auto.OpenGroup(walFile) + group, err := auto.OpenGroup(walFile, groupOptions...) if err != nil { return nil, err } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index cdb667edf..980a44892 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "fmt" + "io" "os" "path/filepath" "strings" @@ -23,12 +24,11 @@ import ( "github.com/tendermint/tendermint/types" ) -// WALWithNBlocks generates a consensus WAL. It does this by spining up a +// WALGenerateNBlocks generates a consensus WAL. It does this by spining up a // stripped down version of node (proxy app, event bus, consensus state) with a // persistent kvstore application and special consensus wal instance -// (byteBufferWAL) and waits until numBlocks are created. Then it returns a WAL -// content. If the node fails to produce given numBlocks, it returns an error. -func WALWithNBlocks(numBlocks int) (data []byte, err error) { +// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error. 
+func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) { config := getConfig() app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) @@ -43,26 +43,26 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { privValidator := privval.LoadOrGenFilePV(privValidatorFile) genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) if err != nil { - return nil, errors.Wrap(err, "failed to read genesis file") + return errors.Wrap(err, "failed to read genesis file") } stateDB := db.NewMemDB() blockStoreDB := db.NewMemDB() state, err := sm.MakeGenesisState(genDoc) if err != nil { - return nil, errors.Wrap(err, "failed to make genesis state") + return errors.Wrap(err, "failed to make genesis state") } blockStore := bc.NewBlockStore(blockStoreDB) proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start proxy app connections") + return errors.Wrap(err, "failed to start proxy app connections") } defer proxyApp.Stop() eventBus := types.NewEventBus() eventBus.SetLogger(logger.With("module", "events")) if err := eventBus.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start event bus") + return errors.Wrap(err, "failed to start event bus") } defer eventBus.Stop() mempool := sm.MockMempool{} @@ -78,8 +78,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { ///////////////////////////////////////////////////////////////////////////// // set consensus wal to buffered WAL, which will write all incoming msgs to buffer - var b bytes.Buffer - wr := bufio.NewWriter(&b) numBlocksWritten := make(chan struct{}) wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) // see wal.go#103 @@ -87,20 +85,32 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { consensusState.wal = wal if err := consensusState.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start consensus state") + return errors.Wrap(err, "failed to start consensus state") } select { case <-numBlocksWritten: consensusState.Stop() - wr.Flush() - return b.Bytes(), nil + return nil case <-time.After(1 * time.Minute): consensusState.Stop() - return []byte{}, fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) + return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) } } +//WALWithNBlocks returns a WAL content with numBlocks. 
+func WALWithNBlocks(numBlocks int) (data []byte, err error) { + var b bytes.Buffer + wr := bufio.NewWriter(&b) + + if err := WALGenerateNBlocks(wr, numBlocks); err != nil { + return []byte{}, err + } + + wr.Flush() + return b.Bytes(), nil +} + // f**ing long, but unique for each test func makePathname() string { // get path diff --git a/consensus/wal_test.go b/consensus/wal_test.go index e5744c0a1..c45f6acee 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -4,11 +4,16 @@ import ( "bytes" "crypto/rand" "fmt" + "io/ioutil" + "os" + "path/filepath" + // "sync" "testing" "time" "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/libs/autofile" tmtypes "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" @@ -16,6 +21,51 @@ import ( "github.com/stretchr/testify/require" ) +func TestWALTruncate(t *testing.T) { + walDir, err := ioutil.TempDir("", "wal") + if err != nil { + panic(fmt.Errorf("failed to create temp WAL file: %v", err)) + } + defer os.RemoveAll(walDir) + + walFile := filepath.Join(walDir, "wal") + + //this magic number 4K can truncate the content when RotateFile. defaultHeadSizeLimit(10M) is hard to simulate. + //this magic number 1 * time.Millisecond make RotateFile check frequently. defaultGroupCheckDuration(5s) is hard to simulate. + wal, err := NewWAL(walFile, autofile.GroupHeadSizeLimit(4096), autofile.GroupCheckDuration(1*time.Millisecond)) + if err != nil { + t.Fatal(err) + } + + wal.Start() + defer wal.Stop() + + //60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file. + //at this time, RotateFile is called, truncate content exist in each file. + err = WALGenerateNBlocks(wal.Group(), 60) + if err != nil { + t.Fatal(err) + } + + time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run + + wal.Group().Flush() + + h := int64(50) + gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) + assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h)) + assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h)) + assert.NotNil(t, gr, "expected group not to be nil") + defer gr.Close() + + dec := NewWALDecoder(gr) + msg, err := dec.Decode() + assert.NoError(t, err, "expected to decode a message") + rs, ok := msg.Msg.(tmtypes.EventDataRoundState) + assert.True(t, ok, "expected message of type EventDataRoundState") + assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height")) +} + func TestWALEncoderDecoder(t *testing.T) { now := tmtime.Now() msgs := []TimedWALMessage{ diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go index 17b482bed..ead3f8305 100644 --- a/libs/autofile/cmd/logjack.go +++ b/libs/autofile/cmd/logjack.go @@ -39,13 +39,12 @@ func main() { } // Open Group - group, err := auto.OpenGroup(headPath) + group, err := auto.OpenGroup(headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) if err != nil { fmt.Printf("logjack couldn't create output file %v\n", headPath) os.Exit(1) } - group.SetHeadSizeLimit(chopSize) - group.SetTotalSizeLimit(limitSize) + err = group.Start() if err != nil { fmt.Printf("logjack couldn't start with file %v\n", headPath) diff --git a/libs/autofile/group.go b/libs/autofile/group.go index 286447cda..807f7e1ed 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -19,10 +19,10 @@ import ( ) const ( - groupCheckDuration = 5000 * time.Millisecond - 
defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB - defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB - maxFilesToRemove = 4 // needs to be greater than 1 + defaultGroupCheckDuration = 5000 * time.Millisecond + defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB + defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB + maxFilesToRemove = 4 // needs to be greater than 1 ) /* @@ -56,16 +56,17 @@ assuming that marker lines are written occasionally. type Group struct { cmn.BaseService - ID string - Head *AutoFile // The head AutoFile to write to - headBuf *bufio.Writer - Dir string // Directory that contains .Head - ticker *time.Ticker - mtx sync.Mutex - headSizeLimit int64 - totalSizeLimit int64 - minIndex int // Includes head - maxIndex int // Includes head, where Head will move to + ID string + Head *AutoFile // The head AutoFile to write to + headBuf *bufio.Writer + Dir string // Directory that contains .Head + ticker *time.Ticker + mtx sync.Mutex + headSizeLimit int64 + totalSizeLimit int64 + groupCheckDuration time.Duration + minIndex int // Includes head + maxIndex int // Includes head, where Head will move to // TODO: When we start deleting files, we need to start tracking GroupReaders // and their dependencies. @@ -73,7 +74,7 @@ type Group struct { // OpenGroup creates a new Group with head at headPath. It returns an error if // it fails to open head file. -func OpenGroup(headPath string) (g *Group, err error) { +func OpenGroup(headPath string, groupOptions ...func(*Group)) (g *Group, err error) { dir := path.Dir(headPath) head, err := OpenAutoFile(headPath) if err != nil { @@ -81,15 +82,21 @@ func OpenGroup(headPath string) (g *Group, err error) { } g = &Group{ - ID: "group:" + head.ID, - Head: head, - headBuf: bufio.NewWriterSize(head, 4096*10), - Dir: dir, - headSizeLimit: defaultHeadSizeLimit, - totalSizeLimit: defaultTotalSizeLimit, - minIndex: 0, - maxIndex: 0, + ID: "group:" + head.ID, + Head: head, + headBuf: bufio.NewWriterSize(head, 4096*10), + Dir: dir, + headSizeLimit: defaultHeadSizeLimit, + totalSizeLimit: defaultTotalSizeLimit, + groupCheckDuration: defaultGroupCheckDuration, + minIndex: 0, + maxIndex: 0, } + + for _, option := range groupOptions { + option(g) + } + g.BaseService = *cmn.NewBaseService(nil, "Group", g) gInfo := g.readGroupInfo() @@ -98,10 +105,31 @@ func OpenGroup(headPath string) (g *Group, err error) { return } +// GroupCheckDuration allows you to overwrite default groupCheckDuration. +func GroupCheckDuration(duration time.Duration) func(*Group) { + return func(g *Group) { + g.groupCheckDuration = duration + } +} + +// GroupHeadSizeLimit allows you to overwrite default head size limit - 10MB. +func GroupHeadSizeLimit(limit int64) func(*Group) { + return func(g *Group) { + g.headSizeLimit = limit + } +} + +// GroupTotalSizeLimit allows you to overwrite default total size limit of the group - 1GB. +func GroupTotalSizeLimit(limit int64) func(*Group) { + return func(g *Group) { + g.totalSizeLimit = limit + } +} + // OnStart implements Service by starting the goroutine that checks file and // group limits. func (g *Group) OnStart() error { - g.ticker = time.NewTicker(groupCheckDuration) + g.ticker = time.NewTicker(g.groupCheckDuration) go g.processTicks() return nil } @@ -122,13 +150,6 @@ func (g *Group) Close() { g.mtx.Unlock() } -// SetHeadSizeLimit allows you to overwrite default head size limit - 10MB. 
-func (g *Group) SetHeadSizeLimit(limit int64) { - g.mtx.Lock() - g.headSizeLimit = limit - g.mtx.Unlock() -} - // HeadSizeLimit returns the current head size limit. func (g *Group) HeadSizeLimit() int64 { g.mtx.Lock() @@ -136,14 +157,6 @@ func (g *Group) HeadSizeLimit() int64 { return g.headSizeLimit } -// SetTotalSizeLimit allows you to overwrite default total size limit of the -// group - 1GB. -func (g *Group) SetTotalSizeLimit(limit int64) { - g.mtx.Lock() - g.totalSizeLimit = limit - g.mtx.Unlock() -} - // TotalSizeLimit returns total size limit of the group. func (g *Group) TotalSizeLimit() int64 { g.mtx.Lock() @@ -266,6 +279,14 @@ func (g *Group) RotateFile() { headPath := g.Head.Path + if err := g.headBuf.Flush(); err != nil { + panic(err) //panic is used for consistent with below + } + + if err := g.Head.Sync(); err != nil { + panic(err) + } + if err := g.Head.closeFile(); err != nil { panic(err) } @@ -275,6 +296,12 @@ func (g *Group) RotateFile() { panic(err) } + //make sure head file exist, there is a window time between rename and next write + //when NewReader(maxIndex), lead to "open /tmp/wal058868562/wal: no such file or directory" + if err := g.Head.openFile(); err != nil { + panic(err) + } + g.maxIndex++ } diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index d87bdba82..e173e4996 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -23,12 +23,10 @@ func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group require.NoError(t, err, "Error creating dir") headPath := testDir + "/myfile" - g, err := OpenGroup(headPath) + g, err := OpenGroup(headPath, GroupHeadSizeLimit(headSizeLimit)) require.NoError(t, err, "Error opening Group") require.NotEqual(t, nil, g, "Failed to create Group") - g.SetHeadSizeLimit(headSizeLimit) - return g } From cf8b42d813be9b502e696e95b27a48d00653bde5 Mon Sep 17 00:00:00 2001 From: zramsay Date: Tue, 25 Sep 2018 17:15:08 -0400 Subject: [PATCH 006/113] rpc/core: ints are strings in responses, closes #1896 --- rpc/core/abci.go | 6 ++--- rpc/core/blocks.go | 60 +++++++++++++++++++++---------------------- rpc/core/consensus.go | 60 +++++++++++++++++++++---------------------- rpc/core/doc.go | 2 +- rpc/core/mempool.go | 14 +++++----- rpc/core/net.go | 4 +-- rpc/core/tx.go | 20 +++++++-------- 7 files changed, 83 insertions(+), 83 deletions(-) diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 3f399be80..9c7af92cd 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -28,12 +28,12 @@ import ( // "result": { // "response": { // "log": "exists", -// "height": 0, +// "height": "0", // "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C", // "value": "61626364", // "key": "61626364", -// "index": -1, -// "code": 0 +// "index": "-1", +// "code": "0" // } // }, // "id": "", diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index bb69db63f..a9252f553 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -32,13 +32,13 @@ import ( // "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 10, +// "height": "10", // "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, @@ -49,13 +49,13 @@ import ( // "block_id": { // "parts": { // "hash": 
"277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 +// "total": "1" // }, // "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" // } // } // ], -// "last_height": 5493 +// "last_height": "5493" // }, // "id": "", // "jsonrpc": "2.0" @@ -143,21 +143,21 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, -// "type": 2, -// "round": 0, -// "height": 9, -// "validator_index": 0, +// "type": "2", +// "round": "0", +// "height": "9", +// "validator_index": "0", // "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" // } // ], // "blockID": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // } @@ -168,13 +168,13 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 10, +// "height": "10", // "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, @@ -187,13 +187,13 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 10, +// "height": "10", // "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, @@ -204,7 +204,7 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "block_id": { // "parts": { // "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 +// "total": "1" // }, // "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" // } @@ -255,21 +255,21 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { // "block_id": { // "parts": { // "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": 1 +// "total": "1" // }, // "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" // }, -// "type": 2, -// "round": 0, -// "height": 11, -// "validator_index": 0, +// "type": "2", +// "round": "0", +// "height": "11", +// "validator_index": "0", // "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" // } // ], // "blockID": { // "parts": { // "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": 1 +// "total": "1" // }, // "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" // } @@ -277,13 +277,13 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { // "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 11, +// "height": "11", // "time": "2017-05-29T15:05:54.893Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 +// "total": "1" // }, // "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" // }, @@ -337,14 +337,14 @@ func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { // // ```json // { -// "height": 10, +// "height": "10", // "results": [ // { -// "code": 0, +// "code": "0", // "data": "CAFE00F00D" // }, // { -// "code": 102, +// "code": "102", 
// "data": "" // } // ] diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index a4a2c667c..1d5f92753 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -28,8 +28,8 @@ import ( // "result": { // "validators": [ // { -// "accum": 0, -// "voting_power": 10, +// "accum": "0", +// "voting_power": "10", // "pub_key": { // "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", // "type": "ed25519" @@ -37,7 +37,7 @@ import ( // "address": "E89A51D60F68385E09E716D353373B11F8FACD62" // } // ], -// "block_height": 5241 +// "block_height": "5241" // }, // "id": "", // "jsonrpc": "2.0" @@ -79,9 +79,9 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "id": "", // "result": { // "round_state": { -// "height": 7185, -// "round": 0, -// "step": 1, +// "height": "7185", +// "round": "0", +// "step": "1", // "start_time": "2018-05-12T13:57:28.440293621-07:00", // "commit_time": "2018-05-12T13:57:27.440293621-07:00", // "validators": { @@ -92,8 +92,8 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // ], // "proposer": { @@ -102,27 +102,27 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // }, // "proposal": null, // "proposal_block": null, // "proposal_block_parts": null, -// "locked_round": 0, +// "locked_round": "0", // "locked_block": null, // "locked_block_parts": null, -// "valid_round": 0, +// "valid_round": "0", // "valid_block": null, // "valid_block_parts": null, // "votes": [ // { -// "round": 0, +// "round": "0", // "prevotes": "_", // "precommits": "_" // } // ], -// "commit_round": -1, +// "commit_round": "-1", // "last_commit": { // "votes": [ // "Vote{0:B5B3D40BE539 7184/00/2(Precommit) 14F946FA7EF0 /702B1B1A602A.../ @ 2018-05-12T20:57:27.342Z}" @@ -138,8 +138,8 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // ], // "proposer": { @@ -148,8 +148,8 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // } // }, @@ -158,30 +158,30 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "node_address": "30ad1854af22506383c3f0e57fb3c7f90984c5e8@172.16.63.221:26656", // "peer_state": { // "round_state": { -// "height": 7185, -// "round": 0, -// "step": 1, +// "height": "7185", +// "round": "0", +// "step": "1", // "start_time": "2018-05-12T13:57:27.438039872-07:00", // "proposal": false, // "proposal_block_parts_header": { -// "total": 0, +// "total": "0", // "hash": "" // }, // "proposal_block_parts": null, -// "proposal_pol_round": -1, +// "proposal_pol_round": "-1", // "proposal_pol": "_", // "prevotes": "_", // "precommits": "_", -// "last_commit_round": 0, +// "last_commit_round": "0", // "last_commit": "x", -// "catchup_commit_round": -1, +// 
"catchup_commit_round": "-1", // "catchup_commit": "_" // }, // "stats": { -// "last_vote_height": 7184, -// "votes": 255, -// "last_block_part_height": 7184, -// "block_parts": 255 +// "last_vote_height": "7184", +// "votes": "255", +// "last_block_part_height": "7184", +// "block_parts": "255" // } // } // } @@ -241,7 +241,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { // "valid_block_hash": "", // "height_vote_set": [ // { -// "round": 0, +// "round": "0", // "prevotes": [ // "nil-Vote" // ], diff --git a/rpc/core/doc.go b/rpc/core/doc.go index 603b6679e..5378dde24 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -33,7 +33,7 @@ curl 'localhost:26657/broadcast_tx_sync?tx="abc"' "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", "log": "", "data": "", - "code": 0 + "code": "0" }, "id": "", "jsonrpc": "2.0" diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 728d77f63..c015363af 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -36,7 +36,7 @@ import ( // "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", // "log": "", // "data": "", -// "code": 0 +// "code": "0" // }, // "id": "", // "jsonrpc": "2.0" @@ -74,7 +74,7 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // "jsonrpc": "2.0", // "id": "", // "result": { -// "code": 0, +// "code": "0", // "data": "", // "log": "", // "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" @@ -126,17 +126,17 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // { // "error": "", // "result": { -// "height": 26682, +// "height": "26682", // "hash": "75CA0F856A4DA078FC4911580360E70CEFB2EBEE", // "deliver_tx": { // "log": "", // "data": "", -// "code": 0 +// "code": "0" // }, // "check_tx": { // "log": "", // "data": "", -// "code": 0 +// "code": "0" // } // }, // "id": "", @@ -227,7 +227,7 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { // "error": "", // "result": { // "txs": [], -// "n_txs": 0 +// "n_txs": "0" // }, // "id": "", // "jsonrpc": "2.0" @@ -265,7 +265,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { // "error": "", // "result": { // "txs": null, -// "n_txs": 0 +// "n_txs": "0" // }, // "id": "", // "jsonrpc": "2.0" diff --git a/rpc/core/net.go b/rpc/core/net.go index acb18a34f..9816d2f63 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -23,7 +23,7 @@ import ( // { // "error": "", // "result": { -// "n_peers": 0, +// "n_peers": "0", // "peers": [], // "listeners": [ // "Listener(@10.0.2.15:26656)" @@ -102,7 +102,7 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, // "validators": [ // { // "name": "", -// "power": 10, +// "power": "10", // "pub_key": { // "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", // "type": "ed25519" diff --git a/rpc/core/tx.go b/rpc/core/tx.go index f53d82f14..ba6320016 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -36,17 +36,17 @@ import ( // }, // "Data": "YWJjZA==", // "RootHash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", -// "Total": 1, -// "Index": 0 +// "Total": "1", +// "Index": "0" // }, // "tx": "YWJjZA==", // "tx_result": { // "log": "", // "data": "", -// "code": 0 +// "code": "0" // }, -// "index": 0, -// "height": 52, +// "index": "0", +// "height": "52", // "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" // }, // "id": "", @@ -140,17 +140,17 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { // }, // "Data": "mvZHHa7HhZ4aRT0xMDA=", // "RootHash": 
"F6541223AA46E428CB1070E9840D2C3DF3B6D776", -// "Total": 32, -// "Index": 31 +// "Total": "32", +// "Index": "31" // }, // "tx": "mvZHHa7HhZ4aRT0xMDA=", // "tx_result": {}, -// "index": 31, -// "height": 12, +// "index": "31", +// "height": "12", // "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" // } // ], -// "total_count": 1 +// "total_count": "1" // } // } // ``` From df329e8f2763498e092cd037ad62501597339e77 Mon Sep 17 00:00:00 2001 From: zramsay Date: Tue, 25 Sep 2018 18:11:18 -0400 Subject: [PATCH 007/113] rpc/libs/doc: formatting for godoc, closes #2420 --- rpc/lib/doc.go | 187 +++++++++++++++++++++++-------------------------- 1 file changed, 86 insertions(+), 101 deletions(-) diff --git a/rpc/lib/doc.go b/rpc/lib/doc.go index b96b9123c..dbdb362da 100644 --- a/rpc/lib/doc.go +++ b/rpc/lib/doc.go @@ -1,103 +1,88 @@ -/* -HTTP RPC server supporting calls via uri params, jsonrpc, and jsonrpc over websockets - -# Client Requests - -Suppose we want to expose the rpc function `HelloWorld(name string, num int)`. - -## GET (URI) - -As a GET request, it would have URI encoded parameters, and look like: - -``` -curl 'http://localhost:8008/hello_world?name="my_world"&num=5' -``` - -Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`. -This should also work: - -``` -curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 -``` - -A GET request to `/` returns a list of available endpoints. -For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. - -## POST (JSONRPC) - -As a POST request, we use JSONRPC. For instance, the same request would have this as the body: - -``` -{ - "jsonrpc": "2.0", - "id": "anything", - "method": "hello_world", - "params": { - "name": "my_world", - "num": 5 - } -} -``` - -With the above saved in file `data.json`, we can make the request with - -``` -curl --data @data.json http://localhost:8008 -``` - -## WebSocket (JSONRPC) - -All requests are exposed over websocket in the same form as the POST JSONRPC. -Websocket connections are available at their own endpoint, typically `/websocket`, -though this is configurable when starting the server. - -# Server Definition - -Define some types and routes: - -``` -type ResultStatus struct { - Value string -} - +// HTTP RPC server supporting calls via uri params, jsonrpc, and jsonrpc over websockets +// +// Client Requests +// +// Suppose we want to expose the rpc function `HelloWorld(name string, num int)`. +// +// GET (URI) +// +// As a GET request, it would have URI encoded parameters, and look like: +// +// curl 'http://localhost:8008/hello_world?name="my_world"&num=5' +// +// Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`. +// This should also work: +// +// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 +// +// A GET request to `/` returns a list of available endpoints. +// For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. +// +// POST (JSONRPC) +// +// As a POST request, we use JSONRPC. 
For instance, the same request would have this as the body: +// +// { +// "jsonrpc": "2.0", +// "id": "anything", +// "method": "hello_world", +// "params": { +// "name": "my_world", +// "num": 5 +// } +// } +// +// With the above saved in file `data.json`, we can make the request with +// +// curl --data @data.json http://localhost:8008 +// +// +// WebSocket (JSONRPC) +// +// All requests are exposed over websocket in the same form as the POST JSONRPC. +// Websocket connections are available at their own endpoint, typically `/websocket`, +// though this is configurable when starting the server. +// +// Server Definition +// +// Define some types and routes: +// +// type ResultStatus struct { +// Value string +// } +// // Define some routes -var Routes = map[string]*rpcserver.RPCFunc{ - "status": rpcserver.NewRPCFunc(Status, "arg"), -} - -// an rpc function -func Status(v string) (*ResultStatus, error) { - return &ResultStatus{v}, nil -} - -``` - -Now start the server: - -``` -mux := http.NewServeMux() -rpcserver.RegisterRPCFuncs(mux, Routes) -wm := rpcserver.NewWebsocketManager(Routes) -mux.HandleFunc("/websocket", wm.WebsocketHandler) -logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -go func() { - _, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger) - if err != nil { - panic(err) - } -}() - -``` - -Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) - -Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. -Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. - - -# Examples - -* [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) -* [tm-monitor](https://github.com/tendermint/tendermint/blob/master/tools/tm-monitor/rpc.go) -*/ +// +// var Routes = map[string]*rpcserver.RPCFunc{ +// "status": rpcserver.NewRPCFunc(Status, "arg"), +// } +// +// An rpc function: +// +// func Status(v string) (*ResultStatus, error) { +// return &ResultStatus{v}, nil +// } +// +// Now start the server: +// +// mux := http.NewServeMux() +// rpcserver.RegisterRPCFuncs(mux, Routes) +// wm := rpcserver.NewWebsocketManager(Routes) +// mux.HandleFunc("/websocket", wm.WebsocketHandler) +// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +// go func() { +// _, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger) +// if err != nil { +// panic(err) +// } +// }() +// +// Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) +// Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. +// Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. 
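As a usage sketch tying the server definition back to the client-request section above: the `status` route registered with `rpcserver.NewRPCFunc(Status, "arg")` could be queried either with URI parameters or with a JSONRPC body. This assumes the example server above is listening on 0.0.0.0:8008; the argument value `"xyz"` is an arbitrary placeholder, not something from the original.

```
# URI form (string args are quoted, as in the hello_world example)
curl 'http://localhost:8008/status?arg="xyz"'

# JSONRPC form over POST
curl --data '{"jsonrpc":"2.0","id":"anything","method":"status","params":{"arg":"xyz"}}' http://localhost:8008
```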
+// +// Examples +// +// - [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) +// - [tm-monitor](https://github.com/tendermint/tendermint/blob/master/tools/tm-monitor/rpc.go) package rpc From 4c4a95ca53b17dd3a73eb03669cf6013d46e1bdf Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 26 Sep 2018 14:04:44 +0400 Subject: [PATCH 008/113] config: Add ValidateBasic (#2485) * add missing options to config.toml template and docs Refs #2232 * config#ValidateBasic Refs #2232 * [config] timeouts as time.Duration, not ints Why: - native type provides better guarantees than ", in ms" comment (harder to shoot yourself in the leg) - flexibility: you can change units --- CHANGELOG_PENDING.md | 2 + cmd/tendermint/commands/root.go | 4 + config/config.go | 194 ++++++++++++++++++-------- config/config_test.go | 10 ++ config/toml.go | 40 +++--- consensus/mempool_test.go | 2 +- consensus/reactor.go | 24 ++-- consensus/state.go | 2 +- consensus/state_test.go | 4 +- docs/tendermint-core/configuration.md | 38 ++--- 10 files changed, 210 insertions(+), 110 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 26a31461f..c1db67632 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -10,12 +10,14 @@ BREAKING CHANGES: * Go API - [node] Remove node.RunForever +- [config] \#2232 timeouts as time.Duration, not ints FEATURES: IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics +- [config] \#2232 added ValidateBasic method, which performs basic checks BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 3c67ddc14..89ffbe749 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -1,6 +1,7 @@ package commands import ( + "fmt" "os" "github.com/spf13/cobra" @@ -35,6 +36,9 @@ func ParseConfig() (*cfg.Config, error) { } conf.SetRoot(conf.RootDir) cfg.EnsureRoot(conf.RootDir) + if err = conf.ValidateBasic(); err != nil { + return nil, fmt.Errorf("Error in config file: %v", err) + } return conf, err } diff --git a/config/config.go b/config/config.go index ebb7a9ac7..87a741311 100644 --- a/config/config.go +++ b/config/config.go @@ -1,6 +1,7 @@ package config import ( + "errors" "fmt" "os" "path/filepath" @@ -89,6 +90,88 @@ func (cfg *Config) SetRoot(root string) *Config { return cfg } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
+func (cfg *Config) ValidateBasic() error { + // RPCConfig + if cfg.RPC.GRPCMaxOpenConnections < 0 { + return errors.New("[rpc] grpc_max_open_connections can't be negative") + } + if cfg.RPC.MaxOpenConnections < 0 { + return errors.New("[rpc] max_open_connections can't be negative") + } + + // P2PConfig + if cfg.P2P.MaxNumInboundPeers < 0 { + return errors.New("[p2p] max_num_inbound_peers can't be negative") + } + if cfg.P2P.MaxNumOutboundPeers < 0 { + return errors.New("[p2p] max_num_outbound_peers can't be negative") + } + if cfg.P2P.FlushThrottleTimeout < 0 { + return errors.New("[p2p] flush_throttle_timeout can't be negative") + } + if cfg.P2P.MaxPacketMsgPayloadSize < 0 { + return errors.New("[p2p] max_packet_msg_payload_size can't be negative") + } + if cfg.P2P.SendRate < 0 { + return errors.New("[p2p] send_rate can't be negative") + } + if cfg.P2P.RecvRate < 0 { + return errors.New("[p2p] recv_rate can't be negative") + } + + // MempoolConfig + if cfg.Mempool.Size < 0 { + return errors.New("[mempool] size can't be negative") + } + if cfg.Mempool.CacheSize < 0 { + return errors.New("[mempool] cache_size can't be negative") + } + + // ConsensusConfig + if cfg.Consensus.TimeoutPropose < 0 { + return errors.New("[consensus] timeout_propose can't be negative") + } + if cfg.Consensus.TimeoutProposeDelta < 0 { + return errors.New("[consensus] timeout_propose_delta can't be negative") + } + if cfg.Consensus.TimeoutPrevote < 0 { + return errors.New("[consensus] timeout_prevote can't be negative") + } + if cfg.Consensus.TimeoutPrevoteDelta < 0 { + return errors.New("[consensus] timeout_prevote_delta can't be negative") + } + if cfg.Consensus.TimeoutPrecommit < 0 { + return errors.New("[consensus] timeout_precommit can't be negative") + } + if cfg.Consensus.TimeoutPrecommitDelta < 0 { + return errors.New("[consensus] timeout_precommit_delta can't be negative") + } + if cfg.Consensus.TimeoutCommit < 0 { + return errors.New("[consensus] timeout_commit can't be negative") + } + if cfg.Consensus.CreateEmptyBlocksInterval < 0 { + return errors.New("[consensus] create_empty_blocks_interval can't be negative") + } + if cfg.Consensus.PeerGossipSleepDuration < 0 { + return errors.New("[consensus] peer_gossip_sleep_duration can't be negative") + } + if cfg.Consensus.PeerQueryMaj23SleepDuration < 0 { + return errors.New("[consensus] peer_query_maj23_sleep_duration can't be negative") + } + if cfg.Consensus.BlockTimeIota < 0 { + return errors.New("[consensus] blocktime_iota can't be negative") + } + + // InstrumentationConfig + if cfg.Instrumentation.MaxOpenConnections < 0 { + return errors.New("[instrumentation] max_open_connections can't be negative") + } + + return nil +} + //----------------------------------------------------------------------------- // BaseConfig @@ -301,8 +384,8 @@ type P2PConfig struct { // Maximum number of outbound peers to connect to, excluding persistent peers MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"` - // Time to wait before flushing messages out on the connection, in ms - FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` + // Time to wait before flushing messages out on the connection + FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"` // Maximum size of a message packet payload, in bytes MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` @@ -351,7 +434,7 @@ func DefaultP2PConfig() *P2PConfig { AddrBookStrict: true, MaxNumInboundPeers: 40, MaxNumOutboundPeers: 10, - FlushThrottleTimeout: 100, 
+ FlushThrottleTimeout: 100 * time.Millisecond, MaxPacketMsgPayloadSize: 1024, // 1 kB SendRate: 5120000, // 5 mB/s RecvRate: 5120000, // 5 mB/s @@ -450,72 +533,70 @@ type ConsensusConfig struct { WalPath string `mapstructure:"wal_file"` walFile string // overrides WalPath if set - // All timeouts are in milliseconds - TimeoutPropose int `mapstructure:"timeout_propose"` - TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"` - TimeoutPrevote int `mapstructure:"timeout_prevote"` - TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"` - TimeoutPrecommit int `mapstructure:"timeout_precommit"` - TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"` - TimeoutCommit int `mapstructure:"timeout_commit"` + TimeoutPropose time.Duration `mapstructure:"timeout_propose"` + TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"` + TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"` + TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"` + TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` + TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"` + TimeoutCommit time.Duration `mapstructure:"timeout_commit"` // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` - // EmptyBlocks mode and possible interval between empty blocks in seconds - CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` - CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"` + // EmptyBlocks mode and possible interval between empty blocks + CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` + CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"` - // Reactor sleep duration parameters are in milliseconds - PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"` - PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"` + // Reactor sleep duration parameters + PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"` + PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"` - // Block time parameters in milliseconds. Corresponds to the minimum time increment between consecutive blocks. - BlockTimeIota int `mapstructure:"blocktime_iota"` + // Block time parameters. Corresponds to the minimum time increment between consecutive blocks. 
+ BlockTimeIota time.Duration `mapstructure:"blocktime_iota"` } // DefaultConsensusConfig returns a default configuration for the consensus service func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), - TimeoutPropose: 3000, - TimeoutProposeDelta: 500, - TimeoutPrevote: 1000, - TimeoutPrevoteDelta: 500, - TimeoutPrecommit: 1000, - TimeoutPrecommitDelta: 500, - TimeoutCommit: 1000, + TimeoutPropose: 3000 * time.Millisecond, + TimeoutProposeDelta: 500 * time.Millisecond, + TimeoutPrevote: 1000 * time.Millisecond, + TimeoutPrevoteDelta: 500 * time.Millisecond, + TimeoutPrecommit: 1000 * time.Millisecond, + TimeoutPrecommitDelta: 500 * time.Millisecond, + TimeoutCommit: 1000 * time.Millisecond, SkipTimeoutCommit: false, CreateEmptyBlocks: true, - CreateEmptyBlocksInterval: 0, - PeerGossipSleepDuration: 100, - PeerQueryMaj23SleepDuration: 2000, - BlockTimeIota: 1000, + CreateEmptyBlocksInterval: 0 * time.Second, + PeerGossipSleepDuration: 100 * time.Millisecond, + PeerQueryMaj23SleepDuration: 2000 * time.Millisecond, + BlockTimeIota: 1000 * time.Millisecond, } } // TestConsensusConfig returns a configuration for testing the consensus service func TestConsensusConfig() *ConsensusConfig { cfg := DefaultConsensusConfig() - cfg.TimeoutPropose = 100 - cfg.TimeoutProposeDelta = 1 - cfg.TimeoutPrevote = 10 - cfg.TimeoutPrevoteDelta = 1 - cfg.TimeoutPrecommit = 10 - cfg.TimeoutPrecommitDelta = 1 - cfg.TimeoutCommit = 10 + cfg.TimeoutPropose = 100 * time.Millisecond + cfg.TimeoutProposeDelta = 1 * time.Millisecond + cfg.TimeoutPrevote = 10 * time.Millisecond + cfg.TimeoutPrevoteDelta = 1 * time.Millisecond + cfg.TimeoutPrecommit = 10 * time.Millisecond + cfg.TimeoutPrecommitDelta = 1 * time.Millisecond + cfg.TimeoutCommit = 10 * time.Millisecond cfg.SkipTimeoutCommit = true - cfg.PeerGossipSleepDuration = 5 - cfg.PeerQueryMaj23SleepDuration = 250 - cfg.BlockTimeIota = 10 + cfg.PeerGossipSleepDuration = 5 * time.Millisecond + cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond + cfg.BlockTimeIota = 10 * time.Millisecond return cfg } // MinValidVoteTime returns the minimum acceptable block time. // See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md). func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time { - return lastBlockTime. 
- Add(time.Duration(cfg.BlockTimeIota) * time.Millisecond) + return lastBlockTime.Add(cfg.BlockTimeIota) } // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step @@ -523,39 +604,30 @@ func (cfg *ConsensusConfig) WaitForTxs() bool { return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0 } -// EmptyBlocks returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available -func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration { - return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second -} - // Propose returns the amount of time to wait for a proposal func (cfg *ConsensusConfig) Propose(round int) time.Duration { - return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes func (cfg *ConsensusConfig) Prevote(round int) time.Duration { - return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits func (cfg *ConsensusConfig) Precommit(round int) time.Duration { - return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit). 
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time { - return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond) -} - -// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor -func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration { - return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond -} - -// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor -func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration { - return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond + return t.Add(cfg.TimeoutCommit) } // WalFile returns the full path to the write-ahead log file diff --git a/config/config_test.go b/config/config_test.go index 6379960fa..afdbed181 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2,6 +2,7 @@ package config import ( "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -26,3 +27,12 @@ func TestDefaultConfig(t *testing.T) { assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir()) } + +func TestConfigValidateBasic(t *testing.T) { + cfg := DefaultConfig() + assert.NoError(t, cfg.ValidateBasic()) + + // tamper with timeout_propose + cfg.Consensus.TimeoutPropose = -10 * time.Second + assert.Error(t, cfg.ValidateBasic()) +} diff --git a/config/toml.go b/config/toml.go index bc10590c8..846b33d16 100644 --- a/config/toml.go +++ b/config/toml.go @@ -99,7 +99,7 @@ priv_validator_file = "{{ js .BaseConfig.PrivValidator }}" priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}" # Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "{{ js .BaseConfig.NodeKey}}" +node_key_file = "{{ js .BaseConfig.NodeKey }}" # Mechanism to connect to the ABCI application: socket | grpc abci = "{{ .BaseConfig.ABCI }}" @@ -172,15 +172,15 @@ addr_book_file = "{{ js .P2P.AddrBook }}" # Set false for private or local networks addr_book_strict = {{ .P2P.AddrBookStrict }} -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }} - # Maximum number of inbound peers max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }} # Maximum number of outbound peers to connect to, excluding persistent peers max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }} +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}" + # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} @@ -202,6 +202,13 @@ seed_mode = {{ .P2P.SeedMode }} # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private_peer_ids = "{{ .P2P.PrivatePeerIDs }}" +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }} + +# Peer connection configuration. 
+handshake_timeout = "{{ .P2P.HandshakeTimeout }}" +dial_timeout = "{{ .P2P.DialTimeout }}" + ##### mempool configuration options ##### [mempool] @@ -221,25 +228,24 @@ cache_size = {{ .Mempool.CacheSize }} wal_file = "{{ js .Consensus.WalPath }}" -# All timeouts are in milliseconds -timeout_propose = {{ .Consensus.TimeoutPropose }} -timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }} -timeout_prevote = {{ .Consensus.TimeoutPrevote }} -timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }} -timeout_precommit = {{ .Consensus.TimeoutPrecommit }} -timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }} -timeout_commit = {{ .Consensus.TimeoutCommit }} +timeout_propose = "{{ .Consensus.TimeoutPropose }}" +timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}" +timeout_prevote = "{{ .Consensus.TimeoutPrevote }}" +timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}" +timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}" +timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}" +timeout_commit = "{{ .Consensus.TimeoutCommit }}" # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }} -# EmptyBlocks mode and possible interval between empty blocks in seconds +# EmptyBlocks mode and possible interval between empty blocks create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }} -create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }} +create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }} -peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }} +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" +peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" ##### transactions indexer configuration options ##### [tx_index] diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 950cf67d8..179766fd0 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -38,7 +38,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") - config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds()) + config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(1, false, 10) cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) cs.mempool.EnableTxsAvailable() diff --git a/consensus/reactor.go b/consensus/reactor.go index 2b4bab135..16e2e7e2e 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -508,7 +508,7 @@ OUTER_LOOP: // If height and round don't match, sleep. if (rs.Height != prs.Height) || (rs.Round != prs.Round) { //logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) continue OUTER_LOOP } @@ -544,7 +544,7 @@ OUTER_LOOP: } // Nothing to do. Sleep. 
- time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) continue OUTER_LOOP } } @@ -558,12 +558,12 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype if blockMeta == nil { logger.Error("Failed to load block meta", "ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height()) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) { logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping", "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } // Load the part @@ -571,7 +571,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype if part == nil { logger.Error("Could not load part", "index", index, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } // Send the part @@ -589,7 +589,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype return } //logger.Info("No parts to send in catch-up, sleeping") - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) } func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { @@ -658,7 +658,7 @@ OUTER_LOOP: sleeping = 1 } - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) continue OUTER_LOOP } } @@ -742,7 +742,7 @@ OUTER_LOOP: Type: types.VoteTypePrevote, BlockID: maj23, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } } @@ -759,7 +759,7 @@ OUTER_LOOP: Type: types.VoteTypePrecommit, BlockID: maj23, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } } @@ -776,7 +776,7 @@ OUTER_LOOP: Type: types.VoteTypePrevote, BlockID: maj23, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } } @@ -795,11 +795,11 @@ OUTER_LOOP: Type: types.VoteTypePrecommit, BlockID: commit.BlockID, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) continue OUTER_LOOP } diff --git a/consensus/state.go b/consensus/state.go index 12dfa4edf..35bbca0f6 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -782,7 +782,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height) if waitForTxs { if cs.config.CreateEmptyBlocksInterval > 0 { - cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, cstypes.RoundStepNewRound) + cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, cstypes.RoundStepNewRound) } go cs.proposalHeartbeat(height, round) } else { diff --git a/consensus/state_test.go b/consensus/state_test.go index 32fc5fd6a..4c34d9d2f 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -21,8 +21,8 @@ func init() { config = 
ResetConfig("consensus_state_test") } -func ensureProposeTimeout(timeoutPropose int) time.Duration { - return time.Duration(timeoutPropose*2) * time.Millisecond +func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration { + return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond } /* diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index d759ab9fd..c5b07497c 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -115,15 +115,15 @@ addr_book_file = "addrbook.json" # Set false for private or local networks addr_book_strict = true -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - # Maximum number of inbound peers max_num_inbound_peers = 40 # Maximum number of outbound peers to connect to, excluding persistent peers max_num_outbound_peers = 10 +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = 1024 @@ -145,6 +145,13 @@ seed_mode = false # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private_peer_ids = "" +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = true + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + ##### mempool configuration options ##### [mempool] @@ -164,25 +171,24 @@ cache_size = 100000 wal_file = "data/cs.wal/wal" -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 +timeout_propose = "3000ms" +timeout_propose_delta = "500ms" +timeout_prevote = "1000ms" +timeout_prevote_delta = "500ms" +timeout_precommit = "1000ms" +timeout_precommit_delta = "500ms" +timeout_commit = "1000ms" # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = false -# EmptyBlocks mode and possible interval between empty blocks in seconds +# EmptyBlocks mode and possible interval between empty blocks create_empty_blocks = true -create_empty_blocks_interval = 0 +create_empty_blocks_interval = "0s" -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2000ms" ##### transactions indexer configuration options ##### [tx_index] From d007ade6c35d38d5441adbb87d8df37dff562df5 Mon Sep 17 00:00:00 2001 From: zramsay Date: Wed, 26 Sep 2018 17:49:20 -0400 Subject: [PATCH 009/113] add version to docs --- docs/DOCS_README.md | 3 ++- docs/README.md | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index e87ef23df..e2f22ff6d 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -20,7 +20,8 @@ a private website repository has make targets consumed by a standard Jenkins tas ## README The [README.md](./README.md) is also the landing page for the documentation -on the website. +on the website. During the Jenkins build, the current commit is added to the bottom +of the README. ## Config.js diff --git a/docs/README.md b/docs/README.md index 58b3bcb6b..2ecf625e8 100644 --- a/docs/README.md +++ b/docs/README.md @@ -39,3 +39,7 @@ Dive deep into the spec. 
There's one for each Tendermint and the ABCI See [this file](./DOCS_README.md) for details of the build process and considerations when making changes. + +## Version + +This documentation is built from the following commit: From 8dda3c3b28e70a0305ea297ee5d95e80f9105860 Mon Sep 17 00:00:00 2001 From: HaoyangLiu Date: Sat, 29 Sep 2018 07:23:21 +0800 Subject: [PATCH 010/113] lite: Add synchronization in lite verify (#2396) * Implement issues 2386: add synchronization in lite verify and change all Certify to Verify * Replace make(chan struct{}, 0) with make(chan struct{}) * Parameterize memroy cache size and add concurrent test * Refactor import order --- cmd/tendermint/commands/lite.go | 4 +- lite/base_verifier.go | 14 +++--- lite/base_verifier_test.go | 2 +- lite/doc.go | 4 +- lite/dynamic_verifier.go | 58 +++++++++++++++++++++---- lite/dynamic_verifier_test.go | 75 +++++++++++++++++++++++++++++---- lite/proxy/query.go | 2 +- lite/proxy/verifier.go | 4 +- lite/proxy/wrapper.go | 4 +- lite/types.go | 2 +- 10 files changed, 135 insertions(+), 34 deletions(-) diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index edad4fbb7..150371d62 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -30,6 +30,7 @@ var ( nodeAddr string chainID string home string + cacheSize int ) func init() { @@ -37,6 +38,7 @@ func init() { LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address") LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID") LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") + LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size") } func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) { @@ -69,7 +71,7 @@ func runProxy(cmd *cobra.Command, args []string) error { node := rpcclient.NewHTTP(nodeAddr, "/websocket") logger.Info("Constructing Verifier...") - cert, err := proxy.NewVerifier(chainID, home, node, logger) + cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize) if err != nil { return cmn.ErrorWrap(err, "constructing Verifier") } diff --git a/lite/base_verifier.go b/lite/base_verifier.go index e60d3953a..fcde01c0e 100644 --- a/lite/base_verifier.go +++ b/lite/base_verifier.go @@ -12,7 +12,7 @@ var _ Verifier = (*BaseVerifier)(nil) // BaseVerifier lets us check the validity of SignedHeaders at height or // later, requiring sufficient votes (> 2/3) from the given valset. -// To certify blocks produced by a blockchain with mutable validator sets, +// To verify blocks produced by a blockchain with mutable validator sets, // use the DynamicVerifier. // TODO: Handle unbonding time. type BaseVerifier struct { @@ -40,15 +40,15 @@ func (bc *BaseVerifier) ChainID() string { } // Implements Verifier. -func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { +func (bc *BaseVerifier) Verify(signedHeader types.SignedHeader) error { - // We can't certify commits older than bc.height. + // We can't verify commits older than bc.height. if signedHeader.Height < bc.height { - return cmn.NewError("BaseVerifier height is %v, cannot certify height %v", + return cmn.NewError("BaseVerifier height is %v, cannot verify height %v", bc.height, signedHeader.Height) } - // We can't certify with the wrong validator set. + // We can't verify with the wrong validator set. 
if !bytes.Equal(signedHeader.ValidatorsHash, bc.valset.Hash()) { return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash()) @@ -57,7 +57,7 @@ func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { // Do basic sanity checks. err := signedHeader.ValidateBasic(bc.chainID) if err != nil { - return cmn.ErrorWrap(err, "in certify") + return cmn.ErrorWrap(err, "in verify") } // Check commit signatures. @@ -65,7 +65,7 @@ func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { bc.chainID, signedHeader.Commit.BlockID, signedHeader.Height, signedHeader.Commit) if err != nil { - return cmn.ErrorWrap(err, "in certify") + return cmn.ErrorWrap(err, "in verify") } return nil diff --git a/lite/base_verifier_test.go b/lite/base_verifier_test.go index dab7885f6..2ef1203fb 100644 --- a/lite/base_verifier_test.go +++ b/lite/base_verifier_test.go @@ -43,7 +43,7 @@ func TestBaseCert(t *testing.T) { for _, tc := range cases { sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals, []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(sh) + err := cert.Verify(sh) if tc.proper { assert.Nil(err, "%+v", err) } else { diff --git a/lite/doc.go b/lite/doc.go index 59f770567..2a0ba23ea 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -54,11 +54,11 @@ validator set, and that the height of the commit is at least height (or greater). SignedHeader.Commit may be signed by a different validator set, it can get -certified with a BaseVerifier as long as sufficient signatures from the +verified with a BaseVerifier as long as sufficient signatures from the previous validator set are present in the commit. DynamicVerifier - this Verifier implements an auto-update and persistence -strategy to certify any SignedHeader of the blockchain. +strategy to verify any SignedHeader of the blockchain. ## Provider and PersistentProvider diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go index 3d1a70f27..2dee69f9d 100644 --- a/lite/dynamic_verifier.go +++ b/lite/dynamic_verifier.go @@ -2,12 +2,15 @@ package lite import ( "bytes" - + "fmt" + "sync" log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" ) +const sizeOfPendingMap = 1024 + var _ Verifier = (*DynamicVerifier)(nil) // DynamicVerifier implements an auto-updating Verifier. It uses a @@ -21,6 +24,11 @@ type DynamicVerifier struct { trusted PersistentProvider // This is a source of new info, like a node rpc, or other import method. source Provider + + // pending map for synchronize concurrent verification requests + pendingVerifications map[int64]chan struct{} + + mtx sync.Mutex } // NewDynamicVerifier returns a new DynamicVerifier. It uses the @@ -31,10 +39,11 @@ type DynamicVerifier struct { // files.Provider. The source provider should be a client.HTTPProvider. func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier { return &DynamicVerifier{ - logger: log.NewNopLogger(), - chainID: chainID, - trusted: trusted, - source: source, + logger: log.NewNopLogger(), + chainID: chainID, + trusted: trusted, + source: source, + pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap), } } @@ -56,7 +65,40 @@ func (ic *DynamicVerifier) ChainID() string { // ic.trusted and ic.source to prove the new validators. On success, it will // try to store the SignedHeader in ic.trusted if the next // validator can be sourced. 
-func (ic *DynamicVerifier) Certify(shdr types.SignedHeader) error { +func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error { + + // Performs synchronization for multi-threads verification at the same height. + ic.mtx.Lock() + if pending := ic.pendingVerifications[shdr.Height]; pending != nil { + ic.mtx.Unlock() + <-pending // pending is chan struct{} + } else { + pending := make(chan struct{}) + ic.pendingVerifications[shdr.Height] = pending + defer func() { + close(pending) + ic.mtx.Lock() + delete(ic.pendingVerifications, shdr.Height) + ic.mtx.Unlock() + }() + ic.mtx.Unlock() + } + //Get the exact trusted commit for h, and if it is + // equal to shdr, then don't even verify it, + // and just return nil. + trustedFCSameHeight, err := ic.trusted.LatestFullCommit(ic.chainID, shdr.Height, shdr.Height) + if err == nil { + // If loading trust commit successfully, and trust commit equal to shdr, then don't verify it, + // just return nil. + if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) { + ic.logger.Info(fmt.Sprintf("Load full commit at height %d from cache, there is not need to verify.", shdr.Height)) + return nil + } + } else if !lerr.IsErrCommitNotFound(err) { + // Return error if it is not CommitNotFound error + ic.logger.Info(fmt.Sprintf("Encountered unknown error in loading full commit at height %d.", shdr.Height)) + return err + } // Get the latest known full commit <= h-1 from our trusted providers. // The full commit at h-1 contains the valset to sign for h. @@ -94,9 +136,9 @@ func (ic *DynamicVerifier) Certify(shdr types.SignedHeader) error { } } - // Certify the signed header using the matching valset. + // Verify the signed header using the matching valset. cert := NewBaseVerifier(ic.chainID, trustedFC.Height()+1, trustedFC.NextValidators) - err = cert.Certify(shdr) + err = cert.Verify(shdr) if err != nil { return err } diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index 74e2d55a9..401c14871 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -2,8 +2,8 @@ package lite import ( "fmt" + "sync" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -49,7 +49,7 @@ func TestInquirerValidPath(t *testing.T) { // This should fail validation: sh := fcz[count-1].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.NotNil(err) // Adding a few commits in the middle should be insufficient. @@ -57,7 +57,7 @@ func TestInquirerValidPath(t *testing.T) { err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(sh) + err = cert.Verify(sh) assert.NotNil(err) // With more info, we succeed. @@ -65,7 +65,7 @@ func TestInquirerValidPath(t *testing.T) { err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(sh) + err = cert.Verify(sh) assert.Nil(err, "%+v", err) } @@ -115,18 +115,18 @@ func TestInquirerVerifyHistorical(t *testing.T) { err = source.SaveFullCommit(fcz[7]) require.Nil(err, "%+v", err) sh := fcz[8].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[7].Height(), cert.LastTrustedHeight()) fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height()) require.NotNil(err, "%+v", err) assert.Equal(fc_, (FullCommit{})) - // With fcz[9] Certify will update last trusted height. + // With fcz[9] Verify will update last trusted height. 
err = source.SaveFullCommit(fcz[9]) require.Nil(err, "%+v", err) sh = fcz[8].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[8].Height(), cert.LastTrustedHeight()) fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height()) @@ -141,13 +141,70 @@ func TestInquirerVerifyHistorical(t *testing.T) { // Try to check an unknown seed in the past. sh = fcz[3].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[8].Height(), cert.LastTrustedHeight()) // Jump all the way forward again. sh = fcz[count-1].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[9].Height(), cert.LastTrustedHeight()) } + +func TestConcurrencyInquirerVerify(t *testing.T) { + _, require := assert.New(t), require.New(t) + trust := NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10) + source := NewDBProvider("source", dbm.NewMemDB()) + + // Set up the validators to generate test blocks. + var vote int64 = 10 + keys := genPrivKeys(5) + nkeys := keys.Extend(1) + + // Construct a bunch of commits, each with one more height than the last. + chainID := "inquiry-test" + count := 10 + consHash := []byte("special-params") + fcz := make([]FullCommit, count) + for i := 0; i < count; i++ { + vals := keys.ToValidators(vote, 0) + nextVals := nkeys.ToValidators(vote, 0) + h := int64(1 + i) + appHash := []byte(fmt.Sprintf("h=%d", h)) + resHash := []byte(fmt.Sprintf("res=%d", h)) + fcz[i] = keys.GenFullCommit( + chainID, h, nil, + vals, nextVals, + appHash, consHash, resHash, 0, len(keys)) + // Extend the keys by 1 each time. + keys = nkeys + nkeys = nkeys.Extend(1) + } + + // Initialize a Verifier with the initial state. 
+ err := trust.SaveFullCommit(fcz[0]) + require.Nil(err) + cert := NewDynamicVerifier(chainID, trust, source) + cert.SetLogger(log.TestingLogger()) + + err = source.SaveFullCommit(fcz[7]) + err = source.SaveFullCommit(fcz[8]) + require.Nil(err, "%+v", err) + sh := fcz[8].SignedHeader + + var wg sync.WaitGroup + count = 100 + errList := make([]error, count) + for i := 0; i < count; i++ { + wg.Add(1) + go func(index int) { + errList[index] = cert.Verify(sh) + defer wg.Done() + }(i) + } + wg.Wait() + for _, err := range errList { + require.Nil(err) + } +} diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 6f5a28992..84ff98b47 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -146,7 +146,7 @@ func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (t h, sh.Height) } - if err = cert.Certify(sh); err != nil { + if err = cert.Verify(sh); err != nil { return types.SignedHeader{}, err } diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go index a93d30c7f..b7c11f18e 100644 --- a/lite/proxy/verifier.go +++ b/lite/proxy/verifier.go @@ -8,12 +8,12 @@ import ( lclient "github.com/tendermint/tendermint/lite/client" ) -func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) (*lite.DynamicVerifier, error) { +func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) (*lite.DynamicVerifier, error) { logger = logger.With("module", "lite/proxy") logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) - memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10) + memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize) lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)) trust := lite.NewMultiProvider( memProvider, diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 522511a81..4c0df0229 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -134,10 +134,10 @@ func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { } rpcclient.WaitForHeight(w.Client, *height, nil) res, err := w.Client.Commit(height) - // if we got it, then certify it + // if we got it, then verify it if err == nil { sh := res.SignedHeader - err = w.cert.Certify(sh) + err = w.cert.Verify(sh) } return res, err } diff --git a/lite/types.go b/lite/types.go index 7228c74a9..643f5ad48 100644 --- a/lite/types.go +++ b/lite/types.go @@ -8,6 +8,6 @@ import ( // Verifier must know the current or recent set of validitors by some other // means. 
type Verifier interface { - Certify(sheader types.SignedHeader) error + Verify(sheader types.SignedHeader) error ChainID() string } From 47bc15c27a2bc8f01c6d31eca01691659871cfdb Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 29 Sep 2018 03:28:42 +0400 Subject: [PATCH 011/113] disable mempool WAL by default (#2490) --- CHANGELOG_PENDING.md | 1 + config/config.go | 2 +- docs/tendermint-core/running-in-production.md | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c1db67632..89ac9c130 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,6 +5,7 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config +- [config] `mempool.wal` is disabled by default * Apps diff --git a/config/config.go b/config/config.go index 87a741311..2ccb49083 100644 --- a/config/config.go +++ b/config/config.go @@ -503,7 +503,7 @@ func DefaultMempoolConfig() *MempoolConfig { Recheck: true, RecheckEmpty: true, Broadcast: true, - WalPath: filepath.Join(defaultDataDir, "mempool.wal"), + WalPath: "", // Each signature verification takes .5ms, size reduced until we implement // ABCI Recheck Size: 5000, diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index c774cd131..fb98626ad 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -74,6 +74,10 @@ propose it. Clients must monitor their txs by subscribing over websockets, polling for them, or using `/broadcast_tx_commit`. In the worst case, txs can be resent from the mempool WAL manually. +For the above reasons, the `mempool.wal` is disabled by default. To enable, set +`mempool.wal_dir` to where you want the WAL to be located (e.g. +`data/mempool.wal`). 
+ ## DOS Exposure and Mitigation Validators are supposed to setup [Sentry Node From fc073746a0c12da3f1de7113c6e141638e707f77 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Sat, 29 Sep 2018 01:57:29 +0200 Subject: [PATCH 012/113] privval: Switch to amino encoding in SignBytes (#2459) * switch to amino for SignBytes and add Reply with error message - currently only Vote is done * switch Reply type in socket for other messages - add error description on error * add TODOs regarding error handling * address comments from peer review session (thx @xla) - contains all changes besides the test-coverage / error'ing branches * increase test coverage: - add tests for each newly introduced error'ing code path * return error if received wrong response * add test for wrong response branches (ErrUnexpectedResponse) * update CHANGELOG_PENDING and related documentation (spec) * fix typo: s/CanonicallockID/CanonicalBlockID * fixes from review --- CHANGELOG_PENDING.md | 6 + docs/spec/blockchain/blockchain.md | 16 ++- docs/spec/blockchain/encoding.md | 24 ++-- privval/priv_validator.go | 31 ++--- privval/socket.go | 120 ++++++++++++++----- privval/socket_test.go | 185 +++++++++++++++++++++++++++-- types/canonical.go | 116 ++++++++++++++++++ types/canonical_json.go | 115 ------------------ types/heartbeat.go | 2 +- types/heartbeat_test.go | 30 +++-- types/priv_validator.go | 27 +++++ types/proposal.go | 2 +- types/proposal_test.go | 15 +-- types/vote.go | 12 +- types/vote_test.go | 10 +- 15 files changed, 492 insertions(+), 219 deletions(-) create mode 100644 types/canonical.go delete mode 100644 types/canonical_json.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 89ac9c130..c6346f6a8 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -13,6 +13,12 @@ BREAKING CHANGES: - [node] Remove node.RunForever - [config] \#2232 timeouts as time.Duration, not ints +* Blockchain Protocol + * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. + * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer). + +* P2P Protocol + FEATURES: IMPROVEMENTS: diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index bd4d8ddd2..bd0af70ab 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -401,14 +401,22 @@ must be greater than 2/3 of the total voting power of the complete validator set A vote is a signed message broadcast in the consensus for a particular block at a particular height and round. When stored in the blockchain or propagated over the network, votes are encoded in Amino. -For signing, votes are encoded in JSON, and the ChainID is included, in the form of the `CanonicalSignBytes`. +For signing, votes are represented via `CanonicalVote` and also encoded using amino (protobuf compatible) via +`Vote.SignBytes` which includes the `ChainID`. 
-We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the CanonicalSignBytes +We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the `SignBytes` using the given ChainID: ```go -func (v Vote) Verify(chainID string, pubKey PubKey) bool { - return pubKey.Verify(v.Signature, CanonicalSignBytes(chainID, v)) +func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { + if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) { + return ErrVoteInvalidValidatorAddress + } + + if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) { + return ErrVoteInvalidSignature + } + return nil } ``` diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 4ad30df6b..2ff024ce0 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -298,14 +298,22 @@ Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the ### Signed Messages -Signed messages (eg. votes, proposals) in the consensus are encoded using Amino-JSON, rather than in the standard binary format -(NOTE: this is subject to change: https://github.com/tendermint/tendermint/issues/1622) +Signed messages (eg. votes, proposals) in the consensus are encoded using Amino. -When signing, the elements of a message are sorted by key and prepended with -a `@chain_id` and `@type` field. -We call this encoding the CanonicalSignBytes. For instance, CanonicalSignBytes for a vote would look -like: +When signing, the elements of a message are sorted alphabetically by key and prepended with +a `chain_id` and `type` field. +We call this encoding the SignBytes. For instance, SignBytes for a vote is the Amino encoding of the following struct: -```json -{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"8B01023386C371778ECB6368573E539AFC3CC860","parts":{"hash":"72DB3D959635DFF1BB567BEDAA70573392C51596","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2} +```go +type CanonicalVote struct { + ChainID string + Type string + BlockID CanonicalBlockID + Height int64 + Round int + Timestamp time.Time + VoteType byte +} ``` + +NOTE: see [#1622](https://github.com/tendermint/tendermint/issues/1622) for how field ordering will change diff --git a/privval/priv_validator.go b/privval/priv_validator.go index 3ba0519cb..8091744ce 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -311,21 +311,18 @@ func (pv *FilePV) String() string { // returns the timestamp from the lastSignBytes. // returns true if the only difference in the votes is their timestamp. 
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastVote, newVote types.CanonicalJSONVote - if err := cdc.UnmarshalJSON(lastSignBytes, &lastVote); err != nil { + var lastVote, newVote types.CanonicalVote + if err := cdc.UnmarshalBinary(lastSignBytes, &lastVote); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) } - if err := cdc.UnmarshalJSON(newSignBytes, &newVote); err != nil { + if err := cdc.UnmarshalBinary(newSignBytes, &newVote); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) } - lastTime, err := time.Parse(types.TimeFormat, lastVote.Timestamp) - if err != nil { - panic(err) - } + lastTime := lastVote.Timestamp // set the times to the same value and check equality - now := types.CanonicalTime(tmtime.Now()) + now := tmtime.Now() lastVote.Timestamp = now newVote.Timestamp = now lastVoteBytes, _ := cdc.MarshalJSON(lastVote) @@ -337,25 +334,21 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T // returns the timestamp from the lastSignBytes. // returns true if the only difference in the proposals is their timestamp func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastProposal, newProposal types.CanonicalJSONProposal - if err := cdc.UnmarshalJSON(lastSignBytes, &lastProposal); err != nil { + var lastProposal, newProposal types.CanonicalProposal + if err := cdc.UnmarshalBinary(lastSignBytes, &lastProposal); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) } - if err := cdc.UnmarshalJSON(newSignBytes, &newProposal); err != nil { + if err := cdc.UnmarshalBinary(newSignBytes, &newProposal); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) } - lastTime, err := time.Parse(types.TimeFormat, lastProposal.Timestamp) - if err != nil { - panic(err) - } - + lastTime := lastProposal.Timestamp // set the times to the same value and check equality - now := types.CanonicalTime(tmtime.Now()) + now := tmtime.Now() lastProposal.Timestamp = now newProposal.Timestamp = now - lastProposalBytes, _ := cdc.MarshalJSON(lastProposal) - newProposalBytes, _ := cdc.MarshalJSON(newProposal) + lastProposalBytes, _ := cdc.MarshalBinary(lastProposal) + newProposalBytes, _ := cdc.MarshalBinary(newProposal) return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) } diff --git a/privval/socket.go b/privval/socket.go index d5ede471c..da95f8fb4 100644 --- a/privval/socket.go +++ b/privval/socket.go @@ -7,7 +7,7 @@ import ( "net" "time" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -27,9 +27,10 @@ const ( // Socket errors. var ( - ErrDialRetryMax = errors.New("dialed maximum retries") - ErrConnWaitTimeout = errors.New("waited for remote signer for too long") - ErrConnTimeout = errors.New("remote signer timed out") + ErrDialRetryMax = errors.New("dialed maximum retries") + ErrConnWaitTimeout = errors.New("waited for remote signer for too long") + ErrConnTimeout = errors.New("remote signer timed out") + ErrUnexpectedResponse = errors.New("received unexpected response") ) var ( @@ -150,7 +151,7 @@ func (sc *SocketPV) getPubKey() (crypto.PubKey, error) { // SignVote implements PrivValidator. 
func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { - err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote}) + err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) if err != nil { return err } @@ -160,7 +161,16 @@ func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { return err } - *vote = *res.(*SignVoteMsg).Vote + resp, ok := res.(*SignedVoteResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return fmt.Errorf("remote error occurred: code: %v, description: %s", + resp.Error.Code, + resp.Error.Description) + } + *vote = *resp.Vote return nil } @@ -170,7 +180,7 @@ func (sc *SocketPV) SignProposal( chainID string, proposal *types.Proposal, ) error { - err := writeMsg(sc.conn, &SignProposalMsg{Proposal: proposal}) + err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) if err != nil { return err } @@ -179,8 +189,16 @@ func (sc *SocketPV) SignProposal( if err != nil { return err } - - *proposal = *res.(*SignProposalMsg).Proposal + resp, ok := res.(*SignedProposalResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return fmt.Errorf("remote error occurred: code: %v, description: %s", + resp.Error.Code, + resp.Error.Description) + } + *proposal = *resp.Proposal return nil } @@ -190,7 +208,7 @@ func (sc *SocketPV) SignHeartbeat( chainID string, heartbeat *types.Heartbeat, ) error { - err := writeMsg(sc.conn, &SignHeartbeatMsg{Heartbeat: heartbeat}) + err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: heartbeat}) if err != nil { return err } @@ -199,8 +217,16 @@ func (sc *SocketPV) SignHeartbeat( if err != nil { return err } - - *heartbeat = *res.(*SignHeartbeatMsg).Heartbeat + resp, ok := res.(*SignedHeartbeatResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return fmt.Errorf("remote error occurred: code: %v, description: %s", + resp.Error.Code, + resp.Error.Description) + } + *heartbeat = *resp.Heartbeat return nil } @@ -462,22 +488,34 @@ func (rs *RemoteSigner) handleConnection(conn net.Conn) { var p crypto.PubKey p = rs.privVal.GetPubKey() res = &PubKeyMsg{p} - case *SignVoteMsg: + case *SignVoteRequest: err = rs.privVal.SignVote(rs.chainID, r.Vote) - res = &SignVoteMsg{r.Vote} - case *SignProposalMsg: + if err != nil { + res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedVoteResponse{r.Vote, nil} + } + case *SignProposalRequest: err = rs.privVal.SignProposal(rs.chainID, r.Proposal) - res = &SignProposalMsg{r.Proposal} - case *SignHeartbeatMsg: + if err != nil { + res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedProposalResponse{r.Proposal, nil} + } + case *SignHeartbeatRequest: err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat) - res = &SignHeartbeatMsg{r.Heartbeat} + if err != nil { + res = &SignedHeartbeatResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedHeartbeatResponse{r.Heartbeat, nil} + } default: err = fmt.Errorf("unknown msg: %v", r) } if err != nil { + // only log the error; we'll reply with an error in res rs.Logger.Error("handleConnection", "err", err) - return } err = writeMsg(conn, res) @@ -496,9 +534,12 @@ type SocketPVMsg interface{} func RegisterSocketPVMsg(cdc *amino.Codec) { cdc.RegisterInterface((*SocketPVMsg)(nil), nil) cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil) - cdc.RegisterConcrete(&SignVoteMsg{}, "tendermint/socketpv/SignVoteMsg", nil) - 
cdc.RegisterConcrete(&SignProposalMsg{}, "tendermint/socketpv/SignProposalMsg", nil) - cdc.RegisterConcrete(&SignHeartbeatMsg{}, "tendermint/socketpv/SignHeartbeatMsg", nil) + cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/socketpv/SignVoteRequest", nil) + cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/socketpv/SignedVoteResponse", nil) + cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/socketpv/SignProposalRequest", nil) + cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/socketpv/SignedProposalResponse", nil) + cdc.RegisterConcrete(&SignHeartbeatRequest{}, "tendermint/socketpv/SignHeartbeatRequest", nil) + cdc.RegisterConcrete(&SignedHeartbeatResponse{}, "tendermint/socketpv/SignedHeartbeatResponse", nil) } // PubKeyMsg is a PrivValidatorSocket message containing the public key. @@ -506,21 +547,44 @@ type PubKeyMsg struct { PubKey crypto.PubKey } -// SignVoteMsg is a PrivValidatorSocket message containing a vote. -type SignVoteMsg struct { +// SignVoteRequest is a PrivValidatorSocket message containing a vote. +type SignVoteRequest struct { Vote *types.Vote } -// SignProposalMsg is a PrivValidatorSocket message containing a Proposal. -type SignProposalMsg struct { +// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potenial error message. +type SignedVoteResponse struct { + Vote *types.Vote + Error *RemoteSignerError +} + +// SignProposalRequest is a PrivValidatorSocket message containing a Proposal. +type SignProposalRequest struct { + Proposal *types.Proposal +} + +type SignedProposalResponse struct { Proposal *types.Proposal + Error *RemoteSignerError } -// SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat. -type SignHeartbeatMsg struct { +// SignHeartbeatRequest is a PrivValidatorSocket message containing a Heartbeat. +type SignHeartbeatRequest struct { Heartbeat *types.Heartbeat } +type SignedHeartbeatResponse struct { + Heartbeat *types.Heartbeat + Error *RemoteSignerError +} + +// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply. 
+type RemoteSignerError struct { + // TODO(ismail): create an enum of known errors + Code int + Description string +} + func readMsg(r io.Reader) (msg SocketPVMsg, err error) { const maxSocketPVMsgSize = 1024 * 10 _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize) diff --git a/privval/socket_test.go b/privval/socket_test.go index 461ce3f85..84e721be7 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -20,7 +20,7 @@ import ( func TestSocketPVAddress(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ) defer sc.Stop() defer rs.Stop() @@ -40,7 +40,7 @@ func TestSocketPVAddress(t *testing.T) { func TestSocketPVPubKey(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ) defer sc.Stop() defer rs.Stop() @@ -59,7 +59,7 @@ func TestSocketPVPubKey(t *testing.T) { func TestSocketPVProposal(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ts = time.Now() privProposal = &types.Proposal{Timestamp: ts} @@ -76,7 +76,7 @@ func TestSocketPVProposal(t *testing.T) { func TestSocketPVVote(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ts = time.Now() vType = types.VoteTypePrecommit @@ -94,7 +94,7 @@ func TestSocketPVVote(t *testing.T) { func TestSocketPVHeartbeat(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) want = &types.Heartbeat{} have = &types.Heartbeat{} @@ -231,14 +231,163 @@ func TestRemoteSignerRetry(t *testing.T) { } } +func TestRemoteSignVoteErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + + ts = time.Now() + vType = types.VoteTypePrecommit + vote = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedVoteResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignVote(chainID, vote) + require.Error(t, err) + err = sc.SignVote(chainID, vote) + require.Error(t, err) +} + +func TestRemoteSignProposalErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + + ts = time.Now() + proposal = &types.Proposal{Timestamp: ts} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedProposalResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignProposal(chainID, proposal) + require.Error(t, err) + + err = sc.SignProposal(chainID, proposal) + require.Error(t, err) +} + +func TestRemoteSignHeartbeatErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + hb = &types.Heartbeat{} + ) + defer sc.Stop() + defer rs.Stop() + + err := 
writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: hb}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedHeartbeatResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignHeartbeat(chainID, hb) + require.Error(t, err) + + err = sc.SignHeartbeat(chainID, hb) + require.Error(t, err) +} + +func TestErrUnexpectedResponse(t *testing.T) { + var ( + addr = testFreeAddr(t) + logger = log.TestingLogger() + chainID = cmn.RandStr(12) + readyc = make(chan struct{}) + errc = make(chan error, 1) + + rs = NewRemoteSigner( + logger, + chainID, + addr, + types.NewMockPV(), + ed25519.GenPrivKey(), + ) + sc = NewSocketPV( + logger, + addr, + ed25519.GenPrivKey(), + ) + ) + + testStartSocketPV(t, readyc, sc) + defer sc.Stop() + RemoteSignerConnDeadline(time.Millisecond)(rs) + RemoteSignerConnRetries(1e6)(rs) + + // we do not want to Start() the remote signer here and instead use the connection to + // reply with intentionally wrong replies below: + rsConn, err := rs.connect() + defer rsConn.Close() + require.NoError(t, err) + require.NotNil(t, rsConn) + <-readyc + + // Heartbeat: + go func(errc chan error) { + errc <- sc.SignHeartbeat(chainID, &types.Heartbeat{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) + + // Proposal: + go func(errc chan error) { + errc <- sc.SignProposal(chainID, &types.Proposal{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) + + // Vote: + go func(errc chan error) { + errc <- sc.SignVote(chainID, &types.Vote{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) +} + func testSetupSocketPair( t *testing.T, chainID string, + privValidator types.PrivValidator, ) (*SocketPV, *RemoteSigner) { var ( addr = testFreeAddr(t) logger = log.TestingLogger() - privVal = types.NewMockPV() + privVal = privValidator readyc = make(chan struct{}) rs = NewRemoteSigner( logger, @@ -254,12 +403,7 @@ func testSetupSocketPair( ) ) - go func(sc *SocketPV) { - require.NoError(t, sc.Start()) - assert.True(t, sc.IsRunning()) - - readyc <- struct{}{} - }(sc) + testStartSocketPV(t, readyc, sc) RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnRetries(1e6)(rs) @@ -272,6 +416,23 @@ func testSetupSocketPair( return sc, rs } +func testReadWriteResponse(t *testing.T, resp SocketPVMsg, rsConn net.Conn) { + _, err := readMsg(rsConn) + require.NoError(t, err) + + err = writeMsg(rsConn, resp) + require.NoError(t, err) +} + +func testStartSocketPV(t *testing.T, readyc chan struct{}, sc *SocketPV) { + go func(sc *SocketPV) { + require.NoError(t, sc.Start()) + assert.True(t, sc.IsRunning()) + + readyc <- struct{}{} + }(sc) +} + // testFreeAddr claims a free port so we don't block on listener being ready. 
func testFreeAddr(t *testing.T) string { ln, err := net.Listen("tcp", "127.0.0.1:0") diff --git a/types/canonical.go b/types/canonical.go new file mode 100644 index 000000000..cdf0bd7b5 --- /dev/null +++ b/types/canonical.go @@ -0,0 +1,116 @@ +package types + +import ( + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" +) + +// Canonical* wraps the structs in types for amino encoding them for use in SignBytes / the Signable interface. + +// TimeFormat is used for generating the sigs +const TimeFormat = time.RFC3339Nano + +type CanonicalBlockID struct { + Hash cmn.HexBytes `json:"hash,omitempty"` + PartsHeader CanonicalPartSetHeader `json:"parts,omitempty"` +} + +type CanonicalPartSetHeader struct { + Hash cmn.HexBytes `json:"hash,omitempty"` + Total int `json:"total,omitempty"` +} + +type CanonicalProposal struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + BlockPartsHeader CanonicalPartSetHeader `json:"block_parts_header"` + Height int64 `json:"height"` + POLBlockID CanonicalBlockID `json:"pol_block_id"` + POLRound int `json:"pol_round"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` +} + +type CanonicalVote struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + BlockID CanonicalBlockID `json:"block_id"` + Height int64 `json:"height"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` + VoteType byte `json:"type"` +} + +type CanonicalHeartbeat struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + Height int64 `json:"height"` + Round int `json:"round"` + Sequence int `json:"sequence"` + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` +} + +//----------------------------------- +// Canonicalize the structs + +func CanonicalizeBlockID(blockID BlockID) CanonicalBlockID { + return CanonicalBlockID{ + Hash: blockID.Hash, + PartsHeader: CanonicalizePartSetHeader(blockID.PartsHeader), + } +} + +func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { + return CanonicalPartSetHeader{ + psh.Hash, + psh.Total, + } +} + +func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { + return CanonicalProposal{ + ChainID: chainID, + Type: "proposal", + BlockPartsHeader: CanonicalizePartSetHeader(proposal.BlockPartsHeader), + Height: proposal.Height, + Timestamp: proposal.Timestamp, + POLBlockID: CanonicalizeBlockID(proposal.POLBlockID), + POLRound: proposal.POLRound, + Round: proposal.Round, + } +} + +func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { + return CanonicalVote{ + ChainID: chainID, + Type: "vote", + BlockID: CanonicalizeBlockID(vote.BlockID), + Height: vote.Height, + Round: vote.Round, + Timestamp: vote.Timestamp, + VoteType: vote.Type, + } +} + +func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat { + return CanonicalHeartbeat{ + ChainID: chainID, + Type: "heartbeat", + Height: heartbeat.Height, + Round: heartbeat.Round, + Sequence: heartbeat.Sequence, + ValidatorAddress: heartbeat.ValidatorAddress, + ValidatorIndex: heartbeat.ValidatorIndex, + } +} + +// CanonicalTime can be used to stringify time in a canonical way. 
+func CanonicalTime(t time.Time) string { + // Note that sending time over amino resets it to + // local time, we need to force UTC here, so the + // signatures match + return tmtime.Canonical(t).Format(TimeFormat) +} diff --git a/types/canonical_json.go b/types/canonical_json.go deleted file mode 100644 index d8399ff19..000000000 --- a/types/canonical_json.go +++ /dev/null @@ -1,115 +0,0 @@ -package types - -import ( - "time" - - cmn "github.com/tendermint/tendermint/libs/common" - tmtime "github.com/tendermint/tendermint/types/time" -) - -// Canonical json is amino's json for structs with fields in alphabetical order - -// TimeFormat is used for generating the sigs -const TimeFormat = time.RFC3339Nano - -type CanonicalJSONBlockID struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"` -} - -type CanonicalJSONPartSetHeader struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - Total int `json:"total,omitempty"` -} - -type CanonicalJSONProposal struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"` - Height int64 `json:"height"` - POLBlockID CanonicalJSONBlockID `json:"pol_block_id"` - POLRound int `json:"pol_round"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` -} - -type CanonicalJSONVote struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockID CanonicalJSONBlockID `json:"block_id"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` - VoteType byte `json:"type"` -} - -type CanonicalJSONHeartbeat struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - Height int64 `json:"height"` - Round int `json:"round"` - Sequence int `json:"sequence"` - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` -} - -//----------------------------------- -// Canonicalize the structs - -func CanonicalBlockID(blockID BlockID) CanonicalJSONBlockID { - return CanonicalJSONBlockID{ - Hash: blockID.Hash, - PartsHeader: CanonicalPartSetHeader(blockID.PartsHeader), - } -} - -func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader { - return CanonicalJSONPartSetHeader{ - psh.Hash, - psh.Total, - } -} - -func CanonicalProposal(chainID string, proposal *Proposal) CanonicalJSONProposal { - return CanonicalJSONProposal{ - ChainID: chainID, - Type: "proposal", - BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader), - Height: proposal.Height, - Timestamp: CanonicalTime(proposal.Timestamp), - POLBlockID: CanonicalBlockID(proposal.POLBlockID), - POLRound: proposal.POLRound, - Round: proposal.Round, - } -} - -func CanonicalVote(chainID string, vote *Vote) CanonicalJSONVote { - return CanonicalJSONVote{ - ChainID: chainID, - Type: "vote", - BlockID: CanonicalBlockID(vote.BlockID), - Height: vote.Height, - Round: vote.Round, - Timestamp: CanonicalTime(vote.Timestamp), - VoteType: vote.Type, - } -} - -func CanonicalHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalJSONHeartbeat { - return CanonicalJSONHeartbeat{ - ChainID: chainID, - Type: "heartbeat", - Height: heartbeat.Height, - Round: heartbeat.Round, - Sequence: heartbeat.Sequence, - ValidatorAddress: heartbeat.ValidatorAddress, - ValidatorIndex: heartbeat.ValidatorIndex, - } -} - -func CanonicalTime(t time.Time) string { - // Note that sending time over amino resets it to - // local time, we need to force UTC here, so the - // 
signatures match - return tmtime.Canonical(t).Format(TimeFormat) -} diff --git a/types/heartbeat.go b/types/heartbeat.go index 151f1b0b2..de03d5cc4 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -23,7 +23,7 @@ type Heartbeat struct { // SignBytes returns the Heartbeat bytes for signing. // It panics if the Heartbeat is nil. func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalHeartbeat(chainID, heartbeat)) + bz, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, heartbeat)) if err != nil { panic(err) } diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go index ce9e49230..550bcc739 100644 --- a/types/heartbeat_test.go +++ b/types/heartbeat_test.go @@ -34,19 +34,27 @@ func TestHeartbeatString(t *testing.T) { } func TestHeartbeatWriteSignBytes(t *testing.T) { - - hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} - bz := hb.SignBytes("0xdeadbeef") - // XXX HMMMMMMM - require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"10","round":"1","sequence":"0","validator_address":"","validator_index":"1"}`) - - plainHb := &Heartbeat{} - bz = plainHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"0","round":"0","sequence":"0","validator_address":"","validator_index":"0"}`) + chainID := "test_chain_id" + + { + testHeartbeat := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} + signBytes := testHeartbeat.SignBytes(chainID) + expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") + } + + { + testHeartbeat := &Heartbeat{} + signBytes := testHeartbeat.SignBytes(chainID) + expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") + } require.Panics(t, func() { var nilHb *Heartbeat - bz := nilHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), "null") + signBytes := nilHb.SignBytes(chainID) + require.Equal(t, string(signBytes), "null") }) } diff --git a/types/priv_validator.go b/types/priv_validator.go index 1642be41b..25be5220d 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "errors" "fmt" "github.com/tendermint/tendermint/crypto" @@ -103,3 +104,29 @@ func (pv *MockPV) DisableChecks() { // Currently this does nothing, // as MockPV has no safety checks at all. } + +type erroringMockPV struct { + *MockPV +} + +var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") + +// Implements PrivValidator. +func (pv *erroringMockPV) SignVote(chainID string, vote *Vote) error { + return ErroringMockPVErr +} + +// Implements PrivValidator. +func (pv *erroringMockPV) SignProposal(chainID string, proposal *Proposal) error { + return ErroringMockPVErr +} + +// signHeartbeat signs the heartbeat without any checking. +func (pv *erroringMockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error { + return ErroringMockPVErr +} + +// NewErroringMockPV returns a MockPV that fails on each signing request. Again, for testing only. 
+func NewErroringMockPV() *erroringMockPV { + return &erroringMockPV{&MockPV{ed25519.GenPrivKey()}} +} diff --git a/types/proposal.go b/types/proposal.go index 97e0dca37..a2bc8e367 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -52,7 +52,7 @@ func (p *Proposal) String() string { // SignBytes returns the Proposal bytes for signing func (p *Proposal) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalProposal(chainID, p)) + bz, err := cdc.MarshalBinary(CanonicalizeProposal(chainID, p)) if err != nil { panic(err) } diff --git a/types/proposal_test.go b/types/proposal_test.go index 7396fb767..5f9433083 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -24,17 +24,12 @@ func init() { } func TestProposalSignable(t *testing.T) { - signBytes := testProposal.SignBytes("test_chain_id") - signStr := string(signBytes) + chainID := "test_chain_id" + signBytes := testProposal.SignBytes(chainID) - expected := `{"@chain_id":"test_chain_id","@type":"proposal","block_parts_header":{"hash":"626C6F636B7061727473","total":"111"},"height":"12345","pol_block_id":{},"pol_round":"-1","round":"23456","timestamp":"2018-02-11T07:09:22.765Z"}` - if signStr != expected { - t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr) - } - - if signStr != expected { - t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr) - } + expected, err := cdc.MarshalBinary(CanonicalizeProposal(chainID, testProposal)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Proposal") } func TestProposalString(t *testing.T) { diff --git a/types/vote.go b/types/vote.go index 4a90a7185..ba2f1dfe4 100644 --- a/types/vote.go +++ b/types/vote.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -77,7 +77,7 @@ type Vote struct { } func (vote *Vote) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalVote(chainID, vote)) + bz, err := cdc.MarshalBinary(CanonicalizeVote(chainID, vote)) if err != nil { panic(err) } @@ -104,8 +104,12 @@ func (vote *Vote) String() string { } return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X @ %s}", - vote.ValidatorIndex, cmn.Fingerprint(vote.ValidatorAddress), - vote.Height, vote.Round, vote.Type, typeString, + vote.ValidatorIndex, + cmn.Fingerprint(vote.ValidatorAddress), + vote.Height, + vote.Round, + vote.Type, + typeString, cmn.Fingerprint(vote.BlockID.Hash), cmn.Fingerprint(vote.Signature), CanonicalTime(vote.Timestamp)) diff --git a/types/vote_test.go b/types/vote_test.go index dd7663e59..d0c41a065 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -46,13 +46,11 @@ func exampleVote(t byte) *Vote { func TestVoteSignable(t *testing.T) { vote := examplePrecommit() signBytes := vote.SignBytes("test_chain_id") - signStr := string(signBytes) - expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"8B01023386C371778ECB6368573E539AFC3CC860","parts":{"hash":"72DB3D959635DFF1BB567BEDAA70573392C51596","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2}` - if signStr != expected { - // NOTE: when this fails, you probably want to fix up consensus/replay_test too - t.Errorf("Got unexpected sign string for Vote. 
Expected:\n%v\nGot:\n%v", expected, signStr) - } + expected, err := cdc.MarshalBinary(CanonicalizeVote("test_chain_id", vote)) + require.NoError(t, err) + + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Vote.") } func TestVoteVerifySignature(t *testing.T) { From 71a34adfe58e654e36583a5db3b0ad4d78e3c0b3 Mon Sep 17 00:00:00 2001 From: Joon Date: Sat, 29 Sep 2018 09:03:19 +0900 Subject: [PATCH 013/113] General Merkle Proof (#2298) * first commit finalize rebase add protoc_merkle to Makefile * in progress * fix kvstore * fix tests * remove iavl dependency * fix tx_test * fix test_abci_cli fix test_apps * fix test_apps * fix test_cover * rm rebase residue * address comment in progress * finalize rebase --- Makefile | 4 +- abci/cmd/abci-cli/abci-cli.go | 5 +- abci/example/code/code.go | 1 + abci/example/kvstore/kvstore.go | 3 +- abci/tests/test_cli/ex1.abci.out | 4 + abci/types/types.pb.go | 807 +++++++++++---------------- abci/types/types.proto | 3 +- abci/types/typespb_test.go | 1 + crypto/merkle/compile.sh | 6 + crypto/merkle/merkle.pb.go | 792 ++++++++++++++++++++++++++ crypto/merkle/merkle.proto | 30 + crypto/merkle/proof.go | 132 +++++ crypto/merkle/proof_key_path.go | 107 ++++ crypto/merkle/proof_key_path_test.go | 41 ++ crypto/merkle/proof_simple_value.go | 91 +++ crypto/merkle/simple_proof.go | 53 +- crypto/merkle/simple_tree_test.go | 59 +- crypto/merkle/wire.go | 12 + docs/app-dev/app-development.md | 70 ++- lite/errors/errors.go | 21 + lite/proxy/proof.go | 14 + lite/proxy/query.go | 120 ++-- lite/proxy/query_test.go | 98 ++-- lite/proxy/wrapper.go | 7 +- rpc/client/httpclient.go | 2 +- rpc/client/localclient.go | 2 +- rpc/client/mock/abci.go | 23 +- rpc/client/mock/abci_test.go | 16 +- rpc/client/mock/client.go | 5 +- rpc/client/rpc_test.go | 14 +- rpc/client/types.go | 9 +- rpc/core/abci.go | 16 +- test/app/kvstore_test.sh | 2 +- types/block.go | 1 - types/part_set.go | 2 +- types/results_test.go | 4 +- types/tx.go | 17 +- types/tx_test.go | 6 +- 38 files changed, 1859 insertions(+), 741 deletions(-) create mode 100644 crypto/merkle/compile.sh create mode 100644 crypto/merkle/merkle.pb.go create mode 100644 crypto/merkle/merkle.proto create mode 100644 crypto/merkle/proof.go create mode 100644 crypto/merkle/proof_key_path.go create mode 100644 crypto/merkle/proof_key_path_test.go create mode 100644 crypto/merkle/proof_simple_value.go create mode 100644 crypto/merkle/wire.go create mode 100644 lite/proxy/proof.go diff --git a/Makefile b/Makefile index ffc72c465..73bd67b0b 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ install: ######################################## ### Protobuf -protoc_all: protoc_libs protoc_abci protoc_grpc +protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc %.pb.go: %.proto ## If you get the following error, @@ -137,6 +137,8 @@ grpc_dbserver: protoc_grpc: rpc/grpc/types.pb.go +protoc_merkle: crypto/merkle/merkle.pb.go + ######################################## ### Testing diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b7b8e7d72..50972ec30 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -22,6 +22,7 @@ import ( servertest "github.com/tendermint/tendermint/abci/tests/server" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/abci/version" + "github.com/tendermint/tendermint/crypto/merkle" ) // client is a global variable so it can be reused by the console @@ -100,7 +101,7 @@ type queryResponse struct { Key []byte Value []byte 
Height int64 - Proof []byte + Proof *merkle.Proof } func Execute() error { @@ -748,7 +749,7 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) { fmt.Printf("-> value.hex: %X\n", rsp.Query.Value) } if rsp.Query.Proof != nil { - fmt.Printf("-> proof: %X\n", rsp.Query.Proof) + fmt.Printf("-> proof: %#v\n", rsp.Query.Proof) } } } diff --git a/abci/example/code/code.go b/abci/example/code/code.go index 94e9d015e..988b2a93e 100644 --- a/abci/example/code/code.go +++ b/abci/example/code/code.go @@ -6,4 +6,5 @@ const ( CodeTypeEncodingError uint32 = 1 CodeTypeBadNonce uint32 = 2 CodeTypeUnauthorized uint32 = 3 + CodeTypeUnknownError uint32 = 4 ) diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index c1554cc57..9523bf746 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -81,7 +81,7 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { app.state.Size += 1 tags := []cmn.KVPair{ - {Key: []byte("app.creator"), Value: []byte("jae")}, + {Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")}, {Key: []byte("app.key"), Value: key}, } return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} @@ -114,6 +114,7 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type } return } else { + resQuery.Key = reqQuery.Data value := app.state.db.Get(prefixKey(reqQuery.Data)) resQuery.Value = value if value != nil { diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 5d4c196dc..0cdd43df6 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -28,6 +28,8 @@ -> code: OK -> log: exists -> height: 0 +-> key: abc +-> key.hex: 616263 -> value: abc -> value.hex: 616263 @@ -42,6 +44,8 @@ -> code: OK -> log: exists -> height: 0 +-> key: def +-> key.hex: 646566 -> value: xyz -> value.hex: 78797A diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 3c7f81ab6..427315df3 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -9,6 +9,7 @@ import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/golang/protobuf/ptypes/timestamp" +import merkle "github.com/tendermint/tendermint/crypto/merkle" import common "github.com/tendermint/tendermint/libs/common" import time "time" @@ -60,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{0} + return fileDescriptor_types_03c41ca87033c976, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -482,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{1} + return fileDescriptor_types_03c41ca87033c976, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{2} + return fileDescriptor_types_03c41ca87033c976, []int{2} } 
func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -568,7 +569,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{3} + return fileDescriptor_types_03c41ca87033c976, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -617,7 +618,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{4} + return fileDescriptor_types_03c41ca87033c976, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -675,7 +676,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{5} + return fileDescriptor_types_03c41ca87033c976, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -753,7 +754,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{6} + return fileDescriptor_types_03c41ca87033c976, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -825,7 +826,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{7} + return fileDescriptor_types_03c41ca87033c976, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -893,7 +894,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{8} + return fileDescriptor_types_03c41ca87033c976, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -940,7 +941,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{9} + return fileDescriptor_types_03c41ca87033c976, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -987,7 +988,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{10} + 
return fileDescriptor_types_03c41ca87033c976, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1033,7 +1034,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{11} + return fileDescriptor_types_03c41ca87033c976, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1086,7 +1087,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{12} + return fileDescriptor_types_03c41ca87033c976, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1539,7 +1540,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{13} + return fileDescriptor_types_03c41ca87033c976, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1586,7 +1587,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{14} + return fileDescriptor_types_03c41ca87033c976, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1632,7 +1633,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{15} + return fileDescriptor_types_03c41ca87033c976, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1675,7 +1676,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{16} + return fileDescriptor_types_03c41ca87033c976, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1747,7 +1748,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{17} + return fileDescriptor_types_03c41ca87033c976, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1809,7 +1810,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return 
fileDescriptor_types_8495fed925debe52, []int{18} + return fileDescriptor_types_03c41ca87033c976, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1855,23 +1856,23 @@ func (m *ResponseInitChain) GetValidators() []ValidatorUpdate { type ResponseQuery struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // bytes data = 2; // use "value" instead. - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` - Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` - Proof []byte `protobuf:"bytes,8,opt,name=proof,proto3" json:"proof,omitempty"` - Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + Proof *merkle.Proof `protobuf:"bytes,8,opt,name=proof" json:"proof,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{19} + return fileDescriptor_types_03c41ca87033c976, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1942,7 +1943,7 @@ func (m *ResponseQuery) GetValue() []byte { return nil } -func (m *ResponseQuery) GetProof() []byte { +func (m *ResponseQuery) GetProof() *merkle.Proof { if m != nil { return m.Proof } @@ -1967,7 +1968,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{20} + return fileDescriptor_types_03c41ca87033c976, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2020,7 +2021,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{21} + return fileDescriptor_types_03c41ca87033c976, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2115,7 +2116,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func 
(*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{22} + return fileDescriptor_types_03c41ca87033c976, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2206,7 +2207,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{23} + return fileDescriptor_types_03c41ca87033c976, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2268,7 +2269,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{24} + return fileDescriptor_types_03c41ca87033c976, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2318,7 +2319,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{25} + return fileDescriptor_types_03c41ca87033c976, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2377,7 @@ func (m *BlockSize) Reset() { *m = BlockSize{} } func (m *BlockSize) String() string { return proto.CompactTextString(m) } func (*BlockSize) ProtoMessage() {} func (*BlockSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{26} + return fileDescriptor_types_03c41ca87033c976, []int{26} } func (m *BlockSize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2433,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{27} + return fileDescriptor_types_03c41ca87033c976, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2480,7 +2481,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{28} + return fileDescriptor_types_03c41ca87033c976, []int{28} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2553,7 +2554,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{29} + return fileDescriptor_types_03c41ca87033c976, []int{29} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2699,7 +2700,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) 
Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{30} + return fileDescriptor_types_03c41ca87033c976, []int{30} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2754,7 +2755,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{31} + return fileDescriptor_types_03c41ca87033c976, []int{31} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2811,7 +2812,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{32} + return fileDescriptor_types_03c41ca87033c976, []int{32} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2867,7 +2868,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{33} + return fileDescriptor_types_03c41ca87033c976, []int{33} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2923,7 +2924,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{34} + return fileDescriptor_types_03c41ca87033c976, []int{34} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2978,7 +2979,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{35} + return fileDescriptor_types_03c41ca87033c976, []int{35} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3036,7 +3037,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{36} + return fileDescriptor_types_03c41ca87033c976, []int{36} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4347,7 +4348,7 @@ func (this *ResponseQuery) Equal(that interface{}) bool { if !bytes.Equal(this.Value, that1.Value) { return false } - if !bytes.Equal(this.Proof, that1.Proof) { + if !this.Proof.Equal(that1.Proof) { return false } if this.Height != that1.Height { @@ -6377,11 +6378,15 @@ func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } - if len(m.Proof) > 0 { + if m.Proof != nil { dAtA[i] = 0x42 i++ - i = encodeVarintTypes(dAtA, i, uint64(len(m.Proof))) - i += copy(dAtA[i:], m.Proof) + i = encodeVarintTypes(dAtA, i, uint64(m.Proof.Size())) + n31, err := m.Proof.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 } if m.Height != 0 { dAtA[i] = 
0x48 @@ -6590,11 +6595,11 @@ func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParamUpdates.Size())) - n31, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) + n32, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } if len(m.Tags) > 0 { for _, msg := range m.Tags { @@ -6660,21 +6665,21 @@ func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize.Size())) - n32, err := m.BlockSize.MarshalTo(dAtA[i:]) + n33, err := m.BlockSize.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 } if m.EvidenceParams != nil { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.EvidenceParams.Size())) - n33, err := m.EvidenceParams.MarshalTo(dAtA[i:]) + n34, err := m.EvidenceParams.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -6806,11 +6811,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n34, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n35, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 if m.NumTxs != 0 { dAtA[i] = 0x20 i++ @@ -6824,11 +6829,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) - n35, err := m.LastBlockId.MarshalTo(dAtA[i:]) + n36, err := m.LastBlockId.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if len(m.LastCommitHash) > 0 { dAtA[i] = 0x3a i++ @@ -6913,11 +6918,11 @@ func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) - n36, err := m.PartsHeader.MarshalTo(dAtA[i:]) + n37, err := m.PartsHeader.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -7006,11 +7011,11 @@ func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) - n37, err := m.PubKey.MarshalTo(dAtA[i:]) + n38, err := m.PubKey.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 if m.Power != 0 { dAtA[i] = 0x10 i++ @@ -7040,11 +7045,11 @@ func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n38, err := m.Validator.MarshalTo(dAtA[i:]) + n39, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 if m.SignedLastBlock { dAtA[i] = 0x10 i++ @@ -7118,11 +7123,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n39, err := m.Validator.MarshalTo(dAtA[i:]) + n40, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 if m.Height != 0 { dAtA[i] = 0x18 i++ @@ -7131,11 +7136,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n40, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n41, 
err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if m.TotalVotingPower != 0 { dAtA[i] = 0x28 i++ @@ -7586,10 +7591,8 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { for i := 0; i < v17; i++ { this.Value[i] = byte(r.Intn(256)) } - v18 := r.Intn(100) - this.Proof = make([]byte, v18) - for i := 0; i < v18; i++ { - this.Proof[i] = byte(r.Intn(256)) + if r.Intn(10) != 0 { + this.Proof = merkle.NewPopulatedProof(r, easy) } this.Height = int64(r.Int63()) if r.Intn(2) == 0 { @@ -7604,11 +7607,11 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock { this := &ResponseBeginBlock{} if r.Intn(10) != 0 { - v19 := r.Intn(5) - this.Tags = make([]common.KVPair, v19) - for i := 0; i < v19; i++ { - v20 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v20 + v18 := r.Intn(5) + this.Tags = make([]common.KVPair, v18) + for i := 0; i < v18; i++ { + v19 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v19 } } if !easy && r.Intn(10) != 0 { @@ -7620,9 +7623,9 @@ func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this := &ResponseCheckTx{} this.Code = uint32(r.Uint32()) - v21 := r.Intn(100) - this.Data = make([]byte, v21) - for i := 0; i < v21; i++ { + v20 := r.Intn(100) + this.Data = make([]byte, v20) + for i := 0; i < v20; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -7636,11 +7639,11 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v22 := r.Intn(5) - this.Tags = make([]common.KVPair, v22) - for i := 0; i < v22; i++ { - v23 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v23 + v21 := r.Intn(5) + this.Tags = make([]common.KVPair, v21) + for i := 0; i < v21; i++ { + v22 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v22 } } if !easy && r.Intn(10) != 0 { @@ -7652,9 +7655,9 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this := &ResponseDeliverTx{} this.Code = uint32(r.Uint32()) - v24 := r.Intn(100) - this.Data = make([]byte, v24) - for i := 0; i < v24; i++ { + v23 := r.Intn(100) + this.Data = make([]byte, v23) + for i := 0; i < v23; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -7668,11 +7671,11 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v25 := r.Intn(5) - this.Tags = make([]common.KVPair, v25) - for i := 0; i < v25; i++ { - v26 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v26 + v24 := r.Intn(5) + this.Tags = make([]common.KVPair, v24) + for i := 0; i < v24; i++ { + v25 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v25 } } if !easy && r.Intn(10) != 0 { @@ -7684,22 +7687,22 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this := &ResponseEndBlock{} if r.Intn(10) != 0 { - v27 := r.Intn(5) - this.ValidatorUpdates = make([]ValidatorUpdate, v27) - for i := 0; i < v27; i++ { - v28 := NewPopulatedValidatorUpdate(r, easy) - this.ValidatorUpdates[i] = *v28 + v26 := r.Intn(5) + this.ValidatorUpdates = 
make([]ValidatorUpdate, v26) + for i := 0; i < v26; i++ { + v27 := NewPopulatedValidatorUpdate(r, easy) + this.ValidatorUpdates[i] = *v27 } } if r.Intn(10) != 0 { this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) } if r.Intn(10) != 0 { - v29 := r.Intn(5) - this.Tags = make([]common.KVPair, v29) - for i := 0; i < v29; i++ { - v30 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v30 + v28 := r.Intn(5) + this.Tags = make([]common.KVPair, v28) + for i := 0; i < v28; i++ { + v29 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v29 } } if !easy && r.Intn(10) != 0 { @@ -7710,9 +7713,9 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { this := &ResponseCommit{} - v31 := r.Intn(100) - this.Data = make([]byte, v31) - for i := 0; i < v31; i++ { + v30 := r.Intn(100) + this.Data = make([]byte, v30) + for i := 0; i < v30; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7770,11 +7773,11 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this.Round *= -1 } if r.Intn(10) != 0 { - v32 := r.Intn(5) - this.Votes = make([]VoteInfo, v32) - for i := 0; i < v32; i++ { - v33 := NewPopulatedVoteInfo(r, easy) - this.Votes[i] = *v33 + v31 := r.Intn(5) + this.Votes = make([]VoteInfo, v31) + for i := 0; i < v31; i++ { + v32 := NewPopulatedVoteInfo(r, easy) + this.Votes[i] = *v32 } } if !easy && r.Intn(10) != 0 { @@ -7790,8 +7793,8 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.Height *= -1 } - v34 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v34 + v33 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v33 this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 @@ -7800,51 +7803,51 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v35 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v35 + v34 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v34 + v35 := r.Intn(100) + this.LastCommitHash = make([]byte, v35) + for i := 0; i < v35; i++ { + this.LastCommitHash[i] = byte(r.Intn(256)) + } v36 := r.Intn(100) - this.LastCommitHash = make([]byte, v36) + this.DataHash = make([]byte, v36) for i := 0; i < v36; i++ { - this.LastCommitHash[i] = byte(r.Intn(256)) + this.DataHash[i] = byte(r.Intn(256)) } v37 := r.Intn(100) - this.DataHash = make([]byte, v37) + this.ValidatorsHash = make([]byte, v37) for i := 0; i < v37; i++ { - this.DataHash[i] = byte(r.Intn(256)) + this.ValidatorsHash[i] = byte(r.Intn(256)) } v38 := r.Intn(100) - this.ValidatorsHash = make([]byte, v38) + this.NextValidatorsHash = make([]byte, v38) for i := 0; i < v38; i++ { - this.ValidatorsHash[i] = byte(r.Intn(256)) + this.NextValidatorsHash[i] = byte(r.Intn(256)) } v39 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v39) + this.ConsensusHash = make([]byte, v39) for i := 0; i < v39; i++ { - this.NextValidatorsHash[i] = byte(r.Intn(256)) + this.ConsensusHash[i] = byte(r.Intn(256)) } v40 := r.Intn(100) - this.ConsensusHash = make([]byte, v40) + this.AppHash = make([]byte, v40) for i := 0; i < v40; i++ { - this.ConsensusHash[i] = byte(r.Intn(256)) + this.AppHash[i] = byte(r.Intn(256)) } v41 := r.Intn(100) - this.AppHash = make([]byte, v41) + this.LastResultsHash = make([]byte, v41) for i := 0; i < v41; i++ { - this.AppHash[i] = byte(r.Intn(256)) + this.LastResultsHash[i] = byte(r.Intn(256)) } v42 := 
r.Intn(100) - this.LastResultsHash = make([]byte, v42) + this.EvidenceHash = make([]byte, v42) for i := 0; i < v42; i++ { - this.LastResultsHash[i] = byte(r.Intn(256)) + this.EvidenceHash[i] = byte(r.Intn(256)) } v43 := r.Intn(100) - this.EvidenceHash = make([]byte, v43) + this.ProposerAddress = make([]byte, v43) for i := 0; i < v43; i++ { - this.EvidenceHash[i] = byte(r.Intn(256)) - } - v44 := r.Intn(100) - this.ProposerAddress = make([]byte, v44) - for i := 0; i < v44; i++ { this.ProposerAddress[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7855,13 +7858,13 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { this := &BlockID{} - v45 := r.Intn(100) - this.Hash = make([]byte, v45) - for i := 0; i < v45; i++ { + v44 := r.Intn(100) + this.Hash = make([]byte, v44) + for i := 0; i < v44; i++ { this.Hash[i] = byte(r.Intn(256)) } - v46 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v46 + v45 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v45 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -7874,9 +7877,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { if r.Intn(2) == 0 { this.Total *= -1 } - v47 := r.Intn(100) - this.Hash = make([]byte, v47) - for i := 0; i < v47; i++ { + v46 := r.Intn(100) + this.Hash = make([]byte, v46) + for i := 0; i < v46; i++ { this.Hash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7887,9 +7890,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v48 := r.Intn(100) - this.Address = make([]byte, v48) - for i := 0; i < v48; i++ { + v47 := r.Intn(100) + this.Address = make([]byte, v47) + for i := 0; i < v47; i++ { this.Address[i] = byte(r.Intn(256)) } this.Power = int64(r.Int63()) @@ -7904,8 +7907,8 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { this := &ValidatorUpdate{} - v49 := NewPopulatedPubKey(r, easy) - this.PubKey = *v49 + v48 := NewPopulatedPubKey(r, easy) + this.PubKey = *v48 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -7918,8 +7921,8 @@ func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { this := &VoteInfo{} - v50 := NewPopulatedValidator(r, easy) - this.Validator = *v50 + v49 := NewPopulatedValidator(r, easy) + this.Validator = *v49 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -7930,9 +7933,9 @@ func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v51 := r.Intn(100) - this.Data = make([]byte, v51) - for i := 0; i < v51; i++ { + v50 := r.Intn(100) + this.Data = make([]byte, v50) + for i := 0; i < v50; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7944,14 +7947,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v52 := NewPopulatedValidator(r, easy) - this.Validator = *v52 + v51 := NewPopulatedValidator(r, easy) + this.Validator = *v51 this.Height = int64(r.Int63()) if r.Intn(2) == 0 
{ this.Height *= -1 } - v53 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v53 + v52 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v52 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -7981,9 +7984,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v54 := r.Intn(100) - tmps := make([]rune, v54) - for i := 0; i < v54; i++ { + v53 := r.Intn(100) + tmps := make([]rune, v53) + for i := 0; i < v53; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -8005,11 +8008,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v55 := r.Int63() + v54 := r.Int63() if r.Intn(2) == 0 { - v55 *= -1 + v54 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v55)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v54)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -8035,9 +8038,6 @@ func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { return dAtA } func (m *Request) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Value != nil { @@ -8050,9 +8050,6 @@ func (m *Request) Size() (n int) { } func (m *Request_Echo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Echo != nil { @@ -8062,9 +8059,6 @@ func (m *Request_Echo) Size() (n int) { return n } func (m *Request_Flush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Flush != nil { @@ -8074,9 +8068,6 @@ func (m *Request_Flush) Size() (n int) { return n } func (m *Request_Info) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Info != nil { @@ -8086,9 +8077,6 @@ func (m *Request_Info) Size() (n int) { return n } func (m *Request_SetOption) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.SetOption != nil { @@ -8098,9 +8086,6 @@ func (m *Request_SetOption) Size() (n int) { return n } func (m *Request_InitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.InitChain != nil { @@ -8110,9 +8095,6 @@ func (m *Request_InitChain) Size() (n int) { return n } func (m *Request_Query) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Query != nil { @@ -8122,9 +8104,6 @@ func (m *Request_Query) Size() (n int) { return n } func (m *Request_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.BeginBlock != nil { @@ -8134,9 +8113,6 @@ func (m *Request_BeginBlock) Size() (n int) { return n } func (m *Request_CheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CheckTx != nil { @@ -8146,9 +8122,6 @@ func (m *Request_CheckTx) Size() (n int) { return n } func (m *Request_EndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.EndBlock != nil { @@ -8158,9 +8131,6 @@ func (m *Request_EndBlock) Size() (n int) { return n } func (m *Request_Commit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Commit != nil { @@ -8170,9 +8140,6 @@ func (m *Request_Commit) Size() (n int) { return n } func (m *Request_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.DeliverTx != nil { @@ -8182,9 +8149,6 @@ func (m *Request_DeliverTx) Size() (n int) { return n 
} func (m *RequestEcho) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Message) @@ -8198,9 +8162,6 @@ func (m *RequestEcho) Size() (n int) { } func (m *RequestFlush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.XXX_unrecognized != nil { @@ -8210,9 +8171,6 @@ func (m *RequestFlush) Size() (n int) { } func (m *RequestInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Version) @@ -8226,9 +8184,6 @@ func (m *RequestInfo) Size() (n int) { } func (m *RequestSetOption) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -8246,9 +8201,6 @@ func (m *RequestSetOption) Size() (n int) { } func (m *RequestInitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) @@ -8278,9 +8230,6 @@ func (m *RequestInitChain) Size() (n int) { } func (m *RequestQuery) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Data) @@ -8304,9 +8253,6 @@ func (m *RequestQuery) Size() (n int) { } func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Hash) @@ -8330,9 +8276,6 @@ func (m *RequestBeginBlock) Size() (n int) { } func (m *RequestCheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Tx) @@ -8346,9 +8289,6 @@ func (m *RequestCheckTx) Size() (n int) { } func (m *RequestDeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Tx) @@ -8362,9 +8302,6 @@ func (m *RequestDeliverTx) Size() (n int) { } func (m *RequestEndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Height != 0 { @@ -8377,9 +8314,6 @@ func (m *RequestEndBlock) Size() (n int) { } func (m *RequestCommit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.XXX_unrecognized != nil { @@ -8389,9 +8323,6 @@ func (m *RequestCommit) Size() (n int) { } func (m *Response) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Value != nil { @@ -8404,9 +8335,6 @@ func (m *Response) Size() (n int) { } func (m *Response_Exception) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Exception != nil { @@ -8416,9 +8344,6 @@ func (m *Response_Exception) Size() (n int) { return n } func (m *Response_Echo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Echo != nil { @@ -8428,9 +8353,6 @@ func (m *Response_Echo) Size() (n int) { return n } func (m *Response_Flush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Flush != nil { @@ -8440,9 +8362,6 @@ func (m *Response_Flush) Size() (n int) { return n } func (m *Response_Info) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Info != nil { @@ -8452,9 +8371,6 @@ func (m *Response_Info) Size() (n int) { return n } func (m *Response_SetOption) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.SetOption != nil { @@ -8464,9 +8380,6 @@ func (m *Response_SetOption) Size() (n int) { return n } func (m *Response_InitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.InitChain != nil { @@ -8476,9 +8389,6 @@ func (m *Response_InitChain) Size() (n int) { return n } func (m *Response_Query) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Query != nil { @@ -8488,9 +8398,6 @@ func (m *Response_Query) Size() (n int) { return n } func (m *Response_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.BeginBlock != nil 
{ @@ -8500,9 +8407,6 @@ func (m *Response_BeginBlock) Size() (n int) { return n } func (m *Response_CheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CheckTx != nil { @@ -8512,9 +8416,6 @@ func (m *Response_CheckTx) Size() (n int) { return n } func (m *Response_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.DeliverTx != nil { @@ -8524,9 +8425,6 @@ func (m *Response_DeliverTx) Size() (n int) { return n } func (m *Response_EndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.EndBlock != nil { @@ -8536,9 +8434,6 @@ func (m *Response_EndBlock) Size() (n int) { return n } func (m *Response_Commit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Commit != nil { @@ -8548,9 +8443,6 @@ func (m *Response_Commit) Size() (n int) { return n } func (m *ResponseException) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Error) @@ -8564,9 +8456,6 @@ func (m *ResponseException) Size() (n int) { } func (m *ResponseEcho) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Message) @@ -8580,9 +8469,6 @@ func (m *ResponseEcho) Size() (n int) { } func (m *ResponseFlush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.XXX_unrecognized != nil { @@ -8592,9 +8478,6 @@ func (m *ResponseFlush) Size() (n int) { } func (m *ResponseInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Data) @@ -8619,9 +8502,6 @@ func (m *ResponseInfo) Size() (n int) { } func (m *ResponseSetOption) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Code != 0 { @@ -8642,9 +8522,6 @@ func (m *ResponseSetOption) Size() (n int) { } func (m *ResponseInitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ConsensusParams != nil { @@ -8664,9 +8541,6 @@ func (m *ResponseInitChain) Size() (n int) { } func (m *ResponseQuery) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Code != 0 { @@ -8691,8 +8565,8 @@ func (m *ResponseQuery) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Proof) - if l > 0 { + if m.Proof != nil { + l = m.Proof.Size() n += 1 + l + sovTypes(uint64(l)) } if m.Height != 0 { @@ -8705,9 +8579,6 @@ func (m *ResponseQuery) Size() (n int) { } func (m *ResponseBeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Tags) > 0 { @@ -8723,9 +8594,6 @@ func (m *ResponseBeginBlock) Size() (n int) { } func (m *ResponseCheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Code != 0 { @@ -8762,9 +8630,6 @@ func (m *ResponseCheckTx) Size() (n int) { } func (m *ResponseDeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Code != 0 { @@ -8801,9 +8666,6 @@ func (m *ResponseDeliverTx) Size() (n int) { } func (m *ResponseEndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.ValidatorUpdates) > 0 { @@ -8829,9 +8691,6 @@ func (m *ResponseEndBlock) Size() (n int) { } func (m *ResponseCommit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Data) @@ -8845,9 +8704,6 @@ func (m *ResponseCommit) Size() (n int) { } func (m *ConsensusParams) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.BlockSize != nil { @@ -8865,9 +8721,6 @@ func (m *ConsensusParams) Size() (n int) { } func (m *BlockSize) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxBytes != 0 { @@ -8883,9 +8736,6 @@ func (m *BlockSize) 
Size() (n int) { } func (m *EvidenceParams) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxAge != 0 { @@ -8898,9 +8748,6 @@ func (m *EvidenceParams) Size() (n int) { } func (m *LastCommitInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Round != 0 { @@ -8919,9 +8766,6 @@ func (m *LastCommitInfo) Size() (n int) { } func (m *Header) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.ChainID) @@ -8984,9 +8828,6 @@ func (m *Header) Size() (n int) { } func (m *BlockID) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Hash) @@ -9002,9 +8843,6 @@ func (m *BlockID) Size() (n int) { } func (m *PartSetHeader) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Total != 0 { @@ -9021,9 +8859,6 @@ func (m *PartSetHeader) Size() (n int) { } func (m *Validator) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Address) @@ -9040,9 +8875,6 @@ func (m *Validator) Size() (n int) { } func (m *ValidatorUpdate) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.PubKey.Size() @@ -9057,9 +8889,6 @@ func (m *ValidatorUpdate) Size() (n int) { } func (m *VoteInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Validator.Size() @@ -9074,9 +8903,6 @@ func (m *VoteInfo) Size() (n int) { } func (m *PubKey) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -9094,9 +8920,6 @@ func (m *PubKey) Size() (n int) { } func (m *Evidence) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -11907,7 +11730,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11917,21 +11740,23 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
if m.Proof == nil { - m.Proof = []byte{} + m.Proof = &merkle.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 9: @@ -14503,140 +14328,142 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_8495fed925debe52) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_03c41ca87033c976) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_8495fed925debe52) -} - -var fileDescriptor_types_8495fed925debe52 = []byte{ - // 2062 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x6f, 0x23, 0x49, - 0x15, 0x4f, 0xdb, 0x8e, 0xed, 0x7e, 0x49, 0xec, 0x4c, 0x25, 0x93, 0x78, 0x0c, 0x24, 0xa3, 0x06, - 0x76, 0x13, 0x36, 0x9b, 0xac, 0xb2, 0x2c, 0xca, 0xec, 0x2c, 0x2b, 0xc5, 0x33, 0x03, 0x89, 0x76, - 0x81, 0xd0, 0x33, 0x13, 0x2e, 0x48, 0xad, 0xb2, 0xbb, 0x62, 0xb7, 0xc6, 0xee, 0xee, 0xed, 0x2e, - 0x67, 0x9d, 0x39, 0x72, 0xde, 0xc3, 0x1e, 0x90, 0xf8, 0x0a, 0x7c, 0x01, 0x24, 0x8e, 0x9c, 0xd0, - 0x1e, 0x11, 0x02, 0x71, 0x1b, 0x20, 0x88, 0x03, 0x7c, 0x02, 0x8e, 0xa8, 0x5e, 0x55, 0xf5, 0xbf, - 0xb4, 0x47, 0x33, 0xc3, 0x8d, 0x4b, 0xab, 0xab, 0xde, 0x7b, 0x55, 0xf5, 0x5e, 0xbd, 0xf7, 0x7e, - 0xef, 0x15, 0x6c, 0xd0, 0xfe, 0xc0, 0x3b, 0xe0, 0x57, 0x21, 0x8b, 0xe5, 0x77, 0x3f, 0x8c, 0x02, - 0x1e, 0x90, 0x45, 0x1c, 0x74, 0xdf, 0x1d, 0x7a, 0x7c, 0x34, 0xed, 0xef, 0x0f, 0x82, 0xc9, 0xc1, - 0x30, 0x18, 0x06, 0x07, 0x48, 0xed, 0x4f, 0x2f, 0x70, 0x84, 0x03, 0xfc, 0x93, 0x52, 0xdd, 0xed, - 0x61, 0x10, 0x0c, 0xc7, 0x2c, 0xe5, 0xe2, 0xde, 0x84, 0xc5, 0x9c, 0x4e, 0x42, 0xc5, 0x70, 0x94, - 0x59, 0x8f, 0x33, 0xdf, 0x65, 0xd1, 0xc4, 0xf3, 0x79, 0xf6, 0x77, 0xec, 0xf5, 0xe3, 0x83, 0x41, - 0x30, 0x99, 0x04, 0x7e, 0xf6, 0x40, 0xd6, 0xef, 0x6b, 0xd0, 0xb0, 0xd9, 0x67, 0x53, 0x16, 0x73, - 0xb2, 0x03, 0x35, 0x36, 0x18, 0x05, 0x9d, 0xca, 0x5d, 0x63, 0x67, 0xe9, 0x90, 0xec, 0x4b, 0x3e, - 0x45, 0x7d, 0x34, 0x18, 0x05, 0x27, 0x0b, 0x36, 0x72, 0x90, 0x77, 0x60, 0xf1, 0x62, 0x3c, 0x8d, - 0x47, 0x9d, 0x2a, 0xb2, 0xae, 0xe5, 0x59, 0x7f, 0x20, 0x48, 0x27, 0x0b, 0xb6, 0xe4, 0x11, 0xcb, - 0x7a, 0xfe, 0x45, 0xd0, 0xa9, 0x95, 0x2d, 0x7b, 0xea, 0x5f, 0xe0, 0xb2, 0x82, 0x83, 0x1c, 0x01, - 0xc4, 0x8c, 0x3b, 0x41, 0xc8, 0xbd, 0xc0, 0xef, 0x2c, 0x22, 0xff, 0x66, 0x9e, 0xff, 0x31, 0xe3, - 0x3f, 0x41, 0xf2, 0xc9, 0x82, 0x6d, 0xc6, 0x7a, 0x20, 0x24, 0x3d, 0xdf, 0xe3, 0xce, 0x60, 0x44, - 0x3d, 0xbf, 0x53, 0x2f, 0x93, 0x3c, 0xf5, 0x3d, 0xfe, 0x40, 0x90, 0x85, 0xa4, 0xa7, 0x07, 0x42, - 0x95, 0xcf, 0xa6, 0x2c, 0xba, 0xea, 0x34, 0xca, 0x54, 0xf9, 0xa9, 0x20, 0x09, 0x55, 0x90, 0x87, - 0xdc, 0x87, 0xa5, 0x3e, 0x1b, 0x7a, 0xbe, 0xd3, 0x1f, 0x07, 0x83, 0x67, 0x9d, 0x26, 0x8a, 0x74, - 0xf2, 0x22, 0x3d, 0xc1, 0xd0, 0x13, 0xf4, 0x93, 0x05, 0x1b, 0xfa, 0xc9, 0x88, 0x1c, 0x42, 0x73, - 0x30, 0x62, 0x83, 0x67, 0x0e, 0x9f, 0x75, 0x4c, 0x94, 0xbc, 0x9d, 0x97, 0x7c, 0x20, 0xa8, 0x4f, - 0x66, 0x27, 0x0b, 0x76, 0x63, 0x20, 0x7f, 0xc9, 0x07, 0x60, 0x32, 0xdf, 0x55, 0xdb, 0x2d, 0xa1, - 0xd0, 0x46, 0xe1, 0x5e, 0x7c, 0x57, 0x6f, 0xd6, 0x64, 0xea, 0x9f, 0xec, 0x43, 0x5d, 0xdc, 0xb5, - 0xc7, 0x3b, 0xcb, 0x28, 0xb3, 0x5e, 0xd8, 0x08, 0x69, 0x27, 0x0b, 0xb6, 0xe2, 0x12, 0xe6, 0x73, - 0xd9, 0xd8, 0xbb, 0x64, 0x91, 0x38, 0xdc, 0x5a, 0x99, 0xf9, 0x1e, 0x4a, 0x3a, 0x1e, 0xcf, 0x74, - 0xf5, 0xa0, 0xd7, 0x80, 0xc5, 0x4b, 0x3a, 0x9e, 0x32, 0xeb, 0x6d, 0x58, 0xca, 0x78, 0x0a, 0xe9, - 0x40, 0x63, 0xc2, 
0xe2, 0x98, 0x0e, 0x59, 0xc7, 0xb8, 0x6b, 0xec, 0x98, 0xb6, 0x1e, 0x5a, 0x2d, - 0x58, 0xce, 0xfa, 0x49, 0x46, 0x50, 0xf8, 0x82, 0x10, 0xbc, 0x64, 0x51, 0x2c, 0x1c, 0x40, 0x09, - 0xaa, 0xa1, 0xf5, 0x21, 0xac, 0x16, 0x9d, 0x80, 0xac, 0x42, 0xf5, 0x19, 0xbb, 0x52, 0x9c, 0xe2, - 0x97, 0xac, 0xab, 0x03, 0xa1, 0x17, 0x9b, 0xb6, 0x3a, 0xdd, 0x97, 0x95, 0x44, 0x38, 0xf1, 0x03, - 0x72, 0x04, 0x35, 0x11, 0x48, 0x28, 0xbd, 0x74, 0xd8, 0xdd, 0x97, 0x51, 0xb6, 0xaf, 0xa3, 0x6c, - 0xff, 0x89, 0x8e, 0xb2, 0x5e, 0xf3, 0xab, 0x17, 0xdb, 0x0b, 0x5f, 0xfe, 0x75, 0xdb, 0xb0, 0x51, - 0x82, 0xdc, 0x11, 0x57, 0x49, 0x3d, 0xdf, 0xf1, 0x5c, 0xb5, 0x4f, 0x03, 0xc7, 0xa7, 0x2e, 0x39, - 0x86, 0xd5, 0x41, 0xe0, 0xc7, 0xcc, 0x8f, 0xa7, 0xb1, 0x13, 0xd2, 0x88, 0x4e, 0x62, 0x15, 0x25, - 0xfa, 0xe2, 0x1e, 0x68, 0xf2, 0x19, 0x52, 0xed, 0xf6, 0x20, 0x3f, 0x41, 0x3e, 0x02, 0xb8, 0xa4, - 0x63, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x4e, 0xed, 0x6e, 0x35, 0x23, 0x7c, 0xae, 0x09, 0x4f, 0x43, - 0x97, 0x72, 0xd6, 0xab, 0x89, 0x93, 0xd9, 0x19, 0x7e, 0xf2, 0x16, 0xb4, 0x69, 0x18, 0x3a, 0x31, - 0xa7, 0x9c, 0x39, 0xfd, 0x2b, 0xce, 0x62, 0x8c, 0xa4, 0x65, 0x7b, 0x85, 0x86, 0xe1, 0x63, 0x31, - 0xdb, 0x13, 0x93, 0x96, 0x9b, 0xdc, 0x03, 0x3a, 0x39, 0x21, 0x50, 0x73, 0x29, 0xa7, 0x68, 0x8d, - 0x65, 0x1b, 0xff, 0xc5, 0x5c, 0x48, 0xf9, 0x48, 0xe9, 0x88, 0xff, 0x64, 0x03, 0xea, 0x23, 0xe6, - 0x0d, 0x47, 0x1c, 0xd5, 0xaa, 0xda, 0x6a, 0x24, 0x0c, 0x1f, 0x46, 0xc1, 0x25, 0xc3, 0x38, 0x6f, - 0xda, 0x72, 0x60, 0xfd, 0xd3, 0x80, 0x5b, 0x37, 0x02, 0x43, 0xac, 0x3b, 0xa2, 0xf1, 0x48, 0xef, - 0x25, 0xfe, 0xc9, 0x3b, 0x62, 0x5d, 0xea, 0xb2, 0x48, 0xe5, 0x9f, 0x15, 0xa5, 0xf1, 0x09, 0x4e, - 0x2a, 0x45, 0x15, 0x0b, 0x79, 0x04, 0xab, 0x63, 0x1a, 0x73, 0x47, 0xfa, 0xaf, 0x83, 0xf9, 0xa5, - 0x9a, 0x8b, 0xa9, 0x4f, 0xa9, 0xf6, 0x73, 0xe1, 0x56, 0x4a, 0xbc, 0x35, 0xce, 0xcd, 0x92, 0x13, - 0x58, 0xef, 0x5f, 0x3d, 0xa7, 0x3e, 0xf7, 0x7c, 0xe6, 0xdc, 0xb0, 0x79, 0x5b, 0x2d, 0xf5, 0xe8, - 0xd2, 0x73, 0x99, 0x3f, 0xd0, 0xc6, 0x5e, 0x4b, 0x44, 0x92, 0xcb, 0x88, 0xad, 0xbb, 0xd0, 0xca, - 0x47, 0x31, 0x69, 0x41, 0x85, 0xcf, 0x94, 0x86, 0x15, 0x3e, 0xb3, 0xac, 0xc4, 0x03, 0x93, 0x50, - 0xba, 0xc1, 0xb3, 0x0b, 0xed, 0x42, 0x58, 0x67, 0xcc, 0x6d, 0x64, 0xcd, 0x6d, 0xb5, 0x61, 0x25, - 0x17, 0xcd, 0xd6, 0x17, 0x8b, 0xd0, 0xb4, 0x59, 0x1c, 0x0a, 0x67, 0x22, 0x47, 0x60, 0xb2, 0xd9, - 0x80, 0xc9, 0x44, 0x6a, 0x14, 0xd2, 0x94, 0xe4, 0x79, 0xa4, 0xe9, 0x22, 0xa0, 0x13, 0x66, 0xb2, - 0x9b, 0x03, 0x81, 0xb5, 0xa2, 0x50, 0x16, 0x05, 0xf6, 0xf2, 0x28, 0xb0, 0x5e, 0xe0, 0x2d, 0xc0, - 0xc0, 0x6e, 0x0e, 0x06, 0x8a, 0x0b, 0xe7, 0x70, 0xe0, 0x5e, 0x09, 0x0e, 0x14, 0x8f, 0x3f, 0x07, - 0x08, 0xee, 0x95, 0x00, 0x41, 0xe7, 0xc6, 0x5e, 0xa5, 0x48, 0xb0, 0x97, 0x47, 0x82, 0xa2, 0x3a, - 0x05, 0x28, 0xf8, 0xa8, 0x0c, 0x0a, 0xee, 0x14, 0x64, 0xe6, 0x62, 0xc1, 0xfb, 0x37, 0xb0, 0x60, - 0xa3, 0x20, 0x5a, 0x02, 0x06, 0xf7, 0x72, 0x59, 0x1a, 0x4a, 0x75, 0x2b, 0x4f, 0xd3, 0xe4, 0x7b, - 0x37, 0x71, 0x64, 0xb3, 0x78, 0xb5, 0x65, 0x40, 0x72, 0x50, 0x00, 0x92, 0xdb, 0xc5, 0x53, 0x16, - 0x90, 0x24, 0xc5, 0x83, 0x5d, 0x11, 0xf7, 0x05, 0x4f, 0x13, 0x39, 0x82, 0x45, 0x51, 0x10, 0xa9, - 0x84, 0x2d, 0x07, 0xd6, 0x8e, 0xc8, 0x44, 0xa9, 0x7f, 0xbd, 0x04, 0x3b, 0xd0, 0xe9, 0x33, 0xde, - 0x65, 0xfd, 0xca, 0x48, 0x65, 0x31, 0xa2, 0xb3, 0x59, 0xcc, 0x54, 0x59, 0x2c, 0x03, 0x29, 0x95, - 0x1c, 0xa4, 0x90, 0xef, 0xc0, 0x2d, 0x4c, 0x23, 0x68, 0x17, 0x27, 0x97, 0xd6, 0xda, 0x82, 0x20, - 0x0d, 0x22, 0xf3, 0xdb, 0xbb, 0xb0, 0x96, 0xe1, 0x15, 0x29, 0x16, 0x53, 0x58, 0x0d, 0x83, 0x77, - 0x35, 0xe1, 0x3e, 0x0e, 0xc3, 0x13, 0x1a, 
0x8f, 0xac, 0x1f, 0xa5, 0xfa, 0xa7, 0x70, 0x45, 0xa0, - 0x36, 0x08, 0x5c, 0xa9, 0xd6, 0x8a, 0x8d, 0xff, 0x02, 0xc2, 0xc6, 0xc1, 0x10, 0x77, 0x35, 0x6d, - 0xf1, 0x2b, 0xb8, 0x92, 0x48, 0x31, 0x65, 0x48, 0x58, 0xbf, 0x34, 0xd2, 0xf5, 0x52, 0x04, 0x2b, - 0x03, 0x1b, 0xe3, 0x7f, 0x01, 0x9b, 0xca, 0xeb, 0x81, 0x8d, 0xf5, 0x1b, 0x23, 0xbd, 0x91, 0x04, - 0x46, 0xde, 0x4c, 0x45, 0xe1, 0x1c, 0x9e, 0xef, 0xb2, 0x19, 0x06, 0x7c, 0xd5, 0x96, 0x03, 0x8d, - 0xf0, 0x75, 0x34, 0x73, 0x1e, 0xe1, 0x1b, 0x38, 0x27, 0x07, 0x0a, 0x7e, 0x82, 0x0b, 0x8c, 0xc4, - 0x65, 0x5b, 0x0e, 0x32, 0xd9, 0xd3, 0xcc, 0x65, 0xcf, 0x33, 0x20, 0x37, 0x63, 0x94, 0x7c, 0x08, - 0x35, 0x4e, 0x87, 0xc2, 0x84, 0xc2, 0x0a, 0xad, 0x7d, 0x59, 0x2f, 0xef, 0x7f, 0x72, 0x7e, 0x46, - 0xbd, 0xa8, 0xb7, 0x21, 0xb4, 0xff, 0xf7, 0x8b, 0xed, 0x96, 0xe0, 0xd9, 0x0b, 0x26, 0x1e, 0x67, - 0x93, 0x90, 0x5f, 0xd9, 0x28, 0x63, 0xfd, 0xd9, 0x10, 0xb9, 0x3b, 0x17, 0xbb, 0xa5, 0xb6, 0xd0, - 0x0e, 0x5a, 0xc9, 0xc0, 0xec, 0xab, 0xd9, 0xe7, 0x1b, 0x00, 0x43, 0x1a, 0x3b, 0x9f, 0x53, 0x9f, - 0x33, 0x57, 0x19, 0xc9, 0x1c, 0xd2, 0xf8, 0x67, 0x38, 0x21, 0x6a, 0x12, 0x41, 0x9e, 0xc6, 0xcc, - 0x45, 0x6b, 0x55, 0xed, 0xc6, 0x90, 0xc6, 0x4f, 0x63, 0xe6, 0x26, 0x7a, 0x35, 0xde, 0x40, 0xaf, - 0xbf, 0x64, 0x1c, 0x2f, 0x05, 0xae, 0xff, 0x07, 0xcd, 0xfe, 0x65, 0x08, 0x44, 0xce, 0x27, 0x3f, - 0x72, 0x0a, 0xb7, 0x12, 0xf7, 0x76, 0xa6, 0xe8, 0xf6, 0xda, 0x1f, 0x5e, 0x1e, 0x15, 0xab, 0x97, - 0xf9, 0xe9, 0x98, 0xfc, 0x18, 0x36, 0x0b, 0xc1, 0x99, 0x2c, 0x58, 0x79, 0x69, 0x8c, 0xde, 0xce, - 0xc7, 0xa8, 0x5e, 0x4f, 0xeb, 0x5a, 0x7d, 0x03, 0x5d, 0xbf, 0x25, 0xca, 0x93, 0x6c, 0xca, 0x2e, - 0xbb, 0x2d, 0xeb, 0x17, 0x06, 0xb4, 0x0b, 0x87, 0x21, 0x07, 0x00, 0x32, 0xe3, 0xc5, 0xde, 0x73, - 0x5d, 0x2a, 0xaf, 0xaa, 0x83, 0xa3, 0xc9, 0x1e, 0x7b, 0xcf, 0x99, 0x6d, 0xf6, 0xf5, 0x2f, 0xf9, - 0x18, 0xda, 0x4c, 0x15, 0x4c, 0x3a, 0x25, 0x55, 0x72, 0xd8, 0xa1, 0xcb, 0x29, 0xa5, 0x6d, 0x8b, - 0xe5, 0xc6, 0xd6, 0x31, 0x98, 0xc9, 0xba, 0xe4, 0x6b, 0x60, 0x4e, 0xe8, 0x4c, 0x95, 0xb1, 0xb2, - 0x00, 0x6a, 0x4e, 0xe8, 0x0c, 0x2b, 0x58, 0xb2, 0x09, 0x0d, 0x41, 0x1c, 0x52, 0xb9, 0x43, 0xd5, - 0xae, 0x4f, 0xe8, 0xec, 0x87, 0x34, 0xb6, 0x76, 0xa1, 0x95, 0xdf, 0x44, 0xb3, 0x6a, 0x48, 0x91, - 0xac, 0xc7, 0x43, 0x66, 0x3d, 0x86, 0x56, 0xbe, 0x52, 0x14, 0x89, 0x24, 0x0a, 0xa6, 0xbe, 0x8b, - 0x8c, 0x8b, 0xb6, 0x1c, 0x88, 0x36, 0xf1, 0x32, 0x90, 0x57, 0x97, 0x2d, 0x0d, 0xcf, 0x03, 0xce, - 0x32, 0xf5, 0xa5, 0xe4, 0xb1, 0xfe, 0x58, 0x83, 0xba, 0x2c, 0x5b, 0xc9, 0x5b, 0x99, 0x4e, 0x01, - 0x31, 0xa9, 0xb7, 0x74, 0xfd, 0x62, 0xbb, 0x81, 0xe9, 0xfb, 0xf4, 0x61, 0xda, 0x36, 0xa4, 0x89, - 0xaa, 0x92, 0xab, 0xaa, 0x75, 0x8f, 0x52, 0x7d, 0xed, 0x1e, 0x65, 0x13, 0x1a, 0xfe, 0x74, 0xe2, - 0xf0, 0x59, 0x8c, 0xb1, 0x56, 0xb5, 0xeb, 0xfe, 0x74, 0xf2, 0x64, 0x16, 0x0b, 0x9b, 0xf2, 0x80, - 0xd3, 0x31, 0x92, 0x64, 0xb0, 0x35, 0x71, 0x42, 0x10, 0x8f, 0x60, 0x25, 0x83, 0x72, 0x9e, 0xab, - 0x4a, 0xa8, 0x56, 0xf6, 0xc6, 0x4f, 0x1f, 0x2a, 0x75, 0x97, 0x12, 0xd4, 0x3b, 0x75, 0xc9, 0x4e, - 0xbe, 0x24, 0x47, 0x70, 0x94, 0x19, 0x3a, 0x53, 0x75, 0x0b, 0x68, 0x14, 0x07, 0x10, 0xee, 0x26, - 0x59, 0x64, 0xba, 0x6e, 0x8a, 0x09, 0x24, 0xbe, 0x0d, 0xed, 0x14, 0x5f, 0x24, 0x8b, 0x29, 0x57, - 0x49, 0xa7, 0x91, 0xf1, 0x3d, 0x58, 0xf7, 0xd9, 0x8c, 0x3b, 0x45, 0x6e, 0x40, 0x6e, 0x22, 0x68, - 0xe7, 0x79, 0x89, 0x6f, 0x43, 0x2b, 0x0d, 0x48, 0xe4, 0x5d, 0x92, 0x8d, 0x51, 0x32, 0x8b, 0x6c, - 0x77, 0xa0, 0x99, 0xa0, 0xfb, 0x32, 0x32, 0x34, 0xa8, 0x04, 0xf5, 0xa4, 0x5e, 0x88, 0x58, 0x3c, - 0x1d, 0x73, 0xb5, 0xc8, 0x0a, 0xf2, 0x60, 0xbd, 0x60, 0xcb, 0x79, 
0xe4, 0xfd, 0x26, 0xac, 0x24, - 0x71, 0x80, 0x7c, 0x2d, 0xe4, 0x5b, 0xd6, 0x93, 0xc8, 0xb4, 0x0b, 0xab, 0x61, 0x14, 0x84, 0x41, - 0xcc, 0x22, 0x87, 0xba, 0x6e, 0xc4, 0xe2, 0xb8, 0xd3, 0x96, 0xeb, 0xe9, 0xf9, 0x63, 0x39, 0x6d, - 0xfd, 0x1c, 0x1a, 0xca, 0xfa, 0xa5, 0xed, 0xd3, 0xf7, 0x61, 0x39, 0xa4, 0x91, 0x38, 0x53, 0xb6, - 0x89, 0xd2, 0x45, 0xec, 0x19, 0x8d, 0x44, 0xd7, 0x9c, 0xeb, 0xa5, 0x96, 0x90, 0x5f, 0x4e, 0x59, - 0xf7, 0x60, 0x25, 0xc7, 0x23, 0xc2, 0x00, 0x9d, 0x42, 0x87, 0x01, 0x0e, 0x92, 0x9d, 0x2b, 0xe9, - 0xce, 0xd6, 0x7d, 0x30, 0x13, 0x43, 0x8b, 0x5a, 0x4b, 0xeb, 0x61, 0x28, 0xdb, 0xc9, 0x21, 0x02, - 0x74, 0xf0, 0x39, 0x8b, 0x54, 0x7d, 0x25, 0x07, 0xd6, 0x53, 0x68, 0x17, 0xf2, 0x29, 0xd9, 0x83, - 0x46, 0x38, 0xed, 0x3b, 0xba, 0xaf, 0x4f, 0x3b, 0xc1, 0xb3, 0x69, 0xff, 0x13, 0x76, 0xa5, 0x3b, - 0xc1, 0x10, 0x47, 0xe9, 0xb2, 0x95, 0xec, 0xb2, 0x63, 0x68, 0xea, 0xd0, 0x24, 0xdf, 0x05, 0x33, - 0xf1, 0x91, 0x42, 0x02, 0x4b, 0xb6, 0x56, 0x8b, 0xa6, 0x8c, 0xe2, 0xaa, 0x63, 0x6f, 0xe8, 0x33, - 0xd7, 0x49, 0xe3, 0x01, 0xf7, 0x68, 0xda, 0x6d, 0x49, 0xf8, 0x54, 0x3b, 0xbf, 0xf5, 0x1e, 0xd4, - 0xe5, 0xd9, 0x84, 0x7d, 0xc4, 0xca, 0xba, 0xfc, 0x14, 0xff, 0xa5, 0x99, 0xf6, 0x4f, 0x06, 0x34, - 0x75, 0x8a, 0x2a, 0x15, 0xca, 0x1d, 0xba, 0xf2, 0xaa, 0x87, 0x9e, 0xd7, 0x9b, 0xeb, 0x2c, 0x52, - 0x7b, 0xed, 0x2c, 0xb2, 0x07, 0x44, 0x26, 0x8b, 0xcb, 0x80, 0x7b, 0xfe, 0xd0, 0x91, 0xb6, 0x96, - 0x59, 0x63, 0x15, 0x29, 0xe7, 0x48, 0x38, 0x13, 0xf3, 0x87, 0x5f, 0x2c, 0x42, 0xfb, 0xb8, 0xf7, - 0xe0, 0xf4, 0x38, 0x0c, 0xc7, 0xde, 0x80, 0x62, 0xcd, 0x7b, 0x00, 0x35, 0xac, 0xea, 0x4b, 0xde, - 0x13, 0xbb, 0x65, 0xed, 0x25, 0x39, 0x84, 0x45, 0x2c, 0xee, 0x49, 0xd9, 0xb3, 0x62, 0xb7, 0xb4, - 0xcb, 0x14, 0x9b, 0xc8, 0xf2, 0xff, 0xe6, 0xeb, 0x62, 0xb7, 0xac, 0xd5, 0x24, 0x1f, 0x83, 0x99, - 0x96, 0xe5, 0xf3, 0xde, 0x18, 0xbb, 0x73, 0x9b, 0x4e, 0x21, 0x9f, 0x56, 0x43, 0xf3, 0x9e, 0xca, - 0xba, 0x73, 0xbb, 0x33, 0x72, 0x04, 0x0d, 0x5d, 0x25, 0x96, 0xbf, 0x02, 0x76, 0xe7, 0x34, 0x84, - 0xc2, 0x3c, 0xb2, 0xd2, 0x2e, 0x7b, 0xaa, 0xec, 0x96, 0x76, 0xad, 0xe4, 0x03, 0xa8, 0x2b, 0xd8, - 0x2f, 0x7d, 0x09, 0xec, 0x96, 0xb7, 0x75, 0x42, 0xc9, 0xb4, 0xd7, 0x98, 0xf7, 0x9c, 0xda, 0x9d, - 0xdb, 0x5e, 0x93, 0x63, 0x80, 0x4c, 0x75, 0x3d, 0xf7, 0x9d, 0xb4, 0x3b, 0xbf, 0x6d, 0x26, 0xf7, - 0xa1, 0x99, 0x3e, 0x85, 0x94, 0xbf, 0x7c, 0x76, 0xe7, 0x75, 0xb2, 0xbd, 0xaf, 0xff, 0xe7, 0xef, - 0x5b, 0xc6, 0xaf, 0xaf, 0xb7, 0x8c, 0xdf, 0x5e, 0x6f, 0x19, 0x5f, 0x5d, 0x6f, 0x19, 0x7f, 0xb8, - 0xde, 0x32, 0xfe, 0x76, 0xbd, 0x65, 0xfc, 0xee, 0x1f, 0x5b, 0x46, 0xbf, 0x8e, 0xee, 0xff, 0xfe, - 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xef, 0x95, 0x6c, 0x08, 0xac, 0x17, 0x00, 0x00, + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_03c41ca87033c976) +} + +var fileDescriptor_types_03c41ca87033c976 = []byte{ + // 2089 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x73, 0x1b, 0x49, + 0x15, 0xf7, 0x48, 0xb2, 0xa4, 0x79, 0xb6, 0x25, 0xa7, 0x93, 0xd8, 0x8a, 0x00, 0x3b, 0x35, 0x0b, + 0xbb, 0x36, 0xeb, 0x95, 0xb7, 0xbc, 0x2c, 0xe5, 0x6c, 0x96, 0xad, 0xb2, 0x92, 0x80, 0x5d, 0xbb, + 0x80, 0x99, 0x24, 0xe6, 0x42, 0xd5, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x22, 0xcd, 0xcc, 0xce, 0xb4, + 0xbc, 0x72, 0x8e, 0x9c, 0xf7, 0xb0, 0x07, 0xaa, 0xf8, 0x0a, 0x7c, 0x04, 0x8e, 0x1c, 0x28, 0x6a, + 0x8f, 0x14, 0x05, 0xc5, 0x2d, 0x80, 0x29, 0x0e, 0xf0, 0x09, 0x38, 0x52, 0xfd, 0xba, 0x7b, 0xfe, + 0x79, 0x14, 0x36, 0xe1, 0xc6, 0x45, 0xea, 0xee, 0xf7, 0x5e, 0x77, 0xbf, 0x37, 0xef, 0xbd, 0xdf, + 
0x7b, 0x0d, 0x1b, 0x74, 0x30, 0xf4, 0xf6, 0xf9, 0x65, 0xc8, 0x62, 0xf9, 0xdb, 0x0b, 0xa3, 0x80, + 0x07, 0x64, 0x19, 0x27, 0xdd, 0x77, 0x46, 0x1e, 0x1f, 0xcf, 0x06, 0xbd, 0x61, 0x30, 0xdd, 0x1f, + 0x05, 0xa3, 0x60, 0x1f, 0xa9, 0x83, 0xd9, 0x39, 0xce, 0x70, 0x82, 0x23, 0x29, 0xd5, 0xdd, 0x1e, + 0x05, 0xc1, 0x68, 0xc2, 0x52, 0x2e, 0xee, 0x4d, 0x59, 0xcc, 0xe9, 0x34, 0x54, 0x0c, 0x87, 0x99, + 0xfd, 0x38, 0xf3, 0x5d, 0x16, 0x4d, 0x3d, 0x9f, 0x67, 0x87, 0x13, 0x6f, 0x10, 0xef, 0x0f, 0x83, + 0xe9, 0x34, 0xf0, 0xb3, 0x17, 0xea, 0xde, 0xff, 0xaf, 0x92, 0xc3, 0xe8, 0x32, 0xe4, 0xc1, 0xfe, + 0x94, 0x45, 0xcf, 0x26, 0x4c, 0xfd, 0x49, 0x61, 0xeb, 0x77, 0x35, 0x68, 0xd8, 0xec, 0xd3, 0x19, + 0x8b, 0x39, 0xd9, 0x81, 0x1a, 0x1b, 0x8e, 0x83, 0x4e, 0xe5, 0xae, 0xb1, 0xb3, 0x72, 0x40, 0x7a, + 0xf2, 0x10, 0x45, 0x7d, 0x34, 0x1c, 0x07, 0xc7, 0x4b, 0x36, 0x72, 0x90, 0xb7, 0x61, 0xf9, 0x7c, + 0x32, 0x8b, 0xc7, 0x9d, 0x2a, 0xb2, 0xde, 0xcc, 0xb3, 0x7e, 0x5f, 0x90, 0x8e, 0x97, 0x6c, 0xc9, + 0x23, 0xb6, 0xf5, 0xfc, 0xf3, 0xa0, 0x53, 0x2b, 0xdb, 0xf6, 0xc4, 0x3f, 0xc7, 0x6d, 0x05, 0x07, + 0x39, 0x04, 0x88, 0x19, 0x77, 0x82, 0x90, 0x7b, 0x81, 0xdf, 0x59, 0x46, 0xfe, 0xcd, 0x3c, 0xff, + 0x63, 0xc6, 0x7f, 0x8c, 0xe4, 0xe3, 0x25, 0xdb, 0x8c, 0xf5, 0x44, 0x48, 0x7a, 0xbe, 0xc7, 0x9d, + 0xe1, 0x98, 0x7a, 0x7e, 0xa7, 0x5e, 0x26, 0x79, 0xe2, 0x7b, 0xfc, 0x81, 0x20, 0x0b, 0x49, 0x4f, + 0x4f, 0x84, 0x2a, 0x9f, 0xce, 0x58, 0x74, 0xd9, 0x69, 0x94, 0xa9, 0xf2, 0x13, 0x41, 0x12, 0xaa, + 0x20, 0x0f, 0xb9, 0x0f, 0x2b, 0x03, 0x36, 0xf2, 0x7c, 0x67, 0x30, 0x09, 0x86, 0xcf, 0x3a, 0x4d, + 0x14, 0xe9, 0xe4, 0x45, 0xfa, 0x82, 0xa1, 0x2f, 0xe8, 0xc7, 0x4b, 0x36, 0x0c, 0x92, 0x19, 0x39, + 0x80, 0xe6, 0x70, 0xcc, 0x86, 0xcf, 0x1c, 0x3e, 0xef, 0x98, 0x28, 0x79, 0x3b, 0x2f, 0xf9, 0x40, + 0x50, 0x9f, 0xcc, 0x8f, 0x97, 0xec, 0xc6, 0x50, 0x0e, 0xc9, 0xfb, 0x60, 0x32, 0xdf, 0x55, 0xc7, + 0xad, 0xa0, 0xd0, 0x46, 0xe1, 0xbb, 0xf8, 0xae, 0x3e, 0xac, 0xc9, 0xd4, 0x98, 0xf4, 0xa0, 0x2e, + 0x1c, 0xc5, 0xe3, 0x9d, 0x55, 0x94, 0xb9, 0x55, 0x38, 0x08, 0x69, 0xc7, 0x4b, 0xb6, 0xe2, 0x12, + 0xe6, 0x73, 0xd9, 0xc4, 0xbb, 0x60, 0x91, 0xb8, 0xdc, 0xcd, 0x32, 0xf3, 0x3d, 0x94, 0x74, 0xbc, + 0x9e, 0xe9, 0xea, 0x49, 0xbf, 0x01, 0xcb, 0x17, 0x74, 0x32, 0x63, 0xd6, 0x5b, 0xb0, 0x92, 0xf1, + 0x14, 0xd2, 0x81, 0xc6, 0x94, 0xc5, 0x31, 0x1d, 0xb1, 0x8e, 0x71, 0xd7, 0xd8, 0x31, 0x6d, 0x3d, + 0xb5, 0x5a, 0xb0, 0x9a, 0xf5, 0x93, 0x8c, 0xa0, 0xf0, 0x05, 0x21, 0x78, 0xc1, 0xa2, 0x58, 0x38, + 0x80, 0x12, 0x54, 0x53, 0xeb, 0x03, 0x58, 0x2f, 0x3a, 0x01, 0x59, 0x87, 0xea, 0x33, 0x76, 0xa9, + 0x38, 0xc5, 0x90, 0xdc, 0x52, 0x17, 0x42, 0x2f, 0x36, 0x6d, 0x75, 0xbb, 0x2f, 0x2a, 0x89, 0x70, + 0xe2, 0x07, 0xe4, 0x10, 0x6a, 0x22, 0x0a, 0x51, 0x7a, 0xe5, 0xa0, 0xdb, 0x93, 0x21, 0xda, 0xd3, + 0x21, 0xda, 0x7b, 0xa2, 0x43, 0xb4, 0xdf, 0xfc, 0xf2, 0xc5, 0xf6, 0xd2, 0x17, 0x7f, 0xd9, 0x36, + 0x6c, 0x94, 0x20, 0x77, 0xc4, 0xa7, 0xa4, 0x9e, 0xef, 0x78, 0xae, 0x3a, 0xa7, 0x81, 0xf3, 0x13, + 0x97, 0x1c, 0xc1, 0xfa, 0x30, 0xf0, 0x63, 0xe6, 0xc7, 0xb3, 0xd8, 0x09, 0x69, 0x44, 0xa7, 0xb1, + 0x8a, 0x12, 0xfd, 0xe1, 0x1e, 0x68, 0xf2, 0x29, 0x52, 0xed, 0xf6, 0x30, 0xbf, 0x40, 0x3e, 0x04, + 0xb8, 0xa0, 0x13, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x4e, 0xed, 0x6e, 0x35, 0x23, 0x7c, 0xa6, 0x09, + 0x4f, 0x43, 0x97, 0x72, 0xd6, 0xaf, 0x89, 0x9b, 0xd9, 0x19, 0x7e, 0xf2, 0x26, 0xb4, 0x69, 0x18, + 0x3a, 0x31, 0xa7, 0x9c, 0x39, 0x83, 0x4b, 0xce, 0x62, 0x8c, 0xa4, 0x55, 0x7b, 0x8d, 0x86, 0xe1, + 0x63, 0xb1, 0xda, 0x17, 0x8b, 0x96, 0x9b, 0x7c, 0x07, 0x74, 0x72, 0x42, 0xa0, 0xe6, 0x52, 0x4e, + 0xd1, 0x1a, 0xab, 0x36, 
0x8e, 0xc5, 0x5a, 0x48, 0xf9, 0x58, 0xe9, 0x88, 0x63, 0xb2, 0x01, 0xf5, + 0x31, 0xf3, 0x46, 0x63, 0x8e, 0x6a, 0x55, 0x6d, 0x35, 0x13, 0x86, 0x0f, 0xa3, 0xe0, 0x82, 0x61, + 0x9c, 0x37, 0x6d, 0x39, 0xb1, 0xfe, 0x61, 0xc0, 0x8d, 0x6b, 0x81, 0x21, 0xf6, 0x1d, 0xd3, 0x78, + 0xac, 0xcf, 0x12, 0x63, 0xf2, 0xb6, 0xd8, 0x97, 0xba, 0x2c, 0x52, 0xf9, 0x67, 0x4d, 0x69, 0x7c, + 0x8c, 0x8b, 0x4a, 0x51, 0xc5, 0x42, 0x1e, 0xc1, 0xfa, 0x84, 0xc6, 0xdc, 0x91, 0xfe, 0xeb, 0x60, + 0x7e, 0xa9, 0xe6, 0x62, 0xea, 0x13, 0xaa, 0xfd, 0x5c, 0xb8, 0x95, 0x12, 0x6f, 0x4d, 0x72, 0xab, + 0xe4, 0x18, 0x6e, 0x0d, 0x2e, 0x9f, 0x53, 0x9f, 0x7b, 0x3e, 0x73, 0xae, 0xd9, 0xbc, 0xad, 0xb6, + 0x7a, 0x74, 0xe1, 0xb9, 0xcc, 0x1f, 0x6a, 0x63, 0xdf, 0x4c, 0x44, 0x92, 0x8f, 0x11, 0x5b, 0x77, + 0xa1, 0x95, 0x8f, 0x62, 0xd2, 0x82, 0x0a, 0x9f, 0x2b, 0x0d, 0x2b, 0x7c, 0x6e, 0x59, 0x89, 0x07, + 0x26, 0xa1, 0x74, 0x8d, 0x67, 0x17, 0xda, 0x85, 0xb0, 0xce, 0x98, 0xdb, 0xc8, 0x9a, 0xdb, 0x6a, + 0xc3, 0x5a, 0x2e, 0x9a, 0xad, 0xcf, 0x97, 0xa1, 0x69, 0xb3, 0x38, 0x14, 0xce, 0x44, 0x0e, 0xc1, + 0x64, 0xf3, 0x21, 0x93, 0x89, 0xd4, 0x28, 0xa4, 0x29, 0xc9, 0xf3, 0x48, 0xd3, 0x45, 0x40, 0x27, + 0xcc, 0x64, 0x37, 0x07, 0x02, 0x37, 0x8b, 0x42, 0x59, 0x14, 0xd8, 0xcb, 0xa3, 0xc0, 0xad, 0x02, + 0x6f, 0x01, 0x06, 0x76, 0x73, 0x30, 0x50, 0xdc, 0x38, 0x87, 0x03, 0xf7, 0x4a, 0x70, 0xa0, 0x78, + 0xfd, 0x05, 0x40, 0x70, 0xaf, 0x04, 0x08, 0x3a, 0xd7, 0xce, 0x2a, 0x45, 0x82, 0xbd, 0x3c, 0x12, + 0x14, 0xd5, 0x29, 0x40, 0xc1, 0x87, 0x65, 0x50, 0x70, 0xa7, 0x20, 0xb3, 0x10, 0x0b, 0xde, 0xbb, + 0x86, 0x05, 0x1b, 0x05, 0xd1, 0x12, 0x30, 0xb8, 0x97, 0xcb, 0xd2, 0x50, 0xaa, 0x5b, 0x79, 0x9a, + 0x26, 0xdf, 0xbd, 0x8e, 0x23, 0x9b, 0xc5, 0x4f, 0x5b, 0x06, 0x24, 0xfb, 0x05, 0x20, 0xb9, 0x5d, + 0xbc, 0x65, 0x01, 0x49, 0x52, 0x3c, 0xd8, 0x15, 0x71, 0x5f, 0xf0, 0x34, 0x91, 0x23, 0x58, 0x14, + 0x05, 0x91, 0x4a, 0xd8, 0x72, 0x62, 0xed, 0x88, 0x4c, 0x94, 0xfa, 0xd7, 0x4b, 0xb0, 0x03, 0x9d, + 0x3e, 0xe3, 0x5d, 0xd6, 0x2f, 0x8d, 0x54, 0x16, 0x23, 0x3a, 0x9b, 0xc5, 0x4c, 0x95, 0xc5, 0x32, + 0x90, 0x52, 0xc9, 0x41, 0x0a, 0xf9, 0x36, 0xdc, 0xc0, 0x34, 0x82, 0x76, 0x71, 0x72, 0x69, 0xad, + 0x2d, 0x08, 0xd2, 0x20, 0x32, 0xbf, 0xbd, 0x03, 0x37, 0x33, 0xbc, 0x22, 0xc5, 0x62, 0x0a, 0xab, + 0x61, 0xf0, 0xae, 0x27, 0xdc, 0x47, 0x61, 0x78, 0x4c, 0xe3, 0xb1, 0xf5, 0xc3, 0x54, 0xff, 0x14, + 0xae, 0x08, 0xd4, 0x86, 0x81, 0x2b, 0xd5, 0x5a, 0xb3, 0x71, 0x2c, 0x20, 0x6c, 0x12, 0x8c, 0xf0, + 0x54, 0xd3, 0x16, 0x43, 0xc1, 0x95, 0x44, 0x8a, 0x29, 0x43, 0xc2, 0xfa, 0x85, 0x91, 0xee, 0x97, + 0x22, 0x58, 0x19, 0xd8, 0x18, 0xff, 0x0b, 0xd8, 0x54, 0x5e, 0x0d, 0x6c, 0xac, 0xdf, 0x1a, 0xe9, + 0x17, 0x49, 0x60, 0xe4, 0xf5, 0x54, 0x14, 0xce, 0xe1, 0xf9, 0x2e, 0x9b, 0x63, 0xc0, 0x57, 0x6d, + 0x39, 0xd1, 0x08, 0x5f, 0x47, 0x33, 0xe7, 0x11, 0xbe, 0x81, 0x6b, 0x72, 0x42, 0xde, 0x40, 0xf8, + 0x09, 0xce, 0x55, 0x24, 0xae, 0xf5, 0x54, 0x99, 0x7b, 0x2a, 0x16, 0x6d, 0x49, 0xcb, 0x24, 0x53, + 0x33, 0x97, 0x4c, 0x4f, 0x81, 0x5c, 0x0f, 0x59, 0xf2, 0x01, 0xd4, 0x38, 0x1d, 0x09, 0x8b, 0x0a, + 0xa3, 0xb4, 0x7a, 0xb2, 0xf6, 0xee, 0x7d, 0x7c, 0x76, 0x4a, 0xbd, 0xa8, 0xbf, 0x21, 0x8c, 0xf1, + 0xaf, 0x17, 0xdb, 0x2d, 0xc1, 0xb3, 0x17, 0x4c, 0x3d, 0xce, 0xa6, 0x21, 0xbf, 0xb4, 0x51, 0xc6, + 0xfa, 0x93, 0x21, 0x52, 0x79, 0x2e, 0x94, 0x4b, 0x4d, 0xa3, 0xfd, 0xb5, 0x92, 0x41, 0xdd, 0xaf, + 0x66, 0xae, 0x6f, 0x00, 0x8c, 0x68, 0xec, 0x7c, 0x46, 0x7d, 0xce, 0x5c, 0x65, 0x33, 0x73, 0x44, + 0xe3, 0x9f, 0xe2, 0x82, 0x28, 0x51, 0x04, 0x79, 0x16, 0x33, 0x17, 0x8d, 0x57, 0xb5, 0x1b, 0x23, + 0x1a, 0x3f, 0x8d, 0x99, 0x9b, 0xe8, 0xd5, 0x78, 
0x0d, 0xbd, 0xfe, 0x9c, 0xf1, 0xc3, 0x14, 0xc7, + 0xfe, 0x1f, 0x34, 0xfb, 0xa7, 0x21, 0x00, 0x3a, 0x9f, 0x0b, 0xc9, 0x09, 0xdc, 0x48, 0xbc, 0xdd, + 0x99, 0x61, 0x14, 0x68, 0x7f, 0x78, 0x79, 0x90, 0xac, 0x5f, 0xe4, 0x97, 0x63, 0xf2, 0x23, 0xd8, + 0x2c, 0xc4, 0x6a, 0xb2, 0x61, 0xe5, 0xa5, 0x21, 0x7b, 0x3b, 0x1f, 0xb2, 0x7a, 0x3f, 0xad, 0x6b, + 0xf5, 0x35, 0x74, 0xfd, 0xa6, 0xa8, 0x56, 0xb2, 0x19, 0xbc, 0xec, 0x6b, 0x59, 0x3f, 0x37, 0xa0, + 0x5d, 0xb8, 0x0c, 0xd9, 0x07, 0x90, 0x09, 0x30, 0xf6, 0x9e, 0xeb, 0xca, 0x79, 0x5d, 0x5d, 0x1c, + 0x4d, 0xf6, 0xd8, 0x7b, 0xce, 0x6c, 0x73, 0xa0, 0x87, 0xe4, 0x23, 0x68, 0x33, 0x55, 0x3f, 0xe9, + 0x0c, 0x55, 0xc9, 0x41, 0x89, 0xae, 0xae, 0x94, 0xb6, 0x2d, 0x96, 0x9b, 0x5b, 0x47, 0x60, 0x26, + 0xfb, 0x92, 0xaf, 0x81, 0x39, 0xa5, 0x73, 0x55, 0xd5, 0xca, 0x7a, 0xa8, 0x39, 0xa5, 0x73, 0x2c, + 0x68, 0xc9, 0x26, 0x34, 0x04, 0x71, 0x44, 0xe5, 0x09, 0x55, 0xbb, 0x3e, 0xa5, 0xf3, 0x1f, 0xd0, + 0xd8, 0xda, 0x85, 0x56, 0xfe, 0x10, 0xcd, 0xaa, 0x11, 0x46, 0xb2, 0x1e, 0x8d, 0x98, 0xf5, 0x18, + 0x5a, 0xf9, 0xc2, 0x51, 0x64, 0x9b, 0x28, 0x98, 0xf9, 0x2e, 0x32, 0x2e, 0xdb, 0x72, 0x22, 0xba, + 0xc6, 0x8b, 0x40, 0x7e, 0xba, 0x6c, 0xa5, 0x78, 0x16, 0x70, 0x96, 0x29, 0x37, 0x25, 0x8f, 0xf5, + 0x87, 0x1a, 0xd4, 0x65, 0x15, 0x4b, 0xde, 0xcc, 0x34, 0x0e, 0x08, 0x51, 0xfd, 0x95, 0xab, 0x17, + 0xdb, 0x0d, 0xcc, 0xe6, 0x27, 0x0f, 0xd3, 0x2e, 0x22, 0x4d, 0x54, 0x95, 0x5c, 0x91, 0xad, 0x5b, + 0x96, 0xea, 0x2b, 0xb7, 0x2c, 0x9b, 0xd0, 0xf0, 0x67, 0x53, 0x87, 0xcf, 0x63, 0x8c, 0xb5, 0xaa, + 0x5d, 0xf7, 0x67, 0xd3, 0x27, 0xf3, 0x58, 0xd8, 0x94, 0x07, 0x9c, 0x4e, 0x90, 0x24, 0x83, 0xad, + 0x89, 0x0b, 0x82, 0x78, 0x08, 0x6b, 0x19, 0xd0, 0xf3, 0x5c, 0x55, 0x51, 0xb5, 0xb2, 0x5f, 0xfc, + 0xe4, 0xa1, 0x52, 0x77, 0x25, 0x01, 0xc1, 0x13, 0x97, 0xec, 0xe4, 0x2b, 0x74, 0xc4, 0x4a, 0x99, + 0xb0, 0x33, 0x45, 0xb8, 0x40, 0x4a, 0x71, 0x01, 0xe1, 0x6e, 0x92, 0xa5, 0x89, 0x2c, 0x4d, 0xb1, + 0x80, 0xc4, 0xb7, 0xa0, 0x9d, 0xc2, 0x8d, 0x64, 0x31, 0xe5, 0x2e, 0xe9, 0x32, 0x32, 0xbe, 0x0b, + 0xb7, 0x7c, 0x36, 0xe7, 0x4e, 0x91, 0x1b, 0x90, 0x9b, 0x08, 0xda, 0x59, 0x5e, 0xe2, 0x5b, 0xd0, + 0x4a, 0x03, 0x12, 0x79, 0x57, 0x64, 0x9f, 0x94, 0xac, 0x22, 0xdb, 0x1d, 0x68, 0x26, 0x60, 0xbf, + 0x8a, 0x0c, 0x0d, 0x2a, 0x31, 0x3e, 0x29, 0x1f, 0x22, 0x16, 0xcf, 0x26, 0x5c, 0x6d, 0xb2, 0x86, + 0x3c, 0x58, 0x3e, 0xd8, 0x72, 0x1d, 0x79, 0xdf, 0x80, 0xb5, 0x24, 0x0e, 0x90, 0xaf, 0x85, 0x7c, + 0xab, 0x7a, 0x11, 0x99, 0x76, 0x61, 0x3d, 0x8c, 0x82, 0x30, 0x88, 0x59, 0xe4, 0x50, 0xd7, 0x8d, + 0x58, 0x1c, 0x77, 0xda, 0x72, 0x3f, 0xbd, 0x7e, 0x24, 0x97, 0xad, 0x9f, 0x41, 0x43, 0x59, 0xbf, + 0xb4, 0x9b, 0xfa, 0x1e, 0xac, 0x86, 0x34, 0x12, 0x77, 0xca, 0xf6, 0x54, 0xba, 0xa6, 0x3d, 0xa5, + 0x91, 0x68, 0xa2, 0x73, 0xad, 0xd5, 0x0a, 0xf2, 0xcb, 0x25, 0xeb, 0x1e, 0xac, 0xe5, 0x78, 0x44, + 0x18, 0xa0, 0x53, 0xe8, 0x30, 0xc0, 0x49, 0x72, 0x72, 0x25, 0x3d, 0xd9, 0xba, 0x0f, 0x66, 0x62, + 0x68, 0x51, 0x7a, 0x69, 0x3d, 0x0c, 0x65, 0x3b, 0x39, 0xc5, 0x76, 0x31, 0xf8, 0x8c, 0x45, 0xaa, + 0xdc, 0x92, 0x13, 0xeb, 0x29, 0xb4, 0x0b, 0xf9, 0x94, 0xec, 0x41, 0x23, 0x9c, 0x0d, 0x1c, 0xdd, + 0xe6, 0xa7, 0x8d, 0xe1, 0xe9, 0x6c, 0xf0, 0x31, 0xbb, 0xd4, 0x8d, 0x61, 0x88, 0xb3, 0x74, 0xdb, + 0x4a, 0x76, 0xdb, 0x09, 0x34, 0x75, 0x68, 0x92, 0xef, 0x80, 0x99, 0xf8, 0x48, 0x21, 0x81, 0x25, + 0x47, 0xab, 0x4d, 0x53, 0x46, 0xf1, 0xa9, 0x63, 0x6f, 0xe4, 0x33, 0xd7, 0x49, 0xe3, 0x01, 0xcf, + 0x68, 0xda, 0x6d, 0x49, 0xf8, 0x44, 0x3b, 0xbf, 0xf5, 0x2e, 0xd4, 0xe5, 0xdd, 0x84, 0x7d, 0xc4, + 0xce, 0xba, 0x1a, 0x15, 0xe3, 0xd2, 0x4c, 0xfb, 0x47, 0x03, 0x9a, 0x3a, 
0x45, 0x95, 0x0a, 0xe5, + 0x2e, 0x5d, 0xf9, 0xaa, 0x97, 0x5e, 0xd4, 0xaa, 0xeb, 0x2c, 0x52, 0x7b, 0xe5, 0x2c, 0xb2, 0x07, + 0x44, 0x26, 0x8b, 0x8b, 0x80, 0x7b, 0xfe, 0xc8, 0x91, 0xb6, 0x96, 0x59, 0x63, 0x1d, 0x29, 0x67, + 0x48, 0x38, 0x15, 0xeb, 0x07, 0x9f, 0x2f, 0x43, 0xfb, 0xa8, 0xff, 0xe0, 0xe4, 0x28, 0x0c, 0x27, + 0xde, 0x90, 0x62, 0x09, 0xbc, 0x0f, 0x35, 0x2c, 0xf2, 0x4b, 0x9e, 0x17, 0xbb, 0x65, 0xdd, 0x26, + 0x39, 0x80, 0x65, 0xac, 0xf5, 0x49, 0xd9, 0x2b, 0x63, 0xb7, 0xb4, 0xe9, 0x14, 0x87, 0xc8, 0x6e, + 0xe0, 0xfa, 0x63, 0x63, 0xb7, 0xac, 0xf3, 0x24, 0x1f, 0x81, 0x99, 0x56, 0xe9, 0x8b, 0x9e, 0x1c, + 0xbb, 0x0b, 0x7b, 0x50, 0x21, 0x9f, 0x56, 0x43, 0x8b, 0x5e, 0xce, 0xba, 0x0b, 0x9b, 0x35, 0x72, + 0x08, 0x0d, 0x5d, 0x25, 0x96, 0x3f, 0x0a, 0x76, 0x17, 0xf4, 0x87, 0xc2, 0x3c, 0xb2, 0xf0, 0x2e, + 0x7b, 0xb9, 0xec, 0x96, 0x36, 0xb1, 0xe4, 0x7d, 0xa8, 0x2b, 0xd8, 0x2f, 0x7d, 0x18, 0xec, 0x96, + 0x77, 0x79, 0x42, 0xc9, 0xb4, 0xf5, 0x58, 0xf4, 0xba, 0xda, 0x5d, 0xd8, 0x6d, 0x93, 0x23, 0x80, + 0x4c, 0x75, 0xbd, 0xf0, 0xd9, 0xb4, 0xbb, 0xb8, 0x8b, 0x26, 0xf7, 0xa1, 0x99, 0xbe, 0x8c, 0x94, + 0x3f, 0x84, 0x76, 0x17, 0x35, 0xb6, 0xfd, 0xaf, 0xff, 0xfb, 0x6f, 0x5b, 0xc6, 0xaf, 0xae, 0xb6, + 0x8c, 0x5f, 0x5f, 0x6d, 0x19, 0x5f, 0x5e, 0x6d, 0x19, 0xbf, 0xbf, 0xda, 0x32, 0xfe, 0x7a, 0xb5, + 0x65, 0xfc, 0xe6, 0xef, 0x5b, 0xc6, 0xa0, 0x8e, 0xee, 0xff, 0xde, 0x7f, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x51, 0x4f, 0x34, 0x66, 0xf8, 0x17, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index d23ac513e..b62162c47 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -6,6 +6,7 @@ package types; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/tendermint/tendermint/libs/common/types.proto"; +import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto"; // This file is copied from http://github.com/tendermint/abci // NOTE: When using custom types, mind the warnings. @@ -154,7 +155,7 @@ message ResponseQuery { int64 index = 5; bytes key = 6; bytes value = 7; - bytes proof = 8; + merkle.Proof proof = 8; int64 height = 9; } diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index 5da925e17..0ae0fea0d 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -14,6 +14,7 @@ import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/tendermint/tendermint/crypto/merkle" import _ "github.com/tendermint/tendermint/libs/common" // Reference imports to suppress errors if they are not otherwise used. diff --git a/crypto/merkle/compile.sh b/crypto/merkle/compile.sh new file mode 100644 index 000000000..8e4c739f4 --- /dev/null +++ b/crypto/merkle/compile.sh @@ -0,0 +1,6 @@ +#! /bin/bash + +protoc --gogo_out=. -I $GOPATH/src/ -I . -I $GOPATH/src/github.com/gogo/protobuf/protobuf merkle.proto +echo "--> adding nolint declarations to protobuf generated files" +awk '/package merkle/ { print "//nolint: gas"; print; next }1' merkle.pb.go > merkle.pb.go.new +mv merkle.pb.go.new merkle.pb.go diff --git a/crypto/merkle/merkle.pb.go b/crypto/merkle/merkle.pb.go new file mode 100644 index 000000000..75e1b08c3 --- /dev/null +++ b/crypto/merkle/merkle.pb.go @@ -0,0 +1,792 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: crypto/merkle/merkle.proto + +package merkle + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import bytes "bytes" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// ProofOp defines an operation used for calculating Merkle root +// The data could be arbitrary format, providing nessecary data +// for example neighbouring node hash +type ProofOp struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProofOp) Reset() { *m = ProofOp{} } +func (m *ProofOp) String() string { return proto.CompactTextString(m) } +func (*ProofOp) ProtoMessage() {} +func (*ProofOp) Descriptor() ([]byte, []int) { + return fileDescriptor_merkle_5d3f6051907285da, []int{0} +} +func (m *ProofOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ProofOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofOp.Merge(dst, src) +} +func (m *ProofOp) XXX_Size() int { + return m.Size() +} +func (m *ProofOp) XXX_DiscardUnknown() { + xxx_messageInfo_ProofOp.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofOp proto.InternalMessageInfo + +func (m *ProofOp) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ProofOp) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ProofOp) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// Proof is Merkle proof defined by the list of ProofOps +type Proof struct { + Ops []ProofOp `protobuf:"bytes,1,rep,name=ops" json:"ops"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Proof) Reset() { *m = Proof{} } +func (m *Proof) String() string { return proto.CompactTextString(m) } +func (*Proof) ProtoMessage() {} +func (*Proof) Descriptor() ([]byte, []int) { + return fileDescriptor_merkle_5d3f6051907285da, []int{1} +} +func (m *Proof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Proof) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proof.Merge(dst, src) +} +func (m *Proof) XXX_Size() int { + return m.Size() +} +func (m *Proof) XXX_DiscardUnknown() { + xxx_messageInfo_Proof.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Proof proto.InternalMessageInfo + +func (m *Proof) GetOps() []ProofOp { + if m != nil { + return m.Ops + } + return nil +} + +func init() { + proto.RegisterType((*ProofOp)(nil), "merkle.ProofOp") + proto.RegisterType((*Proof)(nil), "merkle.Proof") +} +func (this *ProofOp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProofOp) + if !ok { + that2, ok := that.(ProofOp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !bytes.Equal(this.Key, that1.Key) { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Proof) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Proof) + if !ok { + that2, ok := that.(Proof) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Ops) != len(that1.Ops) { + return false + } + for i := range this.Ops { + if !this.Ops[i].Equal(&that1.Ops[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (m *ProofOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Data) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Proof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proof) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ops) > 0 { + for _, msg := range m.Ops { + dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp { + this := &ProofOp{} + this.Type = string(randStringMerkle(r)) + v1 := r.Intn(100) + this.Key = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Key[i] = byte(r.Intn(256)) + } + v2 := r.Intn(100) + this.Data = make([]byte, v2) + for i := 0; i < v2; i++ { + this.Data[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkle(r, 4) + } + return this +} + +func NewPopulatedProof(r randyMerkle, easy bool) 
*Proof { + this := &Proof{} + if r.Intn(10) != 0 { + v3 := r.Intn(5) + this.Ops = make([]ProofOp, v3) + for i := 0; i < v3; i++ { + v4 := NewPopulatedProofOp(r, easy) + this.Ops[i] = *v4 + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkle(r, 2) + } + return this +} + +type randyMerkle interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneMerkle(r randyMerkle) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringMerkle(r randyMerkle) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneMerkle(r) + } + return string(tmps) +} +func randUnrecognizedMerkle(r randyMerkle, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldMerkle(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldMerkle(dAtA []byte, r randyMerkle, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateMerkle(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *ProofOp) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Proof) Size() (n int) { + var l int + _ = l + if len(m.Ops) > 0 { + for _, e := range m.Ops { + l = e.Size() + n += 1 + l + sovMerkle(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMerkle(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMerkle(x uint64) (n int) { + return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ProofOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ProofOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMerkle(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkle + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Proof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ops = append(m.Ops, ProofOp{}) + if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMerkle(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkle + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMerkle(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMerkle + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMerkle(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + 
panic("unreachable") +} + +var ( + ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_merkle_5d3f6051907285da) } + +var fileDescriptor_merkle_5d3f6051907285da = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c, + 0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25, + 0xf9, 0x42, 0x6c, 0x10, 0x9e, 0x94, 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, + 0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, 0x3e, 0x58, 0x3a, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0, + 0x2c, 0x88, 0x36, 0x25, 0x67, 0x2e, 0xf6, 0x80, 0xa2, 0xfc, 0xfc, 0x34, 0xff, 0x02, 0x21, 0x21, + 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x48, + 0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc4, 0x04, 0xa9, + 0x4a, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x06, 0x0b, 0x81, 0xd9, 0x4a, 0x06, 0x5c, 0xac, 0x60, 0x43, + 0x84, 0xd4, 0xb9, 0x98, 0xf3, 0x0b, 0x8a, 0x25, 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xf8, 0xf5, + 0xa0, 0x0e, 0x84, 0x5a, 0xe0, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x85, 0x93, 0xc8, + 0x8f, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x49, 0x6c, 0x60, 0x37, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0xb9, 0x2b, 0x0f, 0xd1, 0xe8, 0x00, 0x00, 0x00, +} diff --git a/crypto/merkle/merkle.proto b/crypto/merkle/merkle.proto new file mode 100644 index 000000000..8a6c467d4 --- /dev/null +++ b/crypto/merkle/merkle.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package merkle; + +// For more information on gogo.proto, see: +// https://github.com/gogo/protobuf/blob/master/extensions.md +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.sizer_all) = true; + +option (gogoproto.populate_all) = true; +option (gogoproto.equal_all) = true; + +//---------------------------------------- +// Message types + +// ProofOp defines an operation used for calculating Merkle root +// The data could be arbitrary format, providing nessecary data +// for example neighbouring node hash +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} + +// Proof is Merkle proof defined by the list of ProofOps +message Proof { + repeated ProofOp ops = 1 [(gogoproto.nullable)=false]; +} diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go new file mode 100644 index 000000000..7da894953 --- /dev/null +++ b/crypto/merkle/proof.go @@ -0,0 +1,132 @@ +package merkle + +import ( + "bytes" + + cmn "github.com/tendermint/tmlibs/common" +) + +//---------------------------------------- +// ProofOp gets converted to an instance of ProofOperator: + +// ProofOperator is a layer for calculating intermediate Merkle root +// Run() takes a list of bytes because it can be more than one +// for example in range proofs +// ProofOp() defines custom encoding which can be decoded later with +// OpDecoder +type ProofOperator interface { + Run([][]byte) ([][]byte, error) + GetKey() []byte + ProofOp() ProofOp +} + +//---------------------------------------- +// Operations on a list of ProofOperators + +// ProofOperators is a slice of ProofOperator(s) +// Each operator 
will be applied to the input value sequencially +// and the last Merkle root will be verified with already known data +type ProofOperators []ProofOperator + +func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) { + return poz.Verify(root, keypath, [][]byte{value}) +} + +func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) { + keys, err := KeyPathToKeys(keypath) + if err != nil { + return + } + + for i, op := range poz { + key := op.GetKey() + if len(key) != 0 { + if !bytes.Equal(keys[0], key) { + return cmn.NewError("Key mismatch on operation #%d: expected %+v but %+v", i, []byte(keys[0]), []byte(key)) + } + keys = keys[1:] + } + args, err = op.Run(args) + if err != nil { + return + } + } + if !bytes.Equal(root, args[0]) { + return cmn.NewError("Calculated root hash is invalid: expected %+v but %+v", root, args[0]) + } + if len(keys) != 0 { + return cmn.NewError("Keypath not consumed all") + } + return nil +} + +//---------------------------------------- +// ProofRuntime - main entrypoint + +type OpDecoder func(ProofOp) (ProofOperator, error) + +type ProofRuntime struct { + decoders map[string]OpDecoder +} + +func NewProofRuntime() *ProofRuntime { + return &ProofRuntime{ + decoders: make(map[string]OpDecoder), + } +} + +func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { + _, ok := prt.decoders[typ] + if ok { + panic("already registered for type " + typ) + } + prt.decoders[typ] = dec +} + +func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { + decoder := prt.decoders[pop.Type] + if decoder == nil { + return nil, cmn.NewError("unrecognized proof type %v", pop.Type) + } + return decoder(pop) +} + +func (prt *ProofRuntime) DecodeProof(proof *Proof) (poz ProofOperators, err error) { + poz = ProofOperators(nil) + for _, pop := range proof.Ops { + operator, err := prt.Decode(pop) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding a proof operator") + } + poz = append(poz, operator) + } + return +} + +func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) { + return prt.Verify(proof, root, keypath, [][]byte{value}) +} + +// TODO In the long run we'll need a method of classifcation of ops, +// whether existence or absence or perhaps a third? +func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) { + return prt.Verify(proof, root, keypath, nil) +} + +func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) { + poz, err := prt.DecodeProof(proof) + if err != nil { + return cmn.ErrorWrap(err, "decoding proof") + } + return poz.Verify(root, keypath, args) +} + +// DefaultProofRuntime only knows about Simple value +// proofs. +// To use e.g. IAVL proofs, register op-decoders as +// defined in the IAVL package. +func DefaultProofRuntime() (prt *ProofRuntime) { + prt = NewProofRuntime() + prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder) + return +} diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go new file mode 100644 index 000000000..d74dac51d --- /dev/null +++ b/crypto/merkle/proof_key_path.go @@ -0,0 +1,107 @@ +package merkle + +import ( + "encoding/hex" + "fmt" + "net/url" + "strings" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* + + For generalized Merkle proofs, each layer of the proof may require an + optional key. 
The key may be encoded either by URL-encoding or + (upper-case) hex-encoding. + TODO: In the future, more encodings may be supported, like base32 (e.g. + /32:) + + For example, for a Cosmos-SDK application where the first two proof layers + are SimpleValueOps, and the third proof layer is an IAVLValueOp, the keys + might look like: + + 0: []byte("App") + 1: []byte("IBC") + 2: []byte{0x01, 0x02, 0x03} + + Assuming that we know that the first two layers are always ASCII texts, we + probably want to use URLEncoding for those, whereas the third layer will + require HEX encoding for efficient representation. + + kp := new(KeyPath) + kp.AppendKey([]byte("App"), KeyEncodingURL) + kp.AppendKey([]byte("IBC"), KeyEncodingURL) + kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingURL) + kp.String() // Should return "/App/IBC/x:010203" + + NOTE: All encodings *MUST* work compatibly, such that you can choose to use + whatever encoding, and the decoded keys will always be the same. In other + words, it's just as good to encode all three keys using URL encoding or HEX + encoding... it just wouldn't be optimal in terms of readability or space + efficiency. + + NOTE: Punycode will never be supported here, because not all values can be + decoded. For example, no string decodes to the string "xn--blah" in + Punycode. + +*/ + +type keyEncoding int + +const ( + KeyEncodingURL keyEncoding = iota + KeyEncodingHex + KeyEncodingMax +) + +type Key struct { + name []byte + enc keyEncoding +} + +type KeyPath []Key + +func (pth KeyPath) AppendKey(key []byte, enc keyEncoding) KeyPath { + return append(pth, Key{key, enc}) +} + +func (pth KeyPath) String() string { + res := "" + for _, key := range pth { + switch key.enc { + case KeyEncodingURL: + res += "/" + url.PathEscape(string(key.name)) + case KeyEncodingHex: + res += "/x:" + fmt.Sprintf("%X", key.name) + default: + panic("unexpected key encoding type") + } + } + return res +} + +func KeyPathToKeys(path string) (keys [][]byte, err error) { + if path == "" || path[0] != '/' { + return nil, cmn.NewError("key path string must start with a forward slash '/'") + } + parts := strings.Split(path[1:], "/") + keys = make([][]byte, len(parts)) + for i, part := range parts { + if strings.HasPrefix(part, "x:") { + hexPart := part[2:] + key, err := hex.DecodeString(hexPart) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part) + } + keys[i] = key + } else { + key, err := url.PathUnescape(part) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part) + } + keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes... 
+ } + } + return keys, nil +} diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go new file mode 100644 index 000000000..48fda3032 --- /dev/null +++ b/crypto/merkle/proof_key_path_test.go @@ -0,0 +1,41 @@ +package merkle + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestKeyPath(t *testing.T) { + var path KeyPath + keys := make([][]byte, 10) + alphanum := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + + for d := 0; d < 1e4; d++ { + path = nil + + for i := range keys { + enc := keyEncoding(rand.Intn(int(KeyEncodingMax))) + keys[i] = make([]byte, rand.Uint32()%20) + switch enc { + case KeyEncodingURL: + for j := range keys[i] { + keys[i][j] = alphanum[rand.Intn(len(alphanum))] + } + case KeyEncodingHex: + rand.Read(keys[i]) + default: + panic("Unexpected encoding") + } + path = path.AppendKey(keys[i], enc) + } + + res, err := KeyPathToKeys(path.String()) + require.Nil(t, err) + + for i, key := range keys { + require.Equal(t, key, res[i]) + } + } +} diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go new file mode 100644 index 000000000..28935e2cc --- /dev/null +++ b/crypto/merkle/proof_simple_value.go @@ -0,0 +1,91 @@ +package merkle + +import ( + "bytes" + "fmt" + + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ProofOpSimpleValue = "simple:v" + +// SimpleValueOp takes a key and a single value as argument and +// produces the root hash. The corresponding tree structure is +// the SimpleMap tree. SimpleMap takes a Hasher, and currently +// Tendermint uses aminoHasher. SimpleValueOp should support +// the hash function as used in aminoHasher. TODO support +// additional hash functions here as options/args to this +// operator. +// +// If the produced root hash matches the expected hash, the +// proof is good. +type SimpleValueOp struct { + // Encoded in ProofOp.Key. + key []byte + + // To encode in ProofOp.Data + Proof *SimpleProof `json:"simple-proof"` +} + +var _ ProofOperator = SimpleValueOp{} + +func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp { + return SimpleValueOp{ + key: key, + Proof: proof, + } +} + +func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { + if pop.Type != ProofOpSimpleValue { + return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) + } + var op SimpleValueOp // a bit strange as we'll discard this, but it works. + err := cdc.UnmarshalBinary(pop.Data, &op) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + } + return NewSimpleValueOp(pop.Key, op.Proof), nil +} + +func (op SimpleValueOp) ProofOp() ProofOp { + bz := cdc.MustMarshalBinary(op) + return ProofOp{ + Type: ProofOpSimpleValue, + Key: op.key, + Data: bz, + } +} + +func (op SimpleValueOp) String() string { + return fmt.Sprintf("SimpleValueOp{%v}", op.GetKey()) +} + +func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { + if len(args) != 1 { + return nil, cmn.NewError("expected 1 arg, got %v", len(args)) + } + value := args[0] + hasher := tmhash.New() + hasher.Write(value) // does not error + vhash := hasher.Sum(nil) + + // Wrap to hash the KVPair. 
+ hasher = tmhash.New() + encodeByteSlice(hasher, []byte(op.key)) // does not error + encodeByteSlice(hasher, []byte(vhash)) // does not error + kvhash := hasher.Sum(nil) + + if !bytes.Equal(kvhash, op.Proof.LeafHash) { + return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) + } + + return [][]byte{ + op.Proof.ComputeRootHash(), + }, nil +} + +func (op SimpleValueOp) GetKey() []byte { + return op.key +} diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index 2541b6d38..306505fc2 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -2,12 +2,24 @@ package merkle import ( "bytes" + "errors" "fmt" + + cmn "github.com/tendermint/tendermint/libs/common" ) -// SimpleProof represents a simple merkle proof. +// SimpleProof represents a simple Merkle proof. +// NOTE: The convention for proofs is to include leaf hashes but to +// exclude the root hash. +// This convention is implemented across IAVL range proofs as well. +// Keep this consistent unless there's a very good reason to change +// everything. This also affects the generalized proof system as +// well. type SimpleProof struct { - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. + Total int `json:"total"` // Total number of items. + Index int `json:"index"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. } // SimpleProofsFromHashers computes inclusion proof for given items. @@ -18,7 +30,10 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP proofs = make([]*SimpleProof, len(items)) for i, trail := range trails { proofs[i] = &SimpleProof{ - Aunts: trail.FlattenAunts(), + Total: len(items), + Index: i, + LeafHash: trail.Hash, + Aunts: trail.FlattenAunts(), } } return @@ -49,11 +64,33 @@ func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[strin return } -// Verify that leafHash is a leaf hash of the simple-merkle-tree -// which hashes to rootHash. -func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { - computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) - return computedHash != nil && bytes.Equal(computedHash, rootHash) +// Verify that the SimpleProof proves the root hash. +// Check sp.Index/sp.Total manually if needed +func (sp *SimpleProof) Verify(rootHash []byte, leafHash []byte) error { + if sp.Total < 0 { + return errors.New("Proof total must be positive") + } + if sp.Index < 0 { + return errors.New("Proof index cannot be negative") + } + if !bytes.Equal(sp.LeafHash, leafHash) { + return cmn.NewError("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) + } + computedHash := sp.ComputeRootHash() + if !bytes.Equal(computedHash, rootHash) { + return cmn.NewError("invalid root hash: wanted %X got %X", rootHash, computedHash) + } + return nil +} + +// Compute the root hash given a leaf hash. Does not verify the result. +func (sp *SimpleProof) ComputeRootHash() []byte { + return computeHashFromAunts( + sp.Index, + sp.Total, + sp.LeafHash, + sp.Aunts, + ) } // String implements the stringer interface for SimpleProof. 
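Taken together, the pieces introduced above (ProofOperator, ProofRuntime, KeyPath and the reworked SimpleProof) let a client chain proofs from several trees and verify them against a single trusted root. The sketch below is illustrative only: it registers a toy one-step operator whose "root" is simply H(key || value), rather than the real SimpleValueOp or an IAVL decoder, but it exercises the verification flow end to end — build a keypath, decode the proof ops, run them, and compare the computed root.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/crypto/tmhash"
)

// concatOp is a toy ProofOperator for illustration: its Merkle "root"
// is simply H(key || value). Real verifiers register SimpleValueOpDecoder
// (or an IAVL decoder) instead.
type concatOp struct{ key []byte }

func (op concatOp) Run(args [][]byte) ([][]byte, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("expected 1 arg, got %d", len(args))
	}
	h := tmhash.New()
	h.Write(op.key)  // hash.Hash.Write never returns an error
	h.Write(args[0])
	return [][]byte{h.Sum(nil)}, nil
}

func (op concatOp) GetKey() []byte { return op.key }

func (op concatOp) ProofOp() merkle.ProofOp {
	return merkle.ProofOp{Type: "toy:concat", Key: op.key}
}

func main() {
	// Register a decoder for the toy op type.
	prt := merkle.NewProofRuntime()
	prt.RegisterOpDecoder("toy:concat", func(pop merkle.ProofOp) (merkle.ProofOperator, error) {
		return concatOp{key: pop.Key}, nil
	})

	key, value := []byte("mykey"), []byte("myvalue")

	// The root hash a verifier would already trust (e.g. an AppHash).
	h := tmhash.New()
	h.Write(key)
	h.Write(value)
	root := h.Sum(nil)

	// Wire-format proof, as it would arrive in ResponseQuery.Proof.
	proof := &merkle.Proof{Ops: []merkle.ProofOp{concatOp{key: key}.ProofOp()}}

	// Keys are addressed with a KeyPath; one path element per operator key.
	kp := merkle.KeyPath{}.AppendKey(key, merkle.KeyEncodingURL)

	if err := prt.VerifyValue(proof, root, kp.String(), value); err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("verified", kp.String())
}
```

In the runtime added by this patch, DefaultProofRuntime wires in SimpleValueOpDecoder for "simple:v" ops, and other tree implementations (e.g. IAVL) are expected to register their own decoders, so the same Verify call can walk a chained multi-store proof down to a leaf.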
diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index e2dccd3b3..b299aba78 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -1,13 +1,13 @@ package merkle import ( - "bytes" + "testing" + + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" . "github.com/tendermint/tendermint/libs/test" - "testing" - "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -30,60 +30,43 @@ func TestSimpleProof(t *testing.T) { rootHash2, proofs := SimpleProofsFromHashers(items) - if !bytes.Equal(rootHash, rootHash2) { - t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) - } + require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2) // For each item, check the trail. for i, item := range items { itemHash := item.Hash() proof := proofs[i] + // Check total/index + require.Equal(t, proof.Index, i, "Unmatched indicies: %d vs %d", proof.Index, i) + + require.Equal(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) + // Verify success - ok := proof.Verify(i, total, itemHash, rootHash) - if !ok { - t.Errorf("Verification failed for index %v.", i) - } - - // Wrong item index should make it fail - { - ok = proof.Verify((i+1)%total, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong index %v.", i) - } - } + err := proof.Verify(rootHash, itemHash) + require.NoError(t, err, "Verificatior failed: %v.", err) // Trail too long should make it fail origAunts := proof.Aunts proof.Aunts = append(proof.Aunts, cmn.RandBytes(32)) - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } + err = proof.Verify(rootHash, itemHash) + require.Error(t, err, "Expected verification to fail for wrong trail length") + proof.Aunts = origAunts // Trail too short should make it fail proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1] - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } + err = proof.Verify(rootHash, itemHash) + require.Error(t, err, "Expected verification to fail for wrong trail length") + proof.Aunts = origAunts // Mutating the itemHash should make it fail. - ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash) - if ok { - t.Errorf("Expected verification to fail for mutated leaf hash") - } + err = proof.Verify(rootHash, MutateByteSlice(itemHash)) + require.Error(t, err, "Expected verification to fail for mutated leaf hash") // Mutating the rootHash should make it fail. - ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash)) - if ok { - t.Errorf("Expected verification to fail for mutated root hash") - } + err = proof.Verify(MutateByteSlice(rootHash), itemHash) + require.Error(t, err, "Expected verification to fail for mutated root hash") } } diff --git a/crypto/merkle/wire.go b/crypto/merkle/wire.go new file mode 100644 index 000000000..c20ec9aa4 --- /dev/null +++ b/crypto/merkle/wire.go @@ -0,0 +1,12 @@ +package merkle + +import ( + "github.com/tendermint/go-amino" +) + +var cdc *amino.Codec + +func init() { + cdc = amino.NewCodec() + cdc.Seal() +} diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md index 3aaebb230..2618bb1e9 100644 --- a/docs/app-dev/app-development.md +++ b/docs/app-dev/app-development.md @@ -431,17 +431,30 @@ Note: these query formats are subject to change! 
In go: ``` -func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { - if reqQuery.Prove { - value, proof, exists := app.state.Proof(reqQuery.Data) - resQuery.Index = -1 // TODO make Proof return index - resQuery.Key = reqQuery.Data - resQuery.Value = value - resQuery.Proof = proof - if exists { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" + func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { + if reqQuery.Prove { + value, proof, exists := app.state.GetWithProof(reqQuery.Data) + resQuery.Index = -1 // TODO make Proof return index + resQuery.Key = reqQuery.Data + resQuery.Value = value + resQuery.Proof = proof + if exists { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } else { + index, value, exists := app.state.Get(reqQuery.Data) + resQuery.Index = int64(index) + resQuery.Value = value + if exists { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } } return } else { @@ -461,22 +474,25 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type In Java: ``` -ResponseQuery requestQuery(RequestQuery req) { - final boolean isProveQuery = req.getProve(); - final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder(); - - if (isProveQuery) { - com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray()); - final byte[] proofAsByteArray = proofResult.getAsByteArray(); - - responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray)); - responseBuilder.setKey(req.getData()); - responseBuilder.setValue(ByteString.copyFrom(proofResult.getData())); - responseBuilder.setLog(result.getLogValue()); - } else { - byte[] queryData = req.getData().toByteArray(); - - final com.app.example.QueryResult result = generateQueryResult(queryData); + ResponseQuery requestQuery(RequestQuery req) { + final boolean isProveQuery = req.getProve(); + final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder(); + byte[] queryData = req.getData().toByteArray(); + + if (isProveQuery) { + com.app.example.QueryResultWithProof result = generateQueryResultWithProof(queryData); + responseBuilder.setIndex(result.getLeftIndex()); + responseBuilder.setKey(req.getData()); + responseBuilder.setValue(result.getValueOrNull(0)); + responseBuilder.setHeight(result.getHeight()); + responseBuilder.setProof(result.getProof()); + responseBuilder.setLog(result.getLogValue()); + } else { + com.app.example.QueryResult result = generateQueryResult(queryData); + responseBuilder.setIndex(result.getIndex()); + responseBuilder.setValue(result.getValue()); + responseBuilder.setLog(result.getLogValue()); + } responseBuilder.setIndex(result.getIndex()); responseBuilder.setValue(ByteString.copyFrom(result.getValue())); diff --git a/lite/errors/errors.go b/lite/errors/errors.go index 61426b234..59b6380d8 100644 --- a/lite/errors/errors.go +++ b/lite/errors/errors.go @@ -41,6 +41,12 @@ func (e errUnknownValidators) Error() string { e.chainID, e.height) } +type errEmptyTree struct{} + +func (e errEmptyTree) Error() string { + return "Tree is empty" +} + //---------------------------------------- // Methods for above error types @@ -110,3 +116,18 @@ func IsErrUnknownValidators(err error) bool { } return false } + +//----------------- +// ErrEmptyTree + +func ErrEmptyTree() error { + return cmn.ErrorWrap(errEmptyTree{}, "") +} + +func IsErrEmptyTree(err error) bool { + if err_, ok := 
err.(cmn.Error); ok { + _, ok := err_.Data().(errEmptyTree) + return ok + } + return false +} diff --git a/lite/proxy/proof.go b/lite/proxy/proof.go new file mode 100644 index 000000000..452dee277 --- /dev/null +++ b/lite/proxy/proof.go @@ -0,0 +1,14 @@ +package proxy + +import ( + "github.com/tendermint/tendermint/crypto/merkle" +) + +func defaultProofRuntime() *merkle.ProofRuntime { + prt := merkle.NewProofRuntime() + prt.RegisterOpDecoder( + merkle.ProofOpSimpleValue, + merkle.SimpleValueOpDecoder, + ) + return prt +} diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 84ff98b47..3acf826b8 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -3,127 +3,95 @@ package proxy import ( "fmt" - "github.com/pkg/errors" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/lite" + lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" ) -// KeyProof represents a proof of existence or absence of a single key. -// Copied from iavl repo. TODO -type KeyProof interface { - // Verify verfies the proof is valid. To verify absence, - // the value should be nil. - Verify(key, value, root []byte) error - - // Root returns the root hash of the proof. - Root() []byte - - // Serialize itself - Bytes() []byte -} - // GetWithProof will query the key on the given node, and verify it has // a valid proof, as defined by the Verifier. // // If there is any error in checking, returns an error. -// If val is non-empty, proof should be KeyExistsProof -// If val is empty, proof should be KeyMissingProof -func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client, +func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rpcclient.Client, cert lite.Verifier) ( - val cmn.HexBytes, height int64, proof KeyProof, err error) { + val cmn.HexBytes, height int64, proof *merkle.Proof, err error) { if reqHeight < 0 { - err = errors.Errorf("Height cannot be negative") + err = cmn.NewError("Height cannot be negative") return } - _resp, proof, err := GetWithProofOptions("/key", key, - rpcclient.ABCIQueryOptions{Height: int64(reqHeight)}, + res, err := GetWithProofOptions(prt, "/key", key, + rpcclient.ABCIQueryOptions{Height: int64(reqHeight), Prove: true}, node, cert) - if _resp != nil { - resp := _resp.Response - val, height = resp.Value, resp.Height + if err != nil { + return } + + resp := res.Response + val, height = resp.Value, resp.Height return val, height, proof, err } -// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions -func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOptions, +// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions. +// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store. 
+func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts rpcclient.ABCIQueryOptions, node rpcclient.Client, cert lite.Verifier) ( - *ctypes.ResultABCIQuery, KeyProof, error) { + *ctypes.ResultABCIQuery, error) { + + if !opts.Prove { + return nil, cmn.NewError("require ABCIQueryOptions.Prove to be true") + } - _resp, err := node.ABCIQueryWithOptions(path, key, opts) + res, err := node.ABCIQueryWithOptions(path, key, opts) if err != nil { - return nil, nil, err + return nil, err } - resp := _resp.Response + resp := res.Response - // make sure the proof is the proper height + // Validate the response, e.g. height. if resp.IsErr() { - err = errors.Errorf("Query error for key %d: %d", key, resp.Code) - return nil, nil, err + err = cmn.NewError("Query error for key %d: %d", key, resp.Code) + return nil, err } - if len(resp.Key) == 0 || len(resp.Proof) == 0 { - return nil, nil, ErrNoData() + + if len(resp.Key) == 0 || resp.Proof == nil { + return nil, lerr.ErrEmptyTree() } if resp.Height == 0 { - return nil, nil, errors.New("Height returned is zero") + return nil, cmn.NewError("Height returned is zero") } // AppHash for height H is in header H+1 signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert) if err != nil { - return nil, nil, err + return nil, err } - _ = signedHeader - return &ctypes.ResultABCIQuery{Response: resp}, nil, nil - - /* // TODO refactor so iavl stuff is not in tendermint core - // https://github.com/tendermint/tendermint/issues/1183 - if len(resp.Value) > 0 { - // The key was found, construct a proof of existence. - proof, err := iavl.ReadKeyProof(resp.Proof) + // Validate the proof against the certified header to ensure data integrity. + if resp.Value != nil { + // Value exists + // XXX How do we encode the key into a string... + err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, string(resp.Key), resp.Value) if err != nil { - return nil, nil, errors.Wrap(err, "Error reading proof") + return nil, cmn.ErrorWrap(err, "Couldn't verify value proof") } - - eproof, ok := proof.(*iavl.KeyExistsProof) - if !ok { - return nil, nil, errors.New("Expected KeyExistsProof for non-empty value") - } - + return &ctypes.ResultABCIQuery{Response: resp}, nil + } else { + // Value absent // Validate the proof against the certified header to ensure data integrity. - err = eproof.Verify(resp.Key, resp.Value, signedHeader.AppHash) + // XXX How do we encode the key into a string... + err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key)) if err != nil { - return nil, nil, errors.Wrap(err, "Couldn't verify proof") + return nil, cmn.ErrorWrap(err, "Couldn't verify absence proof") } - return &ctypes.ResultABCIQuery{Response: resp}, eproof, nil - } - - // The key wasn't found, construct a proof of non-existence. - proof, err := iavl.ReadKeyProof(resp.Proof) - if err != nil { - return nil, nil, errors.Wrap(err, "Error reading proof") - } - - aproof, ok := proof.(*iavl.KeyAbsentProof) - if !ok { - return nil, nil, errors.New("Expected KeyAbsentProof for empty Value") - } - - // Validate the proof against the certified header to ensure data integrity. 
- err = aproof.Verify(resp.Key, nil, signedHeader.AppHash) - if err != nil { - return nil, nil, errors.Wrap(err, "Couldn't verify proof") + return &ctypes.ResultABCIQuery{Response: resp}, nil } - return &ctypes.ResultABCIQuery{Response: resp}, aproof, ErrNoData() - */ } // GetCertifiedCommit gets the signed header for a given height and certifies diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 7f759cc69..0e30d7558 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -4,12 +4,12 @@ import ( "fmt" "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/lite" certclient "github.com/tendermint/tendermint/lite/client" nm "github.com/tendermint/tendermint/node" @@ -20,6 +20,7 @@ import ( var node *nm.Node var chainID = "tendermint_test" // TODO use from config. +var waitForEventTimeout = 5 * time.Second // TODO fix tests!! @@ -38,70 +39,87 @@ func kvstoreTx(k, v []byte) []byte { return []byte(fmt.Sprintf("%s=%s", k, v)) } +// TODO: enable it after general proof format has been adapted +// in abci/examples/kvstore.go func _TestAppProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) + prt := defaultProofRuntime() cl := client.NewLocal(node) client.WaitForHeight(cl, 1, nil) + // This sets up our trust on the node based on some past point. + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, 1, 1) + require.NoError(err, "%#v", err) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) + + // Wait for tx confirmation. + done := make(chan int64) + go func() { + evtTyp := types.EventTx + _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout) + require.Nil(err, "%#v", err) + close(done) + }() + + // Submit a transaction. k := []byte("my-key") v := []byte("my-value") - tx := kvstoreTx(k, v) br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.DeliverTx.Code) brh := br.Height - // This sets up our trust on the node based on some past point. - source := certclient.NewProvider(chainID, cl) - seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) - require.NoError(err, "%+v", err) - cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) - - client.WaitForHeight(cl, 3, nil) + // Fetch latest after tx commit. + <-done latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) rootHash := latest.SignedHeader.AppHash + if rootHash == nil { + // Fetch one block later, AppHash hasn't been committed yet. + // TODO find a way to avoid doing this. 
+ client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil) + latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1) + require.NoError(err, "%#v", err) + rootHash = latest.SignedHeader.AppHash + } + require.NotNil(rootHash) // verify a query before the tx block has no data (and valid non-exist proof) - bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) - fmt.Println(bs, height, proof, err) - require.NotNil(err) - require.True(IsErrNoData(err), err.Error()) + bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert) + require.NoError(err, "%#v", err) + // require.NotNil(proof) + // TODO: Ensure that *some* keys will be there, ensuring that proof is nil, + // (currently there's a race condition) + // and ensure that proof proves absence of k. require.Nil(bs) // but given that block it is good - bs, height, proof, err = GetWithProof(k, brh, cl, cert) - require.NoError(err, "%+v", err) + bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert) + require.NoError(err, "%#v", err) require.NotNil(proof) - require.True(height >= int64(latest.Height())) - - // Alexis there is a bug here, somehow the above code gives us rootHash = nil - // and proof.Verify doesn't care, while proofNotExists.Verify fails. - // I am hacking this in to make it pass, but please investigate further. - rootHash = proof.Root() + require.Equal(height, brh) - //err = wire.ReadBinaryBytes(bs, &data) - //require.NoError(err, "%+v", err) assert.EqualValues(v, bs) - err = proof.Verify(k, bs, rootHash) - assert.NoError(err, "%+v", err) + err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding + assert.NoError(err, "%#v", err) // Test non-existing key. missing := []byte("my-missing-key") - bs, _, proof, err = GetWithProof(missing, 0, cl, cert) - require.True(IsErrNoData(err)) + bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert) + require.NoError(err) require.Nil(bs) require.NotNil(proof) - err = proof.Verify(missing, nil, rootHash) - assert.NoError(err, "%+v", err) - err = proof.Verify(k, nil, rootHash) - assert.Error(err) + err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding + assert.NoError(err, "%#v", err) + err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding + assert.Error(err, "%#v", err) } -func _TestTxProofs(t *testing.T) { +func TestTxProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) cl := client.NewLocal(node) @@ -109,15 +127,15 @@ func _TestTxProofs(t *testing.T) { tx := kvstoreTx([]byte("key-a"), []byte("value-a")) br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.DeliverTx.Code) brh := br.Height source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) - require.NoError(err, "%+v", err) - cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) + require.NoError(err, "%#v", err) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) // First let's make sure a bogus transaction hash returns a valid non-existence proof. key := types.Tx([]byte("bogus")).Hash() @@ -128,12 +146,12 @@ func _TestTxProofs(t *testing.T) { // Now let's check with the real tx hash. 
key = types.Tx(tx).Hash() res, err = cl.Tx(key, true) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.NotNil(res) err = res.Proof.Validate(key) - assert.NoError(err, "%+v", err) + assert.NoError(err, "%#v", err) commit, err := GetCertifiedCommit(br.Height, cl, cert) - require.Nil(err, "%+v", err) + require.Nil(err, "%#v", err) require.Equal(res.Proof.RootHash, commit.Header.DataHash) } diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 4c0df0229..7ddb3b8ad 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -3,6 +3,7 @@ package proxy import ( cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/lite" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -15,6 +16,7 @@ var _ rpcclient.Client = Wrapper{} type Wrapper struct { rpcclient.Client cert *lite.DynamicVerifier + prt *merkle.ProofRuntime } // SecureClient uses a given Verifier to wrap an connection to an untrusted @@ -22,7 +24,8 @@ type Wrapper struct { // // If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper { - wrap := Wrapper{c, cert} + prt := defaultProofRuntime() + wrap := Wrapper{c, cert, prt} // TODO: no longer possible as no more such interface exposed.... // if we wrap http client, then we can swap out the event switch to filter // if hc, ok := c.(*rpcclient.HTTP); ok { @@ -36,7 +39,7 @@ func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper { func (w Wrapper) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, _, err := GetWithProofOptions(path, data, opts, w.Client, w.cert) + res, err := GetWithProofOptions(w.prt, path, data, opts, w.Client, w.cert) return res, err } diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index a9c64f5da..a1b59ffa0 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -75,7 +75,7 @@ func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuer func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.rpc.Call("abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "trusted": opts.Trusted}, + map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) if err != nil { return nil, errors.Wrap(err, "ABCIQuery") diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index b3c5e3090..8d89b7150 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -61,7 +61,7 @@ func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue } func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) + return core.ABCIQuery(path, data, opts.Height, opts.Prove) } func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 022e4f363..3a0ed79cd 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -31,10 +31,18 @@ func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQu } func 
(a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - q := a.App.Query(abci.RequestQuery{Data: data, Path: path, Height: opts.Height, Prove: opts.Trusted}) + q := a.App.Query(abci.RequestQuery{ + Data: data, + Path: path, + Height: opts.Height, + Prove: opts.Prove, + }) return &ctypes.ResultABCIQuery{q}, nil } +// NOTE: Caller should call a.App.Commit() separately, +// this function does not actually wait for a commit. +// TODO: Make it wait for a commit and set res.Height appropriately. func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res := ctypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(tx) @@ -42,6 +50,7 @@ func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit return &res, nil } res.DeliverTx = a.App.DeliverTx(tx) + res.Height = -1 // TODO return &res, nil } @@ -86,7 +95,7 @@ func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQ } func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Trusted}) + res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) if err != nil { return nil, err } @@ -133,10 +142,10 @@ func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { } type QueryArgs struct { - Path string - Data cmn.HexBytes - Height int64 - Trusted bool + Path string + Data cmn.HexBytes + Height int64 + Prove bool } func (r *ABCIRecorder) addCall(call Call) { @@ -161,7 +170,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts res, err := r.Client.ABCIQueryWithOptions(path, data, opts) r.addCall(Call{ Name: "abci_query", - Args: QueryArgs{path, data, opts.Height, opts.Trusted}, + Args: QueryArgs{path, data, opts.Height, opts.Prove}, Response: res, Error: err, }) diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 327ec9e7b..ca220c84e 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -51,7 +51,7 @@ func TestABCIMock(t *testing.T) { assert.Equal("foobar", err.Error()) // query always returns the response - _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) + _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Prove: false}) query := _query.Response require.Nil(err) require.NotNil(query) @@ -98,7 +98,7 @@ func TestABCIRecorder(t *testing.T) { _, err := r.ABCIInfo() assert.Nil(err, "expected no err on info") - _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Trusted: false}) + _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Prove: false}) assert.NotNil(err, "expected error on query") require.Equal(2, len(r.Calls)) @@ -122,7 +122,7 @@ func TestABCIRecorder(t *testing.T) { require.True(ok) assert.Equal("path", qa.Path) assert.EqualValues("data", qa.Data) - assert.False(qa.Trusted) + assert.False(qa.Prove) // now add some broadcasts (should all err) txs := []types.Tx{{1}, {2}, {3}} @@ -173,9 +173,17 @@ func TestABCIApp(t *testing.T) { require.NotNil(res.DeliverTx) assert.True(res.DeliverTx.IsOK()) + // commit + // TODO: This may not be necessary in the future + if res.Height == -1 { + m.App.Commit() + } + // check the key - _qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), 
client.ABCIQueryOptions{Trusted: true}) + _qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Prove: true}) qres := _qres.Response require.Nil(err) assert.EqualValues(value, qres.Value) + + // XXX Check proof } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index c57878499..ef2d4f197 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -1,3 +1,5 @@ +package mock + /* package mock returns a Client implementation that accepts various (mock) implementations of the various methods. @@ -11,7 +13,6 @@ For real clients, you probably want the "http" package. If you want to directly call a tendermint node in process, you can use the "local" package. */ -package mock import ( "reflect" @@ -87,7 +88,7 @@ func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue } func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) + return core.ABCIQuery(path, data, opts.Height, opts.Prove) } func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 767ae6847..602525b51 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -166,10 +166,10 @@ func TestAppCalls(t *testing.T) { if err := client.WaitForHeight(c, apph, nil); err != nil { t.Error(err) } - _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) + _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: false}) qres := _qres.Response if assert.Nil(err) && assert.True(qres.IsOK()) { - // assert.Equal(k, data.GetKey()) // only returned for proofs + assert.Equal(k, qres.Key) assert.EqualValues(v, qres.Value) } @@ -221,10 +221,12 @@ func TestAppCalls(t *testing.T) { assert.Equal(block.Block.LastCommit, commit2.Commit) // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) + _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: true}) pres := _pres.Response assert.Nil(err) assert.True(pres.IsOK()) + + // XXX Test proof } } @@ -310,7 +312,7 @@ func TestTx(t *testing.T) { // time to verify the proof proof := ptx.Proof if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) } } } @@ -348,7 +350,7 @@ func TestTxSearch(t *testing.T) { // time to verify the proof proof := ptx.Proof if assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) } // query by height @@ -362,7 +364,7 @@ func TestTxSearch(t *testing.T) { require.Len(t, result.Txs, 0) // we query using a tag (see kvstore application) - result, err = c.TxSearch("app.creator='jae'", false, 1, 30) + result, err = c.TxSearch("app.creator='Cosmoshi Netowoko'", false, 1, 30) require.Nil(t, err, "%+v", err) if len(result.Txs) == 0 { t.Fatal("expected a lot of transactions") diff --git a/rpc/client/types.go b/rpc/client/types.go index 89bd2f98c..6a23fa450 100644 --- a/rpc/client/types.go +++ b/rpc/client/types.go @@ -3,10 +3,9 @@ package client // ABCIQueryOptions can be used to provide options for ABCIQuery call other // than the DefaultABCIQueryOptions. type ABCIQueryOptions struct { - Height int64 - Trusted bool + Height int64 + Prove bool } -// DefaultABCIQueryOptions are latest height (0) and trusted equal to false -// (which will result in a proof being returned). -var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Trusted: false} +// DefaultABCIQueryOptions are latest height (0) and prove false. +var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false} diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 9c7af92cd..47219563c 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -1,8 +1,6 @@ package core import ( - "fmt" - abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -12,7 +10,7 @@ import ( // Query the application for some information. 
// // ```shell -// curl 'localhost:26657/abci_query?path=""&data="abcd"&trusted=false' +// curl 'localhost:26657/abci_query?path=""&data="abcd"&prove=false' // ``` // // ```go @@ -47,18 +45,14 @@ import ( // |-----------+--------+---------+----------+------------------------------------------------| // | path | string | false | false | Path to the data ("/a/b/c") | // | data | []byte | false | true | Data | -// | height | int64 | 0 | false | Height (0 means latest) | -// | trusted | bool | false | false | Does not include a proof of the data inclusion | -func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) { - if height < 0 { - return nil, fmt.Errorf("height must be non-negative") - } - +// | height | int64 | 0 | false | Height (0 means latest) | +// | prove | bool | false | false | Includes proof if true | +func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) { resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ Path: path, Data: data, Height: height, - Prove: !trusted, + Prove: prove, }) if err != nil { return nil, err diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh index 67f6b583c..034e28878 100755 --- a/test/app/kvstore_test.sh +++ b/test/app/kvstore_test.sh @@ -41,7 +41,7 @@ set -e # we should not be able to look up the value RESPONSE=`abci-cli query \"$VALUE\"` set +e -A=`echo $RESPONSE | grep $VALUE` +A=`echo $RESPONSE | grep \"value: $VALUE\"` if [[ $? == 0 ]]; then echo "Found '$VALUE' for $VALUE when we should not have. Response:" echo "$RESPONSE" diff --git a/types/block.go b/types/block.go index 14f975483..5610cc799 100644 --- a/types/block.go +++ b/types/block.go @@ -709,7 +709,6 @@ func (h hasher) Hash() []byte { } } return hasher.Sum(nil) - } func aminoHash(item interface{}) []byte { diff --git a/types/part_set.go b/types/part_set.go index f6d7f6b6e..8c8151ba8 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -190,7 +190,7 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { } // Check hash proof - if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { + if part.Proof.Verify(ps.Hash(), part.Hash()) != nil { return false, ErrPartSetInvalidProof } diff --git a/types/results_test.go b/types/results_test.go index 8cbe319ff..808033850 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -38,8 +38,8 @@ func TestABCIResults(t *testing.T) { for i, res := range results { proof := results.ProveResult(i) - valid := proof.Verify(i, len(results), res.Hash(), root) - assert.True(t, valid, "%d", i) + valid := proof.Verify(root, res.Hash()) + assert.NoError(t, valid, "%d", i) } } diff --git a/types/tx.go b/types/tx.go index 489f0b232..41fc310f1 100644 --- a/types/tx.go +++ b/types/tx.go @@ -77,8 +77,6 @@ func (txs Txs) Proof(i int) TxProof { root, proofs := merkle.SimpleProofsFromHashers(hashers) return TxProof{ - Index: i, - Total: l, RootHash: root, Data: txs[i], Proof: *proofs[i], @@ -87,10 +85,9 @@ func (txs Txs) Proof(i int) TxProof { // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. type TxProof struct { - Index, Total int - RootHash cmn.HexBytes - Data Tx - Proof merkle.SimpleProof + RootHash cmn.HexBytes + Data Tx + Proof merkle.SimpleProof } // LeadHash returns the hash of the this proof refers to. 
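With Index and Total folded into the embedded merkle.SimpleProof, transaction inclusion proofs are verified the same way as any other simple proof. A minimal sketch of the new layout follows; the sample transactions and the use of txs.Hash() as a stand-in for the block header's DataHash are illustrative, not part of the patch.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	txs := types.Txs{types.Tx("k1=v1"), types.Tx("k2=v2"), types.Tx("k3=v3")}
	root := txs.Hash() // stands in for the block header's DataHash

	proof := txs.Proof(1)

	// Index and Total now live on the embedded merkle.SimpleProof.
	fmt.Println("index:", proof.Proof.Index, "total:", proof.Proof.Total)

	// Validate delegates to the new SimpleProof.Verify(rootHash, leafHash).
	if err := proof.Validate(root); err != nil {
		fmt.Println("invalid proof:", err)
		return
	}
	fmt.Println("tx inclusion proof OK")
}
```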
@@ -104,14 +101,14 @@ func (tp TxProof) Validate(dataHash []byte) error { if !bytes.Equal(dataHash, tp.RootHash) { return errors.New("Proof matches different data hash") } - if tp.Index < 0 { + if tp.Proof.Index < 0 { return errors.New("Proof index cannot be negative") } - if tp.Total <= 0 { + if tp.Proof.Total <= 0 { return errors.New("Proof total must be positive") } - valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash) - if !valid { + valid := tp.Proof.Verify(tp.RootHash, tp.LeafHash()) + if valid != nil { return errors.New("Proof is not internally consistent") } return nil diff --git a/types/tx_test.go b/types/tx_test.go index df7a74496..9fb8ff34d 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -69,8 +69,8 @@ func TestValidTxProof(t *testing.T) { leaf := txs[i] leafHash := leaf.Hash() proof := txs.Proof(i) - assert.Equal(t, i, proof.Index, "%d: %d", h, i) - assert.Equal(t, len(txs), proof.Total, "%d: %d", h, i) + assert.Equal(t, i, proof.Proof.Index, "%d: %d", h, i) + assert.Equal(t, len(txs), proof.Proof.Total, "%d: %d", h, i) assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) assert.EqualValues(t, leaf, proof.Data, "%d: %d", h, i) assert.EqualValues(t, leafHash, proof.LeafHash(), "%d: %d", h, i) @@ -128,7 +128,7 @@ func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { // This can happen if we have a slightly different total (where the // path ends up the same). If it is something else, we have a real // problem. - assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good) + assert.NotEqual(t, proof.Proof.Total, good.Proof.Total, "bad: %#v\ngood: %#v", proof, good) } } } From f36ed7e7ffb83d4a28bc38eab5686e17238e6be1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 28 Sep 2018 23:32:13 -0400 Subject: [PATCH 014/113] General Merkle Follow Up (#2510) * tmlibs -> libs * update changelog * address some comments from review of #2298 --- CHANGELOG_PENDING.md | 16 ++++++++++++---- README.md | 11 ++++++----- config/config.go | 2 +- crypto/merkle/proof.go | 24 +++++++++++++----------- crypto/merkle/proof_key_path.go | 6 +++++- crypto/merkle/proof_simple_value.go | 2 +- libs/common/types.pb.go | 4 ++-- lite/doc.go | 2 +- lite/dynamic_verifier.go | 6 +++--- 9 files changed, 44 insertions(+), 29 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c6346f6a8..bf381dce2 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,21 +5,29 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config -- [config] `mempool.wal` is disabled by default + * [config] `mempool.wal` is disabled by default + * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default + behaviour to `prove=false` + * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer). * Apps + * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just + arbitrary bytes * Go API -- [node] Remove node.RunForever -- [config] \#2232 timeouts as time.Duration, not ints + * [node] Remove node.RunForever + * [config] \#2232 timeouts as time.Duration, not ints + * [rpc/client] \#2298 `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` + * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`. 
+ * [crypto/merkle & lite] \#2298 Various changes to accomodate General Merkle trees * Blockchain Protocol * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. - * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer). * P2P Protocol FEATURES: +- [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics diff --git a/README.md b/README.md index 2e4146f40..069f9f13e 100644 --- a/README.md +++ b/README.md @@ -118,11 +118,12 @@ CHANGELOG even if they don't lead to MINOR version bumps: - rpc/client - config - node -- libs/bech32 -- libs/common -- libs/db -- libs/errors -- libs/log +- libs + - bech32 + - common + - db + - errors + - log Exported objects in these packages that are not covered by the versioning scheme are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any diff --git a/config/config.go b/config/config.go index 2ccb49083..619c0410f 100644 --- a/config/config.go +++ b/config/config.go @@ -20,7 +20,7 @@ const ( // generate the config.toml. Please reflect any changes // made here in the defaultConfigTemplate constant in // config/toml.go -// NOTE: tmlibs/cli must know to look in the config dir! +// NOTE: libs/cli must know to look in the config dir! var ( DefaultTendermintDir = ".tendermint" defaultConfigDir = "config" diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 7da894953..3059ed3b7 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -3,17 +3,19 @@ package merkle import ( "bytes" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //---------------------------------------- // ProofOp gets converted to an instance of ProofOperator: -// ProofOperator is a layer for calculating intermediate Merkle root -// Run() takes a list of bytes because it can be more than one -// for example in range proofs -// ProofOp() defines custom encoding which can be decoded later with -// OpDecoder +// ProofOperator is a layer for calculating intermediate Merkle roots +// when a series of Merkle trees are chained together. +// Run() takes leaf values from a tree and returns the Merkle +// root for the corresponding tree. It takes and returns a list of bytes +// to allow multiple leaves to be part of a single proof, for instance in a range proof. +// ProofOp() encodes the ProofOperator in a generic way so it can later be +// decoded with OpDecoder. type ProofOperator interface { Run([][]byte) ([][]byte, error) GetKey() []byte @@ -23,8 +25,8 @@ type ProofOperator interface { //---------------------------------------- // Operations on a list of ProofOperators -// ProofOperators is a slice of ProofOperator(s) -// Each operator will be applied to the input value sequencially +// ProofOperators is a slice of ProofOperator(s). 
+// Each operator will be applied to the input value sequentially // and the last Merkle root will be verified with already known data type ProofOperators []ProofOperator @@ -91,8 +93,8 @@ func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { return decoder(pop) } -func (prt *ProofRuntime) DecodeProof(proof *Proof) (poz ProofOperators, err error) { - poz = ProofOperators(nil) +func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) { + var poz ProofOperators for _, pop := range proof.Ops { operator, err := prt.Decode(pop) if err != nil { @@ -100,7 +102,7 @@ func (prt *ProofRuntime) DecodeProof(proof *Proof) (poz ProofOperators, err erro } poz = append(poz, operator) } - return + return poz, nil } func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) { diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go index d74dac51d..aec93e826 100644 --- a/crypto/merkle/proof_key_path.go +++ b/crypto/merkle/proof_key_path.go @@ -35,6 +35,8 @@ import ( kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingURL) kp.String() // Should return "/App/IBC/x:010203" + NOTE: Key paths must begin with a `/`. + NOTE: All encodings *MUST* work compatibly, such that you can choose to use whatever encoding, and the decoded keys will always be the same. In other words, it's just as good to encode all three keys using URL encoding or HEX @@ -52,7 +54,7 @@ type keyEncoding int const ( KeyEncodingURL keyEncoding = iota KeyEncodingHex - KeyEncodingMax + KeyEncodingMax // Number of known encodings. Used for testing ) type Key struct { @@ -81,6 +83,8 @@ func (pth KeyPath) String() string { return res } +// Decode a path to a list of keys. Path must begin with `/`. +// Each key must use a known encoding. func KeyPathToKeys(path string) (keys [][]byte, err error) { if path == "" || path[0] != '/' { return nil, cmn.NewError("key path string must start with a forward slash '/'") diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go index 28935e2cc..5b7b52329 100644 --- a/crypto/merkle/proof_simple_value.go +++ b/crypto/merkle/proof_simple_value.go @@ -25,7 +25,7 @@ type SimpleValueOp struct { key []byte // To encode in ProofOp.Data - Proof *SimpleProof `json:"simple-proof"` + Proof *SimpleProof `json:"simple_proof"` } var _ ProofOperator = SimpleValueOp{} diff --git a/libs/common/types.pb.go b/libs/common/types.pb.go index 9cd62273b..716d28a06 100644 --- a/libs/common/types.pb.go +++ b/libs/common/types.pb.go @@ -26,7 +26,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// Define these here for compatibility but use tmlibs/common.KVPair. +// Define these here for compatibility but use libs/common.KVPair. type KVPair struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -82,7 +82,7 @@ func (m *KVPair) GetValue() []byte { return nil } -// Define these here for compatibility but use tmlibs/common.KI64Pair. +// Define these here for compatibility but use libs/common.KI64Pair. 
type KI64Pair struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` diff --git a/lite/doc.go b/lite/doc.go index 2a0ba23ea..00dcce68c 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -88,7 +88,7 @@ type PersistentProvider interface { } ``` -* DBProvider - persistence provider for use with any tmlibs/DB. +* DBProvider - persistence provider for use with any libs/DB. * MultiProvider - combine multiple providers. The suggested use for local light clients is client.NewHTTPProvider(...) for diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go index 2dee69f9d..6a7720913 100644 --- a/lite/dynamic_verifier.go +++ b/lite/dynamic_verifier.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "sync" + log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" @@ -25,10 +26,9 @@ type DynamicVerifier struct { // This is a source of new info, like a node rpc, or other import method. source Provider - // pending map for synchronize concurrent verification requests + // pending map to synchronize concurrent verification requests + mtx sync.Mutex pendingVerifications map[int64]chan struct{} - - mtx sync.Mutex } // NewDynamicVerifier returns a new DynamicVerifier. It uses the From ead9fc0179ac773f93dd044b10b9c3ba48269e0f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 30 Sep 2018 12:35:52 -0400 Subject: [PATCH 015/113] Docs cleanup (#2522) * minor doc cleanup * docs/tools: link fixes and readme * docs/networks: networks/local/README.md * docs: update vuepress config * docs: fixes from review --- docs/.vuepress/config.js | 6 +- docs/README.md | 46 ++-- docs/introduction/README.md | 15 ++ docs/introduction/introduction.md | 2 + docs/introduction/quick-start.md | 36 +-- docs/introduction/what-is-tendermint.md | 332 ++++++++++++++++++++++++ docs/networks/README.md | 9 + docs/networks/deploy-testnets.md | 23 +- docs/networks/docker-compose.md | 85 ++++++ docs/networks/terraform-and-ansible.md | 8 +- docs/spec/abci/README.md | 2 +- docs/spec/abci/abci.md | 6 +- docs/tendermint-core/README.md | 4 + docs/tools/README.md | 4 + docs/tools/benchmarking.md | 2 +- docs/tools/monitoring.md | 8 +- networks/local/README.md | 4 + networks/remote/README.md | 2 +- 18 files changed, 504 insertions(+), 90 deletions(-) create mode 100644 docs/introduction/README.md create mode 100644 docs/introduction/what-is-tendermint.md create mode 100644 docs/networks/README.md create mode 100644 docs/networks/docker-compose.md create mode 100644 docs/tendermint-core/README.md create mode 100644 docs/tools/README.md diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 892ea2042..b4e2c3fa2 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -11,12 +11,12 @@ module.exports = { nav: [{ text: "Back to Tendermint", link: "https://tendermint.com" }], sidebar: [ { - title: "Getting Started", + title: "Introduction", collapsable: false, children: [ "/introduction/quick-start", "/introduction/install", - "/introduction/introduction" + "/introduction/what-is-tendermint" ] }, { @@ -48,7 +48,7 @@ module.exports = { title: "Networks", collapsable: false, children: [ - "/networks/deploy-testnets", + "/networks/docker-compose", "/networks/terraform-and-ansible", ] }, diff --git a/docs/README.md b/docs/README.md index 2ecf625e8..15ce74e39 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,43 +1,27 @@ # Tendermint -Welcome to 
the Tendermint Core documentation! Below you'll find an -overview of the documentation. +Welcome to the Tendermint Core documentation! -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state -transition machine - written in any programming language - and securely -replicates it on many machines. In other words, a blockchain. +Tendermint Core is a blockchain application platform; it provides the equivalent +of a web-server, database, and supporting libraries for blockchain applications +written in any programming language. Like a web-server serving web applications, +Tendermint serves blockchain applications. -Tendermint requires an application running over the Application Blockchain -Interface (ABCI) - and comes packaged with an example application to do so. +More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT) +State Machine Replication (SMR) for arbitrary deterministic, finite state machines. +For more background, see [What is +Tendermint?](introduction/what-is-tendermint.md). -## Getting Started +To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). -Here you'll find quick start guides and links to more advanced "get up and running" -documentation. +To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci). -## Core +For more details on using Tendermint, see the respective documentation for +[Tendermint Core](tendermint-core), [benchmarking and monitoring](tools), and [network deployments](networks). -Details about the core functionality and configuration of Tendermint. +## Contribute -## Tools - -Benchmarking and monitoring tools. - -## Networks - -Setting up testnets manually or automated, local or in the cloud. - -## Apps - -Building appplications with the ABCI. - -## Specification - -Dive deep into the spec. There's one for each Tendermint and the ABCI - -## Edit the Documentation - -See [this file](./DOCS_README.md) for details of the build process and +To contribute to the documentation, see [this file](./DOCS_README.md) for details of the build process and considerations when making changes. ## Version diff --git a/docs/introduction/README.md b/docs/introduction/README.md new file mode 100644 index 000000000..ad9a93dd9 --- /dev/null +++ b/docs/introduction/README.md @@ -0,0 +1,15 @@ +# Introduction + +## Quick Start + +Get Tendermint up-and-running quickly with the [quick-start guide](quick-start.md)! + +## Install + +Detailed [installation instructions](install.md). + +## What is Tendermint? + +Dive into [what Tendermint is and why](what-is-tendermint.md)! + + diff --git a/docs/introduction/introduction.md b/docs/introduction/introduction.md index 389bf9658..f80a159ca 100644 --- a/docs/introduction/introduction.md +++ b/docs/introduction/introduction.md @@ -1,5 +1,7 @@ # What is Tendermint? +DEPRECATED! See [What is Tendermint?](what-is-tendermint.md). + Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index c10ba10a1..05facadf4 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -1,4 +1,4 @@ -# Tendermint +# Quick Start ## Overview @@ -9,45 +9,21 @@ works and want to get started right away, continue. 
### Quick Install -On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/fFfOR), like so: +To quickly get Tendermint installed on a fresh +Ubuntu 16.04 machine, use [this script](https://git.io/fFfOR). + +WARNING: do not run this on your local machine. ``` curl -L https://git.io/fFfOR | bash source ~/.profile ``` -WARNING: do not run the above on your local machine. - The script is also used to facilitate cluster deployment below. ### Manual Install -Requires: - -- `go` minimum version 1.10 -- `$GOPATH` environment variable must be set -- `$GOPATH/bin` must be on your `$PATH` (see [here](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)) - -To install Tendermint, run: - -``` -go get github.com/tendermint/tendermint -cd $GOPATH/src/github.com/tendermint/tendermint -make get_tools && make get_vendor_deps -make install -``` - -Note that `go get` may return an error but it can be ignored. - -Confirm installation: - -``` -$ tendermint version -0.23.0 -``` - -Note: see the [releases page](https://github.com/tendermint/tendermint/releases) and the latest version -should match what you see above. +For manual installation, see the [install instructions](install.md) ## Initialization diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md new file mode 100644 index 000000000..389bf9658 --- /dev/null +++ b/docs/introduction/what-is-tendermint.md @@ -0,0 +1,332 @@ +# What is Tendermint? + +Tendermint is software for securely and consistently replicating an +application on many machines. By securely, we mean that Tendermint works +even if up to 1/3 of machines fail in arbitrary ways. By consistently, +we mean that every non-faulty machine sees the same transaction log and +computes the same state. Secure and consistent replication is a +fundamental problem in distributed systems; it plays a critical role in +the fault tolerance of a broad range of applications, from currencies, +to elections, to infrastructure orchestration, and beyond. + +The ability to tolerate machines failing in arbitrary ways, including +becoming malicious, is known as Byzantine fault tolerance (BFT). The +theory of BFT is decades old, but software implementations have only +became popular recently, due largely to the success of "blockchain +technology" like Bitcoin and Ethereum. Blockchain technology is just a +reformalization of BFT in a more modern setting, with emphasis on +peer-to-peer networking and cryptographic authentication. The name +derives from the way transactions are batched in blocks, where each +block contains a cryptographic hash of the previous one, forming a +chain. In practice, the blockchain data structure actually optimizes BFT +design. + +Tendermint consists of two chief technical components: a blockchain +consensus engine and a generic application interface. The consensus +engine, called Tendermint Core, ensures that the same transactions are +recorded on every machine in the same order. The application interface, +called the Application BlockChain Interface (ABCI), enables the +transactions to be processed in any programming language. Unlike other +blockchain and consensus solutions, which come pre-packaged with built +in state machines (like a fancy key-value store, or a quirky scripting +language), developers can use Tendermint for BFT state machine +replication of applications written in whatever programming language and +development environment is right for them. 
+ +Tendermint is designed to be easy-to-use, simple-to-understand, highly +performant, and useful for a wide variety of distributed applications. + +## Tendermint vs. X + +Tendermint is broadly similar to two classes of software. The first +class consists of distributed key-value stores, like Zookeeper, etcd, +and consul, which use non-BFT consensus. The second class is known as +"blockchain technology", and consists of both cryptocurrencies like +Bitcoin and Ethereum, and alternative distributed ledger designs like +Hyperledger's Burrow. + +### Zookeeper, etcd, consul + +Zookeeper, etcd, and consul are all implementations of a key-value store +atop a classical, non-BFT consensus algorithm. Zookeeper uses a version +of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use +the Raft consensus algorithm, which is much younger and simpler. A +typical cluster contains 3-5 machines, and can tolerate crash failures +in up to 1/2 of the machines, but even a single Byzantine fault can +destroy the system. + +Each offering provides a slightly different implementation of a +featureful key-value store, but all are generally focused around +providing basic services to distributed systems, such as dynamic +configuration, service discovery, locking, leader-election, and so on. + +Tendermint is in essence similar software, but with two key differences: + +- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a + 1/3 of failures, but those failures can include arbitrary behaviour - + including hacking and malicious attacks. - It does not specify a + particular application, like a fancy key-value store. Instead, it + focuses on arbitrary state machine replication, so developers can build + the application logic that's right for them, from key-value store to + cryptocurrency to e-voting platform and beyond. + +The layout of this Tendermint website content is also ripped directly +and without shame from [consul.io](https://www.consul.io/) and the other +[Hashicorp sites](https://www.hashicorp.com/#tools). + +### Bitcoin, Ethereum, etc. + +Tendermint emerged in the tradition of cryptocurrencies like Bitcoin, +Ethereum, etc. with the goal of providing a more efficient and secure +consensus algorithm than Bitcoin's Proof of Work. In the early days, +Tendermint had a simple currency built in, and to participate in +consensus, users had to "bond" units of the currency into a security +deposit which could be revoked if they misbehaved -this is what made +Tendermint a Proof-of-Stake algorithm. + +Since then, Tendermint has evolved to be a general purpose blockchain +consensus engine that can host arbitrary application states. That means +it can be used as a plug-and-play replacement for the consensus engines +of other blockchain software. So one can take the current Ethereum code +base, whether in Rust, or Go, or Haskell, and run it as a ABCI +application using Tendermint consensus. Indeed, [we did that with +Ethereum](https://github.com/cosmos/ethermint). And we plan to do +the same for Bitcoin, ZCash, and various other deterministic +applications as well. + +Another example of a cryptocurrency application built on Tendermint is +[the Cosmos network](http://cosmos.network). + +### Other Blockchain Projects + +[Fabric](https://github.com/hyperledger/fabric) takes a similar approach +to Tendermint, but is more opinionated about how the state is managed, +and requires that all application behaviour runs in potentially many +docker containers, modules it calls "chaincode". 
It uses an +implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf). +from a team at IBM that is [augmented to handle potentially +non-deterministic +chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf) It is +possible to implement this docker-based behaviour as a ABCI app in +Tendermint, though extending Tendermint to handle non-determinism +remains for future work. + +[Burrow](https://github.com/hyperledger/burrow) is an implementation of +the Ethereum Virtual Machine and Ethereum transaction mechanics, with +additional features for a name-registry, permissions, and native +contracts, and an alternative blockchain API. It uses Tendermint as its +consensus engine, and provides a particular application state. + +## ABCI Overview + +The [Application BlockChain Interface +(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci) +allows for Byzantine Fault Tolerant replication of applications +written in any programming language. + +### Motivation + +Thus far, all blockchains "stacks" (such as +[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic +design. That is, each blockchain stack is a single program that handles +all the concerns of a decentralized ledger; this includes P2P +connectivity, the "mempool" broadcasting of transactions, consensus on +the most recent block, account balances, Turing-complete contracts, +user-level permissions, etc. + +Using a monolithic architecture is typically bad practice in computer +science. It makes it difficult to reuse components of the code, and +attempts to do so result in complex maintenance procedures for forks of +the codebase. This is especially true when the codebase is not modular +in design and suffers from "spaghetti code". + +Another problem with monolithic design is that it limits you to the +language of the blockchain stack (or vice versa). In the case of +Ethereum which supports a Turing-complete bytecode virtual-machine, it +limits you to languages that compile down to that bytecode; today, those +are Serpent and Solidity. + +In contrast, our approach is to decouple the consensus engine and P2P +layers from the details of the application state of the particular +blockchain application. We do this by abstracting away the details of +the application to an interface, which is implemented as a socket +protocol. + +Thus we have an interface, the Application BlockChain Interface (ABCI), +and its primary implementation, the Tendermint Socket Protocol (TSP, or +Teaspoon). + +### Intro to ABCI + +[Tendermint Core](https://github.com/tendermint/tendermint) (the +"consensus engine") communicates with the application via a socket +protocol that satisfies the ABCI. + +To draw an analogy, lets talk about a well-known cryptocurrency, +Bitcoin. Bitcoin is a cryptocurrency blockchain where each node +maintains a fully audited Unspent Transaction Output (UTXO) database. If +one wanted to create a Bitcoin-like system on top of ABCI, Tendermint +Core would be responsible for + +- Sharing blocks and transactions between nodes +- Establishing a canonical/immutable order of transactions + (the blockchain) + +The application will be responsible for + +- Maintaining the UTXO database +- Validating cryptographic signatures of transactions +- Preventing transactions from spending non-existent transactions +- Allowing clients to query the UTXO database. + +Tendermint is able to decompose the blockchain design by offering a very +simple API (ie. the ABCI) between the application process and consensus +process. 
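+
+To make that split concrete, below is a minimal, hypothetical ABCI application
+sketched in Go. It is only a toy to show the shape of the application process
+(it is not the bundled `kvstore` example, and exact Go method signatures can
+differ between ABCI versions); the message types it implements are described
+next.
+
+```
+package main
+
+import (
+	"encoding/binary"
+
+	"github.com/tendermint/tendermint/abci/server"
+	"github.com/tendermint/tendermint/abci/types"
+)
+
+// CounterApp is a toy application that simply counts committed transactions.
+type CounterApp struct {
+	types.BaseApplication // provides no-op defaults for the methods we skip
+	count uint64
+}
+
+// CheckTx is called by the mempool to vet transactions before gossiping them.
+func (app *CounterApp) CheckTx(tx []byte) types.ResponseCheckTx {
+	if len(tx) == 0 {
+		return types.ResponseCheckTx{Code: 1, Log: "empty tx"} // non-zero code rejects the tx
+	}
+	return types.ResponseCheckTx{Code: 0}
+}
+
+// DeliverTx applies a transaction that was included in a committed block.
+func (app *CounterApp) DeliverTx(tx []byte) types.ResponseDeliverTx {
+	app.count++
+	return types.ResponseDeliverTx{Code: 0}
+}
+
+// Commit returns a deterministic commitment to the application state,
+// which Tendermint places in the next block header as the AppHash.
+func (app *CounterApp) Commit() types.ResponseCommit {
+	hash := make([]byte, 8)
+	binary.BigEndian.PutUint64(hash, app.count)
+	return types.ResponseCommit{Data: hash}
+}
+
+func main() {
+	// Serve the application over a socket so the consensus process can drive it.
+	srv, err := server.NewServer("tcp://0.0.0.0:26658", "socket", &CounterApp{})
+	if err != nil {
+		panic(err)
+	}
+	if err := srv.Start(); err != nil {
+		panic(err)
+	}
+	select {} // run until killed
+}
+```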
+ +The ABCI consists of 3 primary message types that get delivered from the +core to the application. The application replies with corresponding +response messages. + +The messages are specified here: [ABCI Message +Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types). + +The **DeliverTx** message is the work horse of the application. Each +transaction in the blockchain is delivered with this message. The +application needs to validate each transaction received with the +**DeliverTx** message against the current state, application protocol, +and the cryptographic credentials of the transaction. A validated +transaction then needs to update the application state — by binding a +value into a key values store, or by updating the UTXO database, for +instance. + +The **CheckTx** message is similar to **DeliverTx**, but it's only for +validating transactions. Tendermint Core's mempool first checks the +validity of a transaction with **CheckTx**, and only relays valid +transactions to its peers. For instance, an application may check an +incrementing sequence number in the transaction and return an error upon +**CheckTx** if the sequence number is old. Alternatively, they might use +a capabilities based system that requires capabilities to be renewed +with every transaction. + +The **Commit** message is used to compute a cryptographic commitment to +the current application state, to be placed into the next block header. +This has some handy properties. Inconsistencies in updating that state +will now appear as blockchain forks which catches a whole class of +programming errors. This also simplifies the development of secure +lightweight clients, as Merkle-hash proofs can be verified by checking +against the block hash, and that the block hash is signed by a quorum. + +There can be multiple ABCI socket connections to an application. +Tendermint Core creates three ABCI connections to the application; one +for the validation of transactions when broadcasting in the mempool, one +for the consensus engine to run block proposals, and one more for +querying the application state. + +It's probably evident that applications designers need to very carefully +design their message handlers to create a blockchain that does anything +useful but this architecture provides a place to start. The diagram +below illustrates the flow of messages via ABCI. + +![](../imgs/abci.png) + +## A Note on Determinism + +The logic for blockchain transaction processing must be deterministic. +If the application logic weren't deterministic, consensus would not be +reached among the Tendermint Core replica nodes. + +Solidity on Ethereum is a great language of choice for blockchain +applications because, among other reasons, it is a completely +deterministic programming language. However, it's also possible to +create deterministic applications using existing popular languages like +Java, C++, Python, or Go. Game programmers and blockchain developers are +already familiar with creating deterministic programs by avoiding +sources of non-determinism such as: + +- random number generators (without deterministic seeding) +- race conditions on threads (or avoiding threads altogether) +- the system clock +- uninitialized memory (in unsafe programming languages like C + or C++) +- [floating point + arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/) +- language features that are random (e.g. 
map iteration in Go) + +While programmers can avoid non-determinism by being careful, it is also +possible to create a special linter or static analyzer for each language +to check for determinism. In the future we may work with partners to +create such tools. + +## Consensus Overview + +Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus +protocol. The protocol follows a simple state machine that looks like +this: + +![](../imgs/consensus_logic.png) + +Participants in the protocol are called **validators**; they take turns +proposing blocks of transactions and voting on them. Blocks are +committed in a chain, with one block at each **height**. A block may +fail to be committed, in which case the protocol moves to the next +**round**, and a new validator gets to propose a block for that height. +Two stages of voting are required to successfully commit a block; we +call them **pre-vote** and **pre-commit**. A block is committed when +more than 2/3 of validators pre-commit for the same block in the same +round. + +There is a picture of a couple doing the polka because validators are +doing something like a polka dance. When more than two-thirds of the +validators pre-vote for the same block, we call that a **polka**. Every +pre-commit must be justified by a polka in the same round. + +Validators may fail to commit a block for a number of reasons; the +current proposer may be offline, or the network may be slow. Tendermint +allows them to establish that a validator should be skipped. Validators +wait a small amount of time to receive a complete proposal block from +the proposer before voting to move to the next round. This reliance on a +timeout is what makes Tendermint a weakly synchronous protocol, rather +than an asynchronous one. However, the rest of the protocol is +asynchronous, and validators only make progress after hearing from more +than two-thirds of the validator set. A simplifying element of +Tendermint is that it uses the same mechanism to commit a block as it +does to skip to the next round. + +Assuming less than one-third of the validators are Byzantine, Tendermint +guarantees that safety will never be violated - that is, validators will +never commit conflicting blocks at the same height. To do this it +introduces a few **locking** rules which modulate which paths can be +followed in the flow diagram. Once a validator precommits a block, it is +locked on that block. Then, + +1. it must prevote for the block it is locked on +2. it can only unlock, and precommit for a new block, if there is a + polka for that block in a later round + +## Stake + +In many systems, not all validators will have the same "weight" in the +consensus protocol. Thus, we are not so much interested in one-third or +two-thirds of the validators, but in those proportions of the total +voting power, which may not be uniformly distributed across individual +validators. + +Since Tendermint can replicate arbitrary applications, it is possible to +define a currency, and denominate the voting power in that currency. +When voting power is denominated in a native currency, the system is +often referred to as Proof-of-Stake. Validators can be forced, by logic +in the application, to "bond" their currency holdings in a security +deposit that can be destroyed if they're found to misbehave in the +consensus protocol. This adds an economic element to the security of the +protocol, allowing one to quantify the cost of violating the assumption +that less than one-third of voting power is Byzantine. 
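+
+Because the thresholds are defined over voting power rather than validator
+counts, the "more than two-thirds" check reduces to simple arithmetic over
+(possibly non-uniform) powers. A small illustrative sketch in Go (not
+Tendermint's actual vote-accounting code):
+
+```
+// hasTwoThirdsMajority reports whether the voting power that signed is
+// strictly greater than 2/3 of the total voting power (ignoring integer
+// overflow for the purposes of this sketch).
+func hasTwoThirdsMajority(signedPower, totalPower int64) bool {
+	return signedPower*3 > totalPower*2
+}
+
+// With validators holding powers 10, 10, 10 and 70, the single large
+// validator alone clears the threshold:
+//   hasTwoThirdsMajority(70, 100) == true
+// while the three small validators together do not:
+//   hasTwoThirdsMajority(30, 100) == false
+```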
+ +The [Cosmos Network](https://cosmos.network) is designed to use this +Proof-of-Stake mechanism across an array of cryptocurrencies implemented +as ABCI applications. + +The following diagram is Tendermint in a (technical) nutshell. [See here +for high resolution +version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). + +![](../imgs/tm-transaction-flow.png) diff --git a/docs/networks/README.md b/docs/networks/README.md new file mode 100644 index 000000000..b1ba27126 --- /dev/null +++ b/docs/networks/README.md @@ -0,0 +1,9 @@ +# Networks + +Use [Docker Compose](docker-compose.md) to spin up Tendermint testnets on your +local machine. + +Use [Terraform and Ansible](terraform-and-ansible.md) to deploy Tendermint +testnets to the cloud. + +See the `tendermint testnet --help` command for more help initializing testnets. diff --git a/docs/networks/deploy-testnets.md b/docs/networks/deploy-testnets.md index 35732f9b3..4df6916bb 100644 --- a/docs/networks/deploy-testnets.md +++ b/docs/networks/deploy-testnets.md @@ -1,8 +1,8 @@ # Deploy a Testnet -Now that we've seen how ABCI works, and even played with a few -applications on a single validator node, it's time to deploy a test -network to four validator nodes. +DEPRECATED DOCS! + +See [Networks](../networks). ## Manual Deployments @@ -21,17 +21,16 @@ Here are the steps to setting up a testnet manually: 3. Generate a private key and a node key for each validator using `tendermint init` 4. Compile a list of public keys for each validator into a - `genesis.json` file and replace the existing file with it. -5. Run - `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >` on each node, where `< peer addresses >` is a comma separated - list of the ID@IP:PORT combination for each node. The default port for - Tendermint is `26656`. The ID of a node can be obtained by running - `tendermint show_node_id` command. Thus, if the IP addresses of your nodes - were `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command - would look like: + new `genesis.json` file and replace the existing file with it. +5. Get the node IDs of any peers you want other peers to connect to by + running `tendermint show_node_id` on the relevant machine +6. Set the `p2p.persistent_peers` in the config for all nodes to the comma + separated list of `ID@IP:PORT` for all nodes. Default port is 26656. + +Then start the node ``` -tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656 +tendermint node --proxy_app=kvstore ``` After a few seconds, all the nodes should connect to each other and diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md new file mode 100644 index 000000000..a1924eb9d --- /dev/null +++ b/docs/networks/docker-compose.md @@ -0,0 +1,85 @@ +# Docker Compose + +With Docker Compose, we can spin up local testnets in a single command: + +``` +make localnet-start +``` + +## Requirements + +- [Install tendermint](/docs/install.md) +- [Install docker](https://docs.docker.com/engine/installation/) +- [Install docker-compose](https://docs.docker.com/compose/install/) + +## Build + +Build the `tendermint` binary and the `tendermint/localnode` docker image. + +Note the binary will be mounted into the container so it can be updated without +rebuilding the image. 
+ +``` +cd $GOPATH/src/github.com/tendermint/tendermint + +# Build the linux binary in ./build +make build-linux + +# Build tendermint/localnode image +make build-docker-localnode +``` + + +## Run a testnet + +To start a 4 node testnet run: + +``` +make localnet-start +``` + +The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the host. +This file creates a 4-node network using the localnode image. +The nodes of the network expose their P2P and RPC endpoints to the host machine on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively. + +To update the binary, just rebuild it and restart the nodes: + +``` +make build-linux +make localnet-stop +make localnet-start +``` + +## Configuration + +The `make localnet-start` creates files for a 4-node testnet in `./build` by calling the `tendermint testnet` command. + +The `./build` directory is mounted to the `/tendermint` mount point to attach the binary and config files to the container. + +For instance, to create a single node testnet: + +``` +cd $GOPATH/src/github.com/tendermint/tendermint + +# Clear the build folder +rm -rf ./build + +# Build binary +make build-linux + +# Create configuration +docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1 + +#Run the node +docker run -v `pwd`/build:/tendermint tendermint/localnode + +``` + +## Logging + +Log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen. + +## Special binaries + +If you have multiple binaries with different names, you can specify which one to run with the BINARY environment variable. The path of the binary is relative to the attached volume. + diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md index 5a4b9c53b..c08ade17a 100644 --- a/docs/networks/terraform-and-ansible.md +++ b/docs/networks/terraform-and-ansible.md @@ -29,7 +29,7 @@ export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub" These will be used by both `terraform` and `ansible`. -### Terraform +## Terraform This step will create four Digital Ocean droplets. First, go to the correct directory: @@ -49,7 +49,7 @@ and you will get a list of IP addresses that belong to your droplets. With the droplets created and running, let's setup Ansible. -### Ansible +## Ansible The playbooks in [the ansible directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible) @@ -144,7 +144,7 @@ Peek at the logs with the status role: ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml ``` -### Logging +## Logging The crudest way is the status role described above. You can also ship logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana) @@ -160,7 +160,7 @@ go get github.com/mheese/journalbeat ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 ``` -### Cleanup +## Cleanup To remove your droplets, run: diff --git a/docs/spec/abci/README.md b/docs/spec/abci/README.md index c0956db6f..02e369bf7 100644 --- a/docs/spec/abci/README.md +++ b/docs/spec/abci/README.md @@ -1,7 +1,7 @@ # ABCI ABCI is the interface between Tendermint (a state-machine replication engine) -and an application (the actual state machine). It consists of a set of +and your application (the actual state machine). 
It consists of a set of *methods*, where each method has a corresponding `Request` and `Response` message type. Tendermint calls the ABCI methods on the ABCI application by sending the `Request*` messages and receiving the `Response*` messages in return. diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index a12170981..0e9b3d782 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -7,9 +7,9 @@ file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.pro ABCI methods are split across 3 separate ABCI *connections*: -- `Consensus Connection: InitChain, BeginBlock, DeliverTx, EndBlock, Commit` -- `Mempool Connection: CheckTx` -- `Info Connection: Info, SetOption, Query` +- `Consensus Connection`: `InitChain, BeginBlock, DeliverTx, EndBlock, Commit` +- `Mempool Connection`: `CheckTx` +- `Info Connection`: `Info, SetOption, Query` The `Consensus Connection` is driven by a consensus protocol and is responsible for block execution. diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md new file mode 100644 index 000000000..7f5dc6772 --- /dev/null +++ b/docs/tendermint-core/README.md @@ -0,0 +1,4 @@ +# Tendermint Core + +See the side-bar for details on the various features of Tendermint Core. + diff --git a/docs/tools/README.md b/docs/tools/README.md new file mode 100644 index 000000000..b08416bb3 --- /dev/null +++ b/docs/tools/README.md @@ -0,0 +1,4 @@ +# Tools + +Tendermint comes with some tools for [benchmarking](benchmarking.md) +and [monitoring](monitoring.md). diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md index 691d3b6ed..e17c28564 100644 --- a/docs/tools/benchmarking.md +++ b/docs/tools/benchmarking.md @@ -20,7 +20,7 @@ Blocks/sec 0.818 0.386 1 9 ## Quick Start -[Install Tendermint](../introduction/install) +[Install Tendermint](../introduction/install.md) This currently is setup to work on tendermint's develop branch. Please ensure you are on that. (If not, update `tendermint` and `tmlibs` in gopkg.toml to use the master branch.) diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md index bd0105c8e..c0fa94c09 100644 --- a/docs/tools/monitoring.md +++ b/docs/tools/monitoring.md @@ -33,21 +33,21 @@ docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 ### Using Binaries -[Install Tendermint](https://github.com/tendermint/tendermint#install) +[Install Tendermint](../introduction/install.md). -then run: +Start a Tendermint node: ``` tendermint init tendermint node --proxy_app=kvstore ``` +In another window, run the monitor: + ``` tm-monitor localhost:26657 ``` -with the last command being in a seperate window. - ## Usage ``` diff --git a/networks/local/README.md b/networks/local/README.md index 09a0b12cb..8d4299693 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -1,5 +1,9 @@ # Local Cluster with Docker Compose +DEPRECATED! + +See the [docs](https://tendermint.com/docs/networks/docker-compose.html). + ## Requirements - [Install tendermint](/docs/install.md) diff --git a/networks/remote/README.md b/networks/remote/README.md index 2094fcc98..4c035be8c 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](/docs/terraform-and-ansible.md) +See the [docs](https://tendermint.com/docs/networks/terraform-and-ansible.html). 
From 69c7aa77bcc84cb92aadaa91023a32ec3951184b Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Sun, 30 Sep 2018 10:26:14 -0700 Subject: [PATCH 016/113] clist: speedup Next by removing defers (#2511) This change doubles the speed of the mempool's reaping. Before: BenchmarkReap-8 5000 365390 ns/op 122887 B/op After: BenchmarkReap-8 10000 158274 ns/op 122882 B/op --- libs/clist/clist.go | 6 ++--- mempool/bench_test.go | 55 +++++++++++++++++++++++++++++++++++++++++ mempool/mempool_test.go | 29 ---------------------- 3 files changed, 58 insertions(+), 32 deletions(-) create mode 100644 mempool/bench_test.go diff --git a/libs/clist/clist.go b/libs/clist/clist.go index c69d3d5f3..393bdf73f 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -113,9 +113,9 @@ func (e *CElement) NextWaitChan() <-chan struct{} { // Nonblocking, may return nil if at the end. func (e *CElement) Next() *CElement { e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.next + val := e.next + e.mtx.RUnlock() + return val } // Nonblocking, may return nil if at the end. diff --git a/mempool/bench_test.go b/mempool/bench_test.go new file mode 100644 index 000000000..68b033caa --- /dev/null +++ b/mempool/bench_test.go @@ -0,0 +1,55 @@ +package mempool + +import ( + "encoding/binary" + "testing" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/proxy" +) + +func BenchmarkReap(b *testing.B) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool := newMempoolWithApp(cc) + + size := 10000 + for i := 0; i < size; i++ { + tx := make([]byte, 8) + binary.BigEndian.PutUint64(tx, uint64(i)) + mempool.CheckTx(tx, nil) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + mempool.ReapMaxBytesMaxGas(100000000, 10000000) + } +} + +func BenchmarkCacheInsertTime(b *testing.B) { + cache := newMapTxCache(b.N) + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Push(txs[i]) + } +} + +// This benchmark is probably skewed, since we actually will be removing +// txs in parallel, which may cause some overhead due to mutex locking. +func BenchmarkCacheRemoveTime(b *testing.B) { + cache := newMapTxCache(b.N) + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + cache.Push(txs[i]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Remove(txs[i]) + } +} diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index 4f66da36c..5aabd00ee 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -399,35 +399,6 @@ func TestMempoolCloseWAL(t *testing.T) { require.Equal(t, 1, len(m3), "expecting the wal match in") } -func BenchmarkCacheInsertTime(b *testing.B) { - cache := newMapTxCache(b.N) - txs := make([][]byte, b.N) - for i := 0; i < b.N; i++ { - txs[i] = make([]byte, 8) - binary.BigEndian.PutUint64(txs[i], uint64(i)) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Push(txs[i]) - } -} - -// This benchmark is probably skewed, since we actually will be removing -// txs in parallel, which may cause some overhead due to mutex locking. 
-func BenchmarkCacheRemoveTime(b *testing.B) { - cache := newMapTxCache(b.N) - txs := make([][]byte, b.N) - for i := 0; i < b.N; i++ { - txs[i] = make([]byte, 8) - binary.BigEndian.PutUint64(txs[i], uint64(i)) - cache.Push(txs[i]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Remove(txs[i]) - } -} - func checksumIt(data []byte) string { h := md5.New() h.Write(data) From 52e21cebcfe65522f629b457e39b9dc8b2c30297 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 30 Sep 2018 13:28:34 -0400 Subject: [PATCH 017/113] remove some xxx comments and the config.mempool.recheck_empty (#2505) * remove some XXX * config: remove Mempool.RecheckEmpty * docs: remove recheck_empty --- CHANGELOG_PENDING.md | 1 + config/config.go | 14 ++++++-------- config/toml.go | 1 - consensus/types/round_state.go | 4 ++-- docs/spec/reactors/mempool/config.md | 11 +++-------- docs/tendermint-core/configuration.md | 7 +++---- mempool/mempool.go | 4 +--- node/node.go | 1 - privval/priv_validator.go | 6 ++++-- types/params.go | 2 -- types/vote.go | 4 ++-- 11 files changed, 22 insertions(+), 33 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index bf381dce2..bca7ba478 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,6 +5,7 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config + * [config] \#2505 Remove Mempool.RecheckEmpty (it was effectively useless anyways) * [config] `mempool.wal` is disabled by default * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false` diff --git a/config/config.go b/config/config.go index 619c0410f..8ff800053 100644 --- a/config/config.go +++ b/config/config.go @@ -488,20 +488,18 @@ func DefaultFuzzConnConfig() *FuzzConnConfig { // MempoolConfig defines the configuration options for the Tendermint mempool type MempoolConfig struct { - RootDir string `mapstructure:"home"` - Recheck bool `mapstructure:"recheck"` - RecheckEmpty bool `mapstructure:"recheck_empty"` - Broadcast bool `mapstructure:"broadcast"` - WalPath string `mapstructure:"wal_dir"` - Size int `mapstructure:"size"` - CacheSize int `mapstructure:"cache_size"` + RootDir string `mapstructure:"home"` + Recheck bool `mapstructure:"recheck"` + Broadcast bool `mapstructure:"broadcast"` + WalPath string `mapstructure:"wal_dir"` + Size int `mapstructure:"size"` + CacheSize int `mapstructure:"cache_size"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ Recheck: true, - RecheckEmpty: true, Broadcast: true, WalPath: "", // Each signature verification takes .5ms, size reduced until we implement diff --git a/config/toml.go b/config/toml.go index 846b33d16..ddfe5f055 100644 --- a/config/toml.go +++ b/config/toml.go @@ -213,7 +213,6 @@ dial_timeout = "{{ .P2P.DialTimeout }}" [mempool] recheck = {{ .Mempool.Recheck }} -recheck_empty = {{ .Mempool.RecheckEmpty }} broadcast = {{ .Mempool.Broadcast }} wal_dir = "{{ js .Mempool.WalPath }}" diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index c22880c2b..d3f6468bf 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -107,8 +107,8 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { // RoundStateEvent returns the H/R/S of the RoundState as an event. 
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { - // XXX: copy the RoundState - // if we want to avoid this, we may need synchronous events after all + // copy the RoundState. + // TODO: if we want to avoid this, we may need synchronous events after all rsCopy := *rs edrs := types.EventDataRoundState{ Height: rs.Height, diff --git a/docs/spec/reactors/mempool/config.md b/docs/spec/reactors/mempool/config.md index 3e3c0d373..4fb756fa4 100644 --- a/docs/spec/reactors/mempool/config.md +++ b/docs/spec/reactors/mempool/config.md @@ -6,23 +6,21 @@ as command-line flags, but they can also be passed in as environmental variables or in the config.toml file. The following are all equivalent: -Flag: `--mempool.recheck_empty=false` +Flag: `--mempool.recheck=false` -Environment: `TM_MEMPOOL_RECHECK_EMPTY=false` +Environment: `TM_MEMPOOL_RECHECK=false` Config: ``` [mempool] -recheck_empty = false +recheck = false ``` ## Recheck `--mempool.recheck=false` (default: true) -`--mempool.recheck_empty=false` (default: true) - Recheck determines if the mempool rechecks all pending transactions after a block was committed. Once a block is committed, the mempool removes all valid transactions @@ -31,9 +29,6 @@ that were successfully included in the block. If `recheck` is true, then it will rerun CheckTx on all remaining transactions with the new block state. -If the block contained no transactions, it will skip the -recheck unless `recheck_empty` is true. - ## Broadcast `--mempool.broadcast=false` (default: true) diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index c5b07497c..8b3c3c22f 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -156,7 +156,6 @@ dial_timeout = "3s" [mempool] recheck = true -recheck_empty = true broadcast = true wal_dir = "data/mempool.wal" @@ -203,15 +202,15 @@ indexer = "kv" # Comma-separated list of tags to index (by default the only tag is "tx.hash") # # You can also index transactions by height by adding "tx.height" tag here. -# +# # It's recommended to index only a subset of tags due to possible memory # bloat. This is, of course, depends on the indexer's DB and the volume of # transactions. index_tags = "" # When set to true, tells indexer to index all tags (predefined tags: -# "tx.hash", "tx.height" and all tags from DeliverTx responses). -# +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# # Note this may be not desirable (see the comment above). IndexTags has a # precedence over IndexAllTags (i.e. when given both, IndexTags will be # indexed). diff --git a/mempool/mempool.go b/mempool/mempool.go index 2096912f5..db5f6160c 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -513,9 +513,7 @@ func (mem *Mempool) Update( // Remove transactions that are already in txs. goodTxs := mem.filterTxs(txsMap) // Recheck mempool txs if any txs were committed in the block - // NOTE/XXX: in some apps a tx could be invalidated due to EndBlock, - // so we really still do need to recheck, but this is for debugging - if mem.config.Recheck && (mem.config.RecheckEmpty || len(goodTxs) > 0) { + if mem.config.Recheck && len(goodTxs) > 0 { mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height) mem.recheckTxs(goodTxs) // At this point, mem.txs are being rechecked. 
diff --git a/node/node.go b/node/node.go index bba4dbda5..9f9e3636f 100644 --- a/node/node.go +++ b/node/node.go @@ -359,7 +359,6 @@ func NewNode(config *cfg.Config, // Filter peers by addr or pubkey with an ABCI query. // If the query return code is OK, add peer. - // XXX: Query format subject to change if config.FilterPeers { connFilters = append( connFilters, diff --git a/privval/priv_validator.go b/privval/priv_validator.go index 8091744ce..e606b826a 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -38,14 +38,16 @@ func voteToStep(vote *types.Vote) int8 { // FilePV implements PrivValidator using data persisted to disk // to prevent double signing. // NOTE: the directory containing the pv.filePath must already exist. +// It includes the LastSignature and LastSignBytes so we don't lose the signature +// if the process crashes after signing but before the resulting consensus message is processed. type FilePV struct { Address types.Address `json:"address"` PubKey crypto.PubKey `json:"pub_key"` LastHeight int64 `json:"last_height"` LastRound int `json:"last_round"` LastStep int8 `json:"last_step"` - LastSignature []byte `json:"last_signature,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? - LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? + LastSignature []byte `json:"last_signature,omitempty"` + LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` PrivKey crypto.PrivKey `json:"priv_key"` // For persistence. diff --git a/types/params.go b/types/params.go index a7301d063..014694ccb 100644 --- a/types/params.go +++ b/types/params.go @@ -99,8 +99,6 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar } // we must defensively consider any structs may be nil - // XXX: it's cast city over here. It's ok because we only do int32->int - // but still, watch it champ. if params2.BlockSize != nil { res.BlockSize.MaxBytes = params2.BlockSize.MaxBytes res.BlockSize.MaxGas = params2.BlockSize.MaxGas diff --git a/types/vote.go b/types/vote.go index ba2f1dfe4..5a31f0e2b 100644 --- a/types/vote.go +++ b/types/vote.go @@ -61,8 +61,8 @@ func IsVoteTypeValid(type_ byte) bool { } } -// Address is hex bytes. TODO: crypto.Address -type Address = cmn.HexBytes +// Address is hex bytes. +type Address = crypto.Address // Represents a prevote, precommit, or commit vote from validators for consensus. type Vote struct { From ccd04587ff3f8806af7f3af10a187520e4defd29 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 30 Sep 2018 15:08:01 -0400 Subject: [PATCH 018/113] docs/spec/abci: consensus params and general merkle (#2524) * docs: links to dirs need a slash * docs/spec/abci: consensus params and general merkle --- docs/DOCS_README.md | 2 ++ docs/README.md | 4 ++-- docs/spec/abci/abci.md | 49 +++++++++++++++++++++++++++++++++++++++--- docs/spec/abci/apps.md | 32 +++++++++++++++++++++++++-- 4 files changed, 80 insertions(+), 7 deletions(-) diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index e2f22ff6d..a7671c360 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -35,6 +35,8 @@ of the sidebar. **NOTE:** Strongly consider the existing links - both within this directory and to the website docs - when moving or deleting files. +Links to directories *MUST* end in a `/`. 
+ Relative links should be used nearly everywhere, having discovered and weighed the following: ### Relative diff --git a/docs/README.md b/docs/README.md index 15ce74e39..c32935477 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,10 +14,10 @@ Tendermint?](introduction/what-is-tendermint.md). To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). -To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci). +To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci/). For more details on using Tendermint, see the respective documentation for -[Tendermint Core](tendermint-core), [benchmarking and monitoring](tools), and [network deployments](networks). +[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). ## Contribute diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index 0e9b3d782..1306128f6 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -190,9 +190,9 @@ Commit are included in the header of the next block. of Path. - `Path (string)`: Path of request, like an HTTP GET path. Can be used with or in liue of Data. - - Apps MUST interpret '/store' as a query by key on the + - Apps MUST interpret '/store' as a query by key on the underlying store. The key SHOULD be specified in the Data field. - - Apps SHOULD allow queries over specific types like + - Apps SHOULD allow queries over specific types like '/accounts/...' or '/votes/...' - `Height (int64)`: The block height for which you want the query (default=0 returns data for the latest committed block). Note @@ -209,7 +209,7 @@ Commit are included in the header of the next block. - `Index (int64)`: The index of the key in the tree. - `Key ([]byte)`: The key of the matching data. - `Value ([]byte)`: The value of the matching data. - - `Proof ([]byte)`: Serialized proof for the data, if requested, to be + - `Proof (Proof)`: Serialized proof for the value data, if requested, to be verified against the `AppHash` for the given Height. - `Height (int64)`: The block height from which data was derived. Note that this is the height of the block containing the @@ -218,6 +218,8 @@ Commit are included in the header of the next block. - **Usage**: - Query for data from the application at current or past height. - Optionally return Merkle proof. + - Merkle proof includes self-describing `type` field to support many types + of Merkle trees and encoding formats. ### BeginBlock @@ -413,3 +415,44 @@ Commit are included in the header of the next block. - `Round (int32)`: Commit round. - `Votes ([]VoteInfo)`: List of validators addresses in the last validator set with their voting power and whether or not they signed a vote. + +### ConsensusParams + +- **Fields**: + - `BlockSize (BlockSize)`: Parameters limiting the size of a block. + - `EvidenceParams (EvidenceParams)`: Parameters limiting the validity of + evidence of byzantine behaviour. + +### BlockSize + +- **Fields**: + - `MaxBytes (int64)`: Max size of a block, in bytes. + - `MaxGas (int64)`: Max sum of `GasWanted` in a proposed block. + - NOTE: blocks that violate this may be committed if there are Byzantine proposers. + It's the application's responsibility to handle this when processing a + block! + +### EvidenceParams + +- **Fields**: + - `MaxAge (int64)`: Max age of evidence, in blocks. Evidence older than this + is considered stale and ignored. 
+ - This should correspond with an app's "unbonding period" or other + similar mechanism for handling Nothing-At-Stake attacks. + - NOTE: this should change to time (instead of blocks)! + +### Proof + +- **Fields**: + - `Ops ([]ProofOp)`: List of chained Merkle proofs, of possibly different types + - The Merkle root of one op is the value being proven in the next op. + - The Merkle root of the final op should equal the ultimate root hash being + verified against. + +### ProofOp + +- **Fields**: + - `Type (string)`: Type of Merkle proof and how it's encoded. + - `Key ([]byte)`: Key in the Merkle tree that this proof is for. + - `Data ([]byte)`: Encoded Merkle proof for the key. + diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md index a8f377718..acf2c4e66 100644 --- a/docs/spec/abci/apps.md +++ b/docs/spec/abci/apps.md @@ -247,8 +247,12 @@ Must have `0 < MaxAge`. ### Updates -The application may set the consensus params during InitChain, and update them during -EndBlock. +The application may set the ConsensusParams during InitChain, and update them during +EndBlock. If the ConsensusParams is empty, it will be ignored. Each field +that is not empty will be applied in full. For instance, if updating the +BlockSize.MaxBytes, applications must also set the other BlockSize fields (like +BlockSize.MaxGas), even if they are unchanged, as they will otherwise cause the +value to be updated to 0. #### InitChain @@ -312,6 +316,30 @@ their state as follows: For instance, this allows an application's lite-client to verify proofs of absence in the application state, something which is much less efficient to do using the block hash. +Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees, +where the leaves of one tree are the root hashes of others. To support this, and +the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure: + +``` +message Proof { + repeated ProofOp ops +} + +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} +``` + +Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`. +This allows ABCI to support many different kinds of Merkle trees, encoding +formats, and proofs (eg. of presence and absence) just by varying the `type`. +The `data` contains the actual encoded proof, encoded according to the `type`. +When verifying the full proof, the root hash for one ProofOp is the value being +verified for the next ProofOp in the list. The root hash of the final ProofOp in +the list should match the `AppHash` being verified against. 
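The chaining rule is easy to state in code. The sketch below (Go; `VerifyChain` and `verifyOp` are illustrative names, not the actual `merkle` package API) walks the ops in order and checks the final root against the `AppHash`:

```go
package merkleproof

import (
	"bytes"
	"errors"
)

// ProofOp mirrors the message above: one Merkle proof, for one key,
// in one tree of the named type.
type ProofOp struct {
	Type string
	Key  []byte
	Data []byte
}

// VerifyChain is a sketch of the chaining rule only, not the real merkle
// package API: verifyOp stands in for a type-specific verifier that checks
// a single ProofOp against a value and returns the root hash it commits to.
func VerifyChain(
	appHash, value []byte,
	ops []ProofOp,
	verifyOp func(op ProofOp, value []byte) ([]byte, error),
) error {
	cur := value
	for _, op := range ops {
		root, err := verifyOp(op, cur)
		if err != nil {
			return err
		}
		cur = root // the root of this op is the value proven by the next op
	}
	if !bytes.Equal(cur, appHash) {
		return errors.New("final proof root does not match AppHash")
	}
	return nil
}
```

Keeping the verifier per-op is what lets a single response prove a key deep inside nested stores, with each tree free to use its own encoding.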
+ ### Peer Filtering When Tendermint connects to a peer, it sends two queries to the ABCI application From 32e274cff09fcaf6ad3ac4db1693aed28de52daa Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 1 Oct 2018 16:38:35 +0400 Subject: [PATCH 019/113] config: Refactor ValidateBasic (#2503) * timeouts as time.Duration are also breaking for old configs * split BaseConfig#ValidateBasic into smaller methods --- CHANGELOG_PENDING.md | 5 +- config/config.go | 184 ++++++++++++++++++++++++++----------------- 2 files changed, 113 insertions(+), 76 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index bca7ba478..0c8677303 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,12 +5,13 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config + * [config] \#2232 timeouts as time.Duration, not ints * [config] \#2505 Remove Mempool.RecheckEmpty (it was effectively useless anyways) * [config] `mempool.wal` is disabled by default * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false` - * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer). - + * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) + * Apps * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just arbitrary bytes diff --git a/config/config.go b/config/config.go index 8ff800053..8f3d6d180 100644 --- a/config/config.go +++ b/config/config.go @@ -1,11 +1,12 @@ package config import ( - "errors" "fmt" "os" "path/filepath" "time" + + "github.com/pkg/errors" ) const ( @@ -93,83 +94,22 @@ func (cfg *Config) SetRoot(root string) *Config { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. 
func (cfg *Config) ValidateBasic() error { - // RPCConfig - if cfg.RPC.GRPCMaxOpenConnections < 0 { - return errors.New("[rpc] grpc_max_open_connections can't be negative") - } - if cfg.RPC.MaxOpenConnections < 0 { - return errors.New("[rpc] max_open_connections can't be negative") - } - - // P2PConfig - if cfg.P2P.MaxNumInboundPeers < 0 { - return errors.New("[p2p] max_num_inbound_peers can't be negative") - } - if cfg.P2P.MaxNumOutboundPeers < 0 { - return errors.New("[p2p] max_num_outbound_peers can't be negative") - } - if cfg.P2P.FlushThrottleTimeout < 0 { - return errors.New("[p2p] flush_throttle_timeout can't be negative") - } - if cfg.P2P.MaxPacketMsgPayloadSize < 0 { - return errors.New("[p2p] max_packet_msg_payload_size can't be negative") - } - if cfg.P2P.SendRate < 0 { - return errors.New("[p2p] send_rate can't be negative") - } - if cfg.P2P.RecvRate < 0 { - return errors.New("[p2p] recv_rate can't be negative") - } - - // MempoolConfig - if cfg.Mempool.Size < 0 { - return errors.New("[mempool] size can't be negative") - } - if cfg.Mempool.CacheSize < 0 { - return errors.New("[mempool] cache_size can't be negative") - } - - // ConsensusConfig - if cfg.Consensus.TimeoutPropose < 0 { - return errors.New("[consensus] timeout_propose can't be negative") - } - if cfg.Consensus.TimeoutProposeDelta < 0 { - return errors.New("[consensus] timeout_propose_delta can't be negative") - } - if cfg.Consensus.TimeoutPrevote < 0 { - return errors.New("[consensus] timeout_prevote can't be negative") - } - if cfg.Consensus.TimeoutPrevoteDelta < 0 { - return errors.New("[consensus] timeout_prevote_delta can't be negative") - } - if cfg.Consensus.TimeoutPrecommit < 0 { - return errors.New("[consensus] timeout_precommit can't be negative") + if err := cfg.RPC.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [rpc] section") } - if cfg.Consensus.TimeoutPrecommitDelta < 0 { - return errors.New("[consensus] timeout_precommit_delta can't be negative") + if err := cfg.P2P.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [p2p] section") } - if cfg.Consensus.TimeoutCommit < 0 { - return errors.New("[consensus] timeout_commit can't be negative") + if err := cfg.Mempool.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [mempool] section") } - if cfg.Consensus.CreateEmptyBlocksInterval < 0 { - return errors.New("[consensus] create_empty_blocks_interval can't be negative") + if err := cfg.Consensus.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [consensus] section") } - if cfg.Consensus.PeerGossipSleepDuration < 0 { - return errors.New("[consensus] peer_gossip_sleep_duration can't be negative") - } - if cfg.Consensus.PeerQueryMaj23SleepDuration < 0 { - return errors.New("[consensus] peer_query_maj23_sleep_duration can't be negative") - } - if cfg.Consensus.BlockTimeIota < 0 { - return errors.New("[consensus] blocktime_iota can't be negative") - } - - // InstrumentationConfig - if cfg.Instrumentation.MaxOpenConnections < 0 { - return errors.New("[instrumentation] max_open_connections can't be negative") - } - - return nil + return errors.Wrap( + cfg.Instrumentation.ValidateBasic(), + "Error in [instrumentation] section", + ) } //----------------------------------------------------------------------------- @@ -348,6 +288,18 @@ func TestRPCConfig() *RPCConfig { return cfg } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
+func (cfg *RPCConfig) ValidateBasic() error { + if cfg.GRPCMaxOpenConnections < 0 { + return errors.New("grpc_max_open_connections can't be negative") + } + if cfg.MaxOpenConnections < 0 { + return errors.New("max_open_connections can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // P2PConfig @@ -463,6 +415,30 @@ func (cfg *P2PConfig) AddrBookFile() string { return rootify(cfg.AddrBook, cfg.RootDir) } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *P2PConfig) ValidateBasic() error { + if cfg.MaxNumInboundPeers < 0 { + return errors.New("max_num_inbound_peers can't be negative") + } + if cfg.MaxNumOutboundPeers < 0 { + return errors.New("max_num_outbound_peers can't be negative") + } + if cfg.FlushThrottleTimeout < 0 { + return errors.New("flush_throttle_timeout can't be negative") + } + if cfg.MaxPacketMsgPayloadSize < 0 { + return errors.New("max_packet_msg_payload_size can't be negative") + } + if cfg.SendRate < 0 { + return errors.New("send_rate can't be negative") + } + if cfg.RecvRate < 0 { + return errors.New("recv_rate can't be negative") + } + return nil +} + // FuzzConnConfig is a FuzzedConnection configuration. type FuzzConnConfig struct { Mode int @@ -521,6 +497,18 @@ func (cfg *MempoolConfig) WalDir() string { return rootify(cfg.WalPath, cfg.RootDir) } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *MempoolConfig) ValidateBasic() error { + if cfg.Size < 0 { + return errors.New("size can't be negative") + } + if cfg.CacheSize < 0 { + return errors.New("cache_size can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // ConsensusConfig @@ -641,6 +629,45 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) { cfg.walFile = walFile } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
+func (cfg *ConsensusConfig) ValidateBasic() error { + if cfg.TimeoutPropose < 0 { + return errors.New("timeout_propose can't be negative") + } + if cfg.TimeoutProposeDelta < 0 { + return errors.New("timeout_propose_delta can't be negative") + } + if cfg.TimeoutPrevote < 0 { + return errors.New("timeout_prevote can't be negative") + } + if cfg.TimeoutPrevoteDelta < 0 { + return errors.New("timeout_prevote_delta can't be negative") + } + if cfg.TimeoutPrecommit < 0 { + return errors.New("timeout_precommit can't be negative") + } + if cfg.TimeoutPrecommitDelta < 0 { + return errors.New("timeout_precommit_delta can't be negative") + } + if cfg.TimeoutCommit < 0 { + return errors.New("timeout_commit can't be negative") + } + if cfg.CreateEmptyBlocksInterval < 0 { + return errors.New("create_empty_blocks_interval can't be negative") + } + if cfg.PeerGossipSleepDuration < 0 { + return errors.New("peer_gossip_sleep_duration can't be negative") + } + if cfg.PeerQueryMaj23SleepDuration < 0 { + return errors.New("peer_query_maj23_sleep_duration can't be negative") + } + if cfg.BlockTimeIota < 0 { + return errors.New("blocktime_iota can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // TxIndexConfig @@ -726,6 +753,15 @@ func TestInstrumentationConfig() *InstrumentationConfig { return DefaultInstrumentationConfig() } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *InstrumentationConfig) ValidateBasic() error { + if cfg.MaxOpenConnections < 0 { + return errors.New("max_open_connections can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // Utils From fd1b8598bcfed552f208e33fadebf368b80b1daf Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Tue, 2 Oct 2018 00:47:20 -0700 Subject: [PATCH 020/113] Make block_test.go more table driven (#2526) --- types/block_test.go | 126 ++++++++++++++++++++------------------------ 1 file changed, 56 insertions(+), 70 deletions(-) diff --git a/types/block_test.go b/types/block_test.go index ffd73eae0..c99fb6b07 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -1,14 +1,13 @@ package types import ( + "crypto/rand" "math" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -45,51 +44,37 @@ func TestBlockValidateBasic(t *testing.T) { ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address) evList := []Evidence{ev} - block := MakeBlock(h, txs, commit, evList) - require.NotNil(t, block) - block.ProposerAddress = valSet.GetProposer().Address - - // proper block must pass - err = block.ValidateBasic() - require.NoError(t, err) - - // tamper with NumTxs - block = MakeBlock(h, txs, commit, evList) - block.NumTxs++ - err = block.ValidateBasic() - require.Error(t, err) - - // remove 1/2 the commits - block = MakeBlock(h, txs, commit, evList) - block.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] - block.LastCommit.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with LastCommitHash - block = MakeBlock(h, txs, commit, evList) - block.LastCommitHash = []byte("something else") - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with data - block = MakeBlock(h, txs, 
commit, evList) - block.Data.Txs[0] = Tx("something else") - block.Data.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with DataHash - block = MakeBlock(h, txs, commit, evList) - block.DataHash = cmn.RandBytes(len(block.DataHash)) - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with evidence - block = MakeBlock(h, txs, commit, evList) - block.EvidenceHash = []byte("something else") - err = block.ValidateBasic() - require.Error(t, err) + testCases := []struct { + testName string + malleateBlock func(*Block) + expErr bool + }{ + {"Make Block", func(blk *Block) {}, false}, + {"Make Block w/ proposer Addr", func(blk *Block) { blk.ProposerAddress = valSet.GetProposer().Address }, false}, + {"Increase NumTxs", func(blk *Block) { blk.NumTxs++ }, true}, + {"Remove 1/2 the commits", func(blk *Block) { + blk.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] + blk.LastCommit.hash = nil // clear hash or change wont be noticed + }, true}, + {"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true}, + {"Tampered Data", func(blk *Block) { + blk.Data.Txs[0] = Tx("something else") + blk.Data.hash = nil // clear hash or change wont be noticed + }, true}, + {"Tampered DataHash", func(blk *Block) { + blk.DataHash = cmn.RandBytes(len(blk.DataHash)) + }, true}, + {"Tampered EvidenceHash", func(blk *Block) { + blk.EvidenceHash = []byte("something else") + }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + block := MakeBlock(h, txs, commit, evList) + tc.malleateBlock(block) + assert.Equal(t, tc.expErr, block.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } } func TestBlockHash(t *testing.T) { @@ -161,7 +146,11 @@ func TestBlockString(t *testing.T) { } func makeBlockIDRandom() BlockID { - blockHash, blockPartsHeader := crypto.CRandBytes(tmhash.Size), PartSetHeader{123, crypto.CRandBytes(tmhash.Size)} + blockHash := make([]byte, tmhash.Size) + partSetHash := make([]byte, tmhash.Size) + rand.Read(blockHash) + rand.Read(partSetHash) + blockPartsHeader := PartSetHeader{123, partSetHash} return BlockID{blockHash, blockPartsHeader} } @@ -211,28 +200,25 @@ func TestCommit(t *testing.T) { } func TestCommitValidateBasic(t *testing.T) { - commit := randCommit() - assert.NoError(t, commit.ValidateBasic()) - - // nil precommit is OK - commit = randCommit() - commit.Precommits[0] = nil - assert.NoError(t, commit.ValidateBasic()) - - // tamper with types - commit = randCommit() - commit.Precommits[0].Type = VoteTypePrevote - assert.Error(t, commit.ValidateBasic()) - - // tamper with height - commit = randCommit() - commit.Precommits[0].Height = int64(100) - assert.Error(t, commit.ValidateBasic()) - - // tamper with round - commit = randCommit() - commit.Precommits[0].Round = 100 - assert.Error(t, commit.ValidateBasic()) + testCases := []struct { + testName string + malleateCommit func(*Commit) + expectErr bool + }{ + {"Random Commit", func(com *Commit) {}, false}, + {"Nil precommit", func(com *Commit) { com.Precommits[0] = nil }, false}, + {"Incorrect signature", func(com *Commit) { com.Precommits[0].Signature = []byte{0} }, false}, + {"Incorrect type", func(com *Commit) { com.Precommits[0].Type = VoteTypePrevote }, true}, + {"Incorrect height", func(com *Commit) { com.Precommits[0].Height = int64(100) }, true}, + {"Incorrect round", func(com *Commit) { com.Precommits[0].Round = 100 }, true}, + } + for _, tc := range 
testCases { + t.Run(tc.testName, func(t *testing.T) { + com := randCommit() + tc.malleateCommit(com) + assert.Equal(t, tc.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } } func TestMaxHeaderBytes(t *testing.T) { From 5c6999cf8f0fc09bec6da16d7038a0f1b6ca4d71 Mon Sep 17 00:00:00 2001 From: goolAdapter <267310165@qq.com> Date: Tue, 2 Oct 2018 15:52:56 +0800 Subject: [PATCH 021/113] fix evidence db iter leak (#2516) Also make reversing a slice more efficient --- CHANGELOG_PENDING.md | 1 + evidence/store.go | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 0c8677303..6d9813350 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -39,3 +39,4 @@ IMPROVEMENTS: BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time +- [evidence] \#2515 fix db iter leak (@goolAdapter) diff --git a/evidence/store.go b/evidence/store.go index 9d0010a81..ccfd2d487 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -79,11 +79,11 @@ func NewEvidenceStore(db dbm.DB) *EvidenceStore { func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) { // reverse the order so highest priority is first l := store.listEvidence(baseKeyOutqueue, -1) - l2 := make([]types.Evidence, len(l)) - for i := range l { - l2[i] = l[len(l)-1-i] + for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 { + l[i], l[j] = l[j], l[i] } - return l2 + + return l } // PendingEvidence returns known uncommitted evidence up to maxBytes. @@ -98,6 +98,7 @@ func (store *EvidenceStore) PendingEvidence(maxBytes int64) (evidence []types.Ev func (store *EvidenceStore) listEvidence(prefixKey string, maxBytes int64) (evidence []types.Evidence) { var bytes int64 iter := dbm.IteratePrefix(store.db, []byte(prefixKey)) + defer iter.Close() for ; iter.Valid(); iter.Next() { val := iter.Value() From f3d08f969dbd5a219eca472fe4eb9f91e460573f Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 3 Oct 2018 04:31:04 +0400 Subject: [PATCH 022/113] [rpc] fix /abci_query: trusted was renamed to prove (#2531) --- rpc/core/routes.go | 2 +- types/block_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc/core/routes.go b/rpc/core/routes.go index 639a2d08a..736ded607 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -36,7 +36,7 @@ var Routes = map[string]*rpc.RPCFunc{ "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), // abci API - "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,trusted"), + "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), } diff --git a/types/block_test.go b/types/block_test.go index c99fb6b07..43366a63b 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) From c94133ed1b0dc203041d00b31194f2c7567e952a Mon Sep 17 00:00:00 2001 From: JamesRay <66258875@qq.com> Date: Wed, 3 Oct 2018 14:28:46 +0800 Subject: [PATCH 023/113] Fix a bug in bit_array's sub function (#2506) --- libs/common/bit_array.go | 2 +- libs/common/bit_array_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go 
index abf6110d8..aa470bbdb 100644 --- a/libs/common/bit_array.go +++ b/libs/common/bit_array.go @@ -189,7 +189,7 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray { if bA.Bits > o.Bits { c := bA.copy() for i := 0; i < len(o.Elems)-1; i++ { - c.Elems[i] &= ^c.Elems[i] + c.Elems[i] &= ^o.Elems[i] } i := len(o.Elems) - 1 if i >= 0 { diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index b1efd3f62..3e2f17ce1 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -131,6 +131,34 @@ func TestSub2(t *testing.T) { } } +func TestSub3(t *testing.T) { + + bA1, _ := randBitArray(231) + bA2, _ := randBitArray(81) + bA3 := bA1.Sub(bA2) + + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) + require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + + if bA3.Bits != bA1.Bits { + t.Error("Expected bA1 bits") + } + if len(bA3.Elems) != len(bA1.Elems) { + t.Error("Expected bA1 elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) + if i < bA2.Bits && bA2.GetIndex(i){ + expected = false + } + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3") + } + } +} + func TestPickRandom(t *testing.T) { for idx := 0; idx < 123; idx++ { bA1 := NewBitArray(123) From 0755a5203da8fd5aab74373f2d2d537c3f17bf8a Mon Sep 17 00:00:00 2001 From: ValarDragon Date: Tue, 2 Oct 2018 16:03:59 -0700 Subject: [PATCH 024/113] bit_array: Simplify subtraction also, fix potential bug in Or function --- CHANGELOG_PENDING.md | 3 +- libs/common/bit_array.go | 46 +++++++-------- libs/common/bit_array_test.go | 103 +++++++++------------------------- 3 files changed, 48 insertions(+), 104 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 6d9813350..81380e7c6 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -11,7 +11,7 @@ BREAKING CHANGES: * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false` * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) - + * Apps * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just arbitrary bytes @@ -40,3 +40,4 @@ BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time - [evidence] \#2515 fix db iter leak (@goolAdapter) +- [common/bit_array] Fixed a bug in the `Or` function diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go index aa470bbdb..161f21fce 100644 --- a/libs/common/bit_array.go +++ b/libs/common/bit_array.go @@ -119,14 +119,13 @@ func (bA *BitArray) Or(o *BitArray) *BitArray { } bA.mtx.Lock() o.mtx.Lock() - defer func() { - bA.mtx.Unlock() - o.mtx.Unlock() - }() c := bA.copyBits(MaxInt(bA.Bits, o.Bits)) - for i := 0; i < len(c.Elems); i++ { + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { c.Elems[i] |= o.Elems[i] } + bA.mtx.Unlock() + o.mtx.Unlock() return c } @@ -173,8 +172,9 @@ func (bA *BitArray) not() *BitArray { } // Sub subtracts the two bit-arrays bitwise, without carrying the bits. -// This is essentially bA.And(o.Not()). -// If bA is longer than o, o is right padded with zeroes. +// Note that carryless subtraction of a - b is (a and not b). +// The output is the same as bA, regardless of o's size. 
+// If bA is longer than o, o is right padded with zeroes func (bA *BitArray) Sub(o *BitArray) *BitArray { if bA == nil || o == nil { // TODO: Decide if we should do 1's complement here? @@ -182,24 +182,20 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray { } bA.mtx.Lock() o.mtx.Lock() - defer func() { - bA.mtx.Unlock() - o.mtx.Unlock() - }() - if bA.Bits > o.Bits { - c := bA.copy() - for i := 0; i < len(o.Elems)-1; i++ { - c.Elems[i] &= ^o.Elems[i] - } - i := len(o.Elems) - 1 - if i >= 0 { - for idx := i * 64; idx < o.Bits; idx++ { - c.setIndex(idx, c.getIndex(idx) && !o.getIndex(idx)) - } - } - return c - } - return bA.and(o.not()) // Note degenerate case where o == nil + // output is the same size as bA + c := bA.copyBits(bA.Bits) + // Only iterate to the minimum size between the two. + // If o is longer, those bits are ignored. + // If bA is longer, then skipping those iterations is equivalent + // to right padding with 0's + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { + // &^ is and not in golang + c.Elems[i] &^= o.Elems[i] + } + bA.mtx.Unlock() + o.mtx.Unlock() + return c } // IsEmpty returns true iff all bits in the bit array are 0 diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index 3e2f17ce1..bc117b2a0 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -75,87 +75,34 @@ func TestOr(t *testing.T) { } } -func TestSub1(t *testing.T) { - - bA1, _ := randBitArray(31) - bA2, _ := randBitArray(51) - bA3 := bA1.Sub(bA2) - - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) - } - } -} - -func TestSub2(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) - bA3 := bA1.Sub(bA2) - - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if i < bA2.Bits && bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3") - } +func TestSub(t *testing.T) { + testCases := []struct { + initBA string + subtractingBA string + expectedBA string + }{ + {`null`, `null`, `null`}, + {`"x"`, `null`, `null`}, + {`null`, `"x"`, `null`}, + {`"x"`, `"x"`, `"_"`}, + {`"xxxxxx"`, `"x_x_x_"`, `"_x_x_x"`}, + {`"x_x_x_"`, `"xxxxxx"`, `"______"`}, + {`"xxxxxx"`, `"x_x_x_xxxx"`, `"_x_x_x"`}, + {`"x_x_x_xxxx"`, `"xxxxxx"`, `"______xxxx"`}, + {`"xxxxxxxxxx"`, `"x_x_x_"`, `"_x_x_xxxxx"`}, + {`"x_x_x_"`, `"xxxxxxxxxx"`, `"______"`}, } -} - -func TestSub3(t *testing.T) { - - bA1, _ := randBitArray(231) - bA2, _ := randBitArray(81) - bA3 := bA1.Sub(bA2) + for _, tc := range testCases { + var bA *BitArray + err := json.Unmarshal([]byte(tc.initBA), &bA) + require.Nil(t, err) - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), 
(*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + var o *BitArray + err = json.Unmarshal([]byte(tc.subtractingBA), &o) + require.Nil(t, err) - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if i < bA2.Bits && bA2.GetIndex(i){ - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3") - } + got, _ := json.Marshal(bA.Sub(o)) + require.Equal(t, tc.expectedBA, string(got), "%s minus %s doesn't equal %s", tc.initBA, tc.subtractingBA, tc.expectedBA) } } From cb2e58411f670549f7e1f66173f78c14116c77a0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 3 Oct 2018 10:53:29 +0400 Subject: [PATCH 025/113] add a missing changelog entry --- CHANGELOG_PENDING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 81380e7c6..a9538dd10 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,6 +1,7 @@ # Pending Special thanks to external contributors on this release: +@goolAdapter, @bradyjoestar BREAKING CHANGES: @@ -41,3 +42,4 @@ BUG FIXES: - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function +- [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar) From 12675ecd92c4f3566f48d42147321c533f963640 Mon Sep 17 00:00:00 2001 From: Zarko Milosevic Date: Thu, 4 Oct 2018 15:37:13 +0200 Subject: [PATCH 026/113] consensus: Wait timeout precommit before starting new round (#2493) * Disable transitioning to new round upon 2/3+ of Precommit nils Pull in ensureVote test function from https://github.com/tendermint/tendermint/pull/2132 * Add several ensureX test methods to wrap channel read with timeout * Revert panic in tests --- CHANGELOG_PENDING.md | 2 + config/config.go | 6 +- consensus/common_test.go | 88 +++++++++++-- consensus/reactor.go | 2 +- consensus/state.go | 21 +-- consensus/state_test.go | 237 ++++++++++++++++++---------------- lite/dynamic_verifier_test.go | 1 + p2p/conn/connection.go | 2 +- p2p/peer.go | 2 +- p2p/test_util.go | 2 +- 10 files changed, 224 insertions(+), 139 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index a9538dd10..2a0b58f60 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -40,6 +40,8 @@ IMPROVEMENTS: BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time +- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) wait for +timeoutPrecommit before starting next round - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function - [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar) diff --git a/config/config.go b/config/config.go index 8f3d6d180..1f9ff3e13 100644 --- a/config/config.go +++ b/config/config.go @@ -475,9 +475,9 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the Tendermint mempool func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Recheck: true, - Broadcast: true, - WalPath: "", + Recheck: true, + Broadcast: true, + WalPath: "", // Each signature verification takes .5ms, size reduced until we 
implement // ABCI Recheck Size: 5000, diff --git a/consensus/common_test.go b/consensus/common_test.go index d7e661481..2a5cc8e79 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path" + "reflect" "sort" "sync" "testing" @@ -306,23 +307,94 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) { //------------------------------------------------------------------------------- +func ensureNoNewEvent(ch <-chan interface{}, timeout time.Duration, + errorMessage string) { + select { + case <-time.After(timeout): + break + case <-ch: + panic(errorMessage) + } +} + func ensureNoNewStep(stepCh <-chan interface{}) { - timer := time.NewTimer(ensureTimeout) + ensureNoNewEvent(stepCh, ensureTimeout, "We should be stuck waiting, "+ + "not moving to the next step") +} + +func ensureNoNewTimeout(stepCh <-chan interface{}, timeout int64) { + timeoutDuration := time.Duration(timeout*5) * time.Nanosecond + ensureNoNewEvent(stepCh, timeoutDuration, "We should be stuck waiting, "+ + "not moving to the next step") +} + +func ensureNewEvent(ch <-chan interface{}, timeout time.Duration, errorMessage string) { select { - case <-timer.C: + case <-time.After(timeout): + panic(errorMessage) + case <-ch: break - case <-stepCh: - panic("We should be stuck waiting, not moving to the next step") } } func ensureNewStep(stepCh <-chan interface{}) { - timer := time.NewTimer(ensureTimeout) + ensureNewEvent(stepCh, ensureTimeout, + "Timeout expired while waiting for NewStep event") +} + +func ensureNewRound(roundCh <-chan interface{}) { + ensureNewEvent(roundCh, ensureTimeout, + "Timeout expired while waiting for NewRound event") +} + +func ensureNewTimeout(timeoutCh <-chan interface{}, timeout int64) { + timeoutDuration := time.Duration(timeout*5) * time.Nanosecond + ensureNewEvent(timeoutCh, timeoutDuration, + "Timeout expired while waiting for NewTimeout event") +} + +func ensureNewProposal(proposalCh <-chan interface{}) { + ensureNewEvent(proposalCh, ensureTimeout, + "Timeout expired while waiting for NewProposal event") +} + +func ensureNewBlock(blockCh <-chan interface{}) { + ensureNewEvent(blockCh, ensureTimeout, + "Timeout expired while waiting for NewBlock event") +} + +func ensureNewVote(voteCh <-chan interface{}) { + ensureNewEvent(voteCh, ensureTimeout, + "Timeout expired while waiting for NewVote event") +} + +func ensureNewUnlock(unlockCh <-chan interface{}) { + ensureNewEvent(unlockCh, ensureTimeout, + "Timeout expired while waiting for NewUnlock event") +} + +func ensureVote(voteCh chan interface{}, height int64, round int, + voteType byte) { select { - case <-timer.C: - panic("We shouldnt be stuck waiting") - case <-stepCh: + case <-time.After(ensureTimeout): break + case v := <-voteCh: + edv, ok := v.(types.EventDataVote) + if !ok { + panic(fmt.Sprintf("expected a *types.Vote, "+ + "got %v. 
wrong subscription channel?", + reflect.TypeOf(v))) + } + vote := edv.Vote + if vote.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) + } + if vote.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) + } + if vote.Type != voteType { + panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type)) + } } } diff --git a/consensus/reactor.go b/consensus/reactor.go index 16e2e7e2e..376b8eda9 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -55,7 +55,7 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options conR := &ConsensusReactor{ conS: consensusState, fastSync: fastSync, - metrics: NopMetrics(), + metrics: NopMetrics(), } conR.updateFastSyncingMetric() conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR) diff --git a/consensus/state.go b/consensus/state.go index 35bbca0f6..0100a1504 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1642,21 +1642,14 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, precommits := cs.Votes.Precommits(vote.Round) cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) blockID, ok := precommits.TwoThirdsMajority() - if ok { - if len(blockID.Hash) == 0 { - cs.enterNewRound(height, vote.Round+1) - } else { - cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) - cs.enterCommit(height, vote.Round) - - if cs.config.SkipTimeoutCommit && precommits.HasAll() { - // if we have all the votes now, - // go straight to new round (skip timeout commit) - // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) - cs.enterNewRound(cs.Height, 0) - } + if ok && len(blockID.Hash) != 0 { + // Executed as TwoThirdsMajority could be from a higher round + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + cs.enterCommit(height, vote.Round) + if cs.config.SkipTimeoutCommit && precommits.HasAll() { + cs.enterNewRound(cs.Height, 0) } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { cs.enterNewRound(height, vote.Round) diff --git a/consensus/state_test.go b/consensus/state_test.go index 4c34d9d2f..831f77f4a 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -67,23 +67,23 @@ func TestStateProposerSelection0(t *testing.T) { startTestRound(cs1, height, round) - // wait for new round so proposer is set - <-newRoundCh + // Wait for new round so proposer is set. + ensureNewRound(newRoundCh) - // lets commit a block and ensure proposer for the next height is correct + // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) { t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) } - // wait for complete proposal - <-proposalCh + // Wait for complete proposal. + ensureNewProposal(proposalCh) rs := cs1.GetRoundState() signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) - // wait for new round so next validator is set - <-newRoundCh + // Wait for new round so next validator is set. + ensureNewRound(newRoundCh) prop = cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, vss[1].GetAddress()) { @@ -102,7 +102,7 @@ func TestStateProposerSelection2(t *testing.T) { incrementRound(vss[1:]...) 
startTestRound(cs1, cs1.Height, 2) - <-newRoundCh // wait for the new round + ensureNewRound(newRoundCh) // wait for the new round // everyone just votes nil. we get a new proposer each round for i := 0; i < len(vss); i++ { @@ -114,8 +114,7 @@ func TestStateProposerSelection2(t *testing.T) { rs := cs1.GetRoundState() signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...) - <-newRoundCh // wait for the new round event each round - + ensureNewRound(newRoundCh) // wait for the new round event each round incrementRound(vss[1:]...) } @@ -133,13 +132,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { startTestRound(cs, height, round) // if we're not a validator, EnterPropose should timeout - ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) - select { - case <-timeoutCh: - case <-ticker.C: - panic("Expected EnterPropose to timeout") - - } + ensureNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) if cs.GetRoundState().Proposal != nil { t.Error("Expected to make no proposal, since no privValidator") @@ -159,7 +152,7 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { cs.enterNewRound(height, round) cs.startRoutines(3) - <-proposalCh + ensureNewProposal(proposalCh) // Check that Proposal, ProposalBlock, ProposalBlockParts are set. rs := cs.GetRoundState() @@ -174,13 +167,7 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { } // if we're a validator, enterPropose should not timeout - ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) - select { - case <-timeoutCh: - panic("Expected EnterPropose not to timeout") - case <-ticker.C: - - } + ensureNoNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) } func TestStateBadProposal(t *testing.T) { @@ -221,19 +208,19 @@ func TestStateBadProposal(t *testing.T) { startTestRound(cs1, height, round) // wait for proposal - <-proposalCh + ensureNewProposal(proposalCh) // wait for prevote - <-voteCh + ensureNewVote(voteCh) validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + ensureNewVote(voteCh) // wait for precommit - <-voteCh + ensureNewVote(voteCh) validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) @@ -261,19 +248,19 @@ func TestStateFullRound1(t *testing.T) { startTestRound(cs, height, round) - <-newRoundCh + ensureNewRound(newRoundCh) // grab proposal re := <-propCh propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() - <-voteCh // wait for prevote + ensureNewVote(voteCh) // wait for prevote validatePrevote(t, cs, round, vss[0], propBlockHash) - <-voteCh // wait for precommit + ensureNewVote(voteCh) // wait for precommit // we're going to roll right into new height - <-newRoundCh + ensureNewRound(newRoundCh) validateLastPrecommit(t, cs, vss[0], propBlockHash) } @@ -288,8 +275,8 @@ func TestStateFullRoundNil(t *testing.T) { cs.enterPrevote(height, round) cs.startRoutines(4) - <-voteCh // prevote - <-voteCh // precommit + ensureNewVote(voteCh) // prevote + ensureNewVote(voteCh) // precommit // should prevote and precommit nil validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) @@ -308,7 +295,7 @@ func TestStateFullRound2(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, 
height, round) - <-voteCh // prevote + ensureNewVote(voteCh) // prevote // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() @@ -316,9 +303,9 @@ func TestStateFullRound2(t *testing.T) { // prevote arrives from vs2: signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2) - <-voteCh + ensureNewVote(voteCh) - <-voteCh //precommit + ensureNewVote(voteCh) //precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) @@ -327,10 +314,10 @@ func TestStateFullRound2(t *testing.T) { // precommit arrives from vs2: signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2) - <-voteCh + ensureNewVote(voteCh) // wait to finish commit, propose in next height - <-newBlockCh + ensureNewBlock(newBlockCh) } //------------------------------------------------------------------------------------------ @@ -363,14 +350,14 @@ func TestStateLockNoPOL(t *testing.T) { rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) theBlockHash := rs.ProposalBlock.Hash() - <-voteCh // prevote + ensureNewVote(voteCh) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2) - <-voteCh // prevote + ensureNewVote(voteCh) // prevote - <-voteCh // precommit + ensureNewVote(voteCh) // precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) @@ -382,15 +369,15 @@ func TestStateLockNoPOL(t *testing.T) { copy(hash, theBlockHash) hash[0] = byte((hash[0] + 1) % 255) signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh // precommit + ensureNewVote(voteCh) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) /// - <-newRoundCh + ensureNewRound(newRoundCh) t.Log("#### ONTO ROUND 1") /* Round2 (cs1, B) // B B2 @@ -407,20 +394,20 @@ func TestStateLockNoPOL(t *testing.T) { } // wait to finish prevote - <-voteCh + ensureNewVote(voteCh) // we should have prevoted our locked block validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash()) // add a conflicting prevote from the other validator signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + ensureNewVote(voteCh) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. 
then wait for precommit - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds()) - <-voteCh // precommit + ensureNewVote(voteCh) // precommit // the proposed block should still be locked and our precommit added // we should precommit nil and be locked on the proposal @@ -429,13 +416,13 @@ func TestStateLockNoPOL(t *testing.T) { // add conflicting precommit from vs2 // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + ensureNewVote(voteCh) // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) - <-newRoundCh + ensureNewRound(newRoundCh) t.Log("#### ONTO ROUND 2") /* Round3 (vs2, _) // B, B2 @@ -451,22 +438,22 @@ func TestStateLockNoPOL(t *testing.T) { panic(fmt.Sprintf("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) } - <-voteCh // prevote + ensureNewVote(voteCh) // prevote validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash()) signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + ensureNewVote(voteCh) - <-timeoutWaitCh // prevote wait - <-voteCh // precommit + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewVote(voteCh) // precommit validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - <-voteCh + ensureNewVote(voteCh) - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) // before we time out into new round, set next proposal block prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -476,7 +463,7 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) - <-newRoundCh + ensureNewRound(newRoundCh) t.Log("#### ONTO ROUND 3") /* Round4 (vs2, C) // B C // B C @@ -488,22 +475,22 @@ func TestStateLockNoPOL(t *testing.T) { t.Fatal(err) } - <-proposalCh - <-voteCh // prevote + ensureNewProposal(proposalCh) + ensureNewVote(voteCh) // prevote // prevote for locked block (not proposal) validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + ensureNewVote(voteCh) - <-timeoutWaitCh - <-voteCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewVote(voteCh) validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - <-voteCh + ensureNewVote(voteCh) } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka @@ -531,18 +518,18 @@ func TestStateLockPOLRelock(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, 0) - <-newRoundCh + ensureNewRound(newRoundCh) re := <-proposalCh rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) theBlockHash := rs.ProposalBlock.Hash() - 
<-voteCh // prevote + ensureNewVote(voteCh) // prevote signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4) // prevotes discardFromChan(voteCh, 3) - <-voteCh // our precommit + ensureNewVote(voteCh) // our precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) @@ -560,14 +547,14 @@ func TestStateLockPOLRelock(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) //XXX: this isnt guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - <-newRoundCh + ensureNewRound(newRoundCh) t.Log("### ONTO ROUND 1") /* @@ -585,7 +572,7 @@ func TestStateLockPOLRelock(t *testing.T) { } // go to prevote, prevote for locked block (not proposal), move on - <-voteCh + ensureNewVote(voteCh) validatePrevote(t, cs1, 0, vss[0], theBlockHash) // now lets add prevotes from everyone else for the new block @@ -625,6 +612,8 @@ func TestStateLockPOLRelock(t *testing.T) { func TestStateLockPOLUnlock(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + h := cs1.GetRoundState().Height + r := cs1.GetRoundState().Round partSize := types.BlockPartSizeBytes @@ -644,20 +633,20 @@ func TestStateLockPOLUnlock(t *testing.T) { */ // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh + startTestRound(cs1, h, r) + ensureNewRound(newRoundCh) re := <-proposalCh rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) theBlockHash := rs.ProposalBlock.Hash() - <-voteCh // prevote + ensureVote(voteCh, h, r, types.VoteTypePrevote) signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4) - <-voteCh //precommit + ensureVote(voteCh, h, r, types.VoteTypePrecommit) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, r, 0, vss[0], theBlockHash, theBlockHash) rs = cs1.GetRoundState() @@ -681,7 +670,7 @@ func TestStateLockPOLUnlock(t *testing.T) { t.Fatal(err) } - <-newRoundCh + ensureNewRound(newRoundCh) t.Log("#### ONTO ROUND 1") /* Round2 (vs2, C) // B nil nil nil // nil nil nil _ @@ -698,21 +687,21 @@ func TestStateLockPOLUnlock(t *testing.T) { } // go to prevote, prevote for locked block (not proposal) - <-voteCh + ensureVote(voteCh, h, r+1, types.VoteTypePrevote) validatePrevote(t, cs1, 0, vss[0], lockedBlockHash) // now lets add prevotes from everyone else for nil (a polka!) 
signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) // the polka makes us unlock and precommit nil - <-unlockCh - <-voteCh // precommit + ensureNewUnlock(unlockCh) + ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) // we should have unlocked and committed nil // NOTE: since we don't relock on nil, the lock round is 0 - validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil) + validatePrecommit(t, cs1, r+1, 0, vss[0], nil, nil) signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) - <-newRoundCh + ensureNewRound(newRoundCh) } // 4 vals @@ -722,6 +711,8 @@ func TestStateLockPOLUnlock(t *testing.T) { func TestStateLockPOLSafety1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + h := cs1.GetRoundState().Height + r := cs1.GetRoundState().Round partSize := types.BlockPartSizeBytes @@ -733,12 +724,12 @@ func TestStateLockPOLSafety1(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, 0) - <-newRoundCh + ensureNewRound(newRoundCh) re := <-proposalCh rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) propBlock := rs.ProposalBlock - <-voteCh // prevote + ensureVote(voteCh, h, r, types.VoteTypePrevote) validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) @@ -747,6 +738,8 @@ func TestStateLockPOLSafety1(t *testing.T) { // before we time out into new round, set next proposer // and next proposal block + + //TODO: Should we remove this? /* _, v1 := cs1.Validators.GetByAddress(vss[0].Address) v1.VotingPower = 1 @@ -759,6 +752,11 @@ func TestStateLockPOLSafety1(t *testing.T) { // we do see them precommit nil signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + ensureVote(voteCh, h, r, types.VoteTypePrecommit) + + ensureNewRound(newRoundCh) + t.Log("### ONTO ROUND 1") + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockHash := propBlock.Hash() propBlockParts := propBlock.MakePartSet(partSize) @@ -769,9 +767,6 @@ func TestStateLockPOLSafety1(t *testing.T) { if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - - <-newRoundCh - t.Log("### ONTO ROUND 1") /*Round2 // we timeout and prevote our lock // a polka happened but we didn't see it! 
@@ -792,24 +787,24 @@ func TestStateLockPOLSafety1(t *testing.T) { } t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) // go to prevote, prevote for proposal block - <-voteCh + ensureVote(voteCh, h, r+1, types.VoteTypePrevote) validatePrevote(t, cs1, 1, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - <-voteCh // precommit + ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) // we should have precommitted validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) incrementRound(vs2, vs3, vs4) - <-newRoundCh + ensureNewRound(newRoundCh) t.Log("### ONTO ROUND 2") /*Round3 @@ -817,10 +812,10 @@ func TestStateLockPOLSafety1(t *testing.T) { */ // timeout of propose - <-timeoutProposeCh + ensureNewTimeout(timeoutProposeCh, cs1.config.TimeoutPropose.Nanoseconds()) // finish prevote - <-voteCh + ensureVote(voteCh, h, r+2, types.VoteTypePrevote) // we should prevote what we're locked on validatePrevote(t, cs1, 2, vss[0], propBlockHash) @@ -845,6 +840,8 @@ func TestStateLockPOLSafety1(t *testing.T) { func TestStateLockPOLSafety2(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + h := cs1.GetRoundState().Height + r := cs1.GetRoundState().Round partSize := types.BlockPartSizeBytes @@ -876,20 +873,19 @@ func TestStateLockPOLSafety2(t *testing.T) { t.Log("### ONTO Round 1") // jump in at round 1 - height := cs1.Height - startTestRound(cs1, height, 1) - <-newRoundCh + startTestRound(cs1, h, r+1) + ensureNewRound(newRoundCh) if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { t.Fatal(err) } - <-proposalCh + ensureNewProposal(proposalCh) - <-voteCh // prevote + ensureVote(voteCh, h, r+1, types.VoteTypePrevote) signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) - <-voteCh // precommit + ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1) @@ -900,10 +896,10 @@ func TestStateLockPOLSafety2(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout of precommit wait to new round - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1) + newProp := types.NewProposal(h, 2, propBlockParts0.Header(), 0, propBlockID1) if err := vs3.SignProposal(config.ChainID(), newProp); err != nil { t.Fatal(err) } @@ -914,7 +910,7 @@ func TestStateLockPOLSafety2(t *testing.T) { // Add the pol votes addVotes(cs1, prevotes...) 
- <-newRoundCh + ensureNewRound(newRoundCh) t.Log("### ONTO Round 2") /*Round2 // now we see the polka from round 1, but we shouldnt unlock @@ -936,6 +932,26 @@ func TestStateLockPOLSafety2(t *testing.T) { } +// 4 vals, 3 Nil Precommits at P0 +// What we want: +// P0 waits for timeoutPrecommit before starting next round +func TestWaitingTimeoutOnNilPolka(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + + // start round + startTestRound(cs1, cs1.Height, 0) + ensureNewRound(newRoundCh) + + signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewRound(newRoundCh) +} + //------------------------------------------------------------------------------------------ // SlashingSuite // TODO: Slashing @@ -1024,7 +1040,8 @@ func TestStateSlashingPrecommits(t *testing.T) { func TestStateHalt1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - + h := cs1.GetRoundState().Height + r := cs1.GetRoundState().Round partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -1035,16 +1052,16 @@ func TestStateHalt1(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, 0) - <-newRoundCh + ensureNewRound(newRoundCh) re := <-proposalCh rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) propBlock := rs.ProposalBlock propBlockParts := propBlock.MakePartSet(partSize) - <-voteCh // prevote + ensureVote(voteCh, h, r, types.VoteTypePrevote) signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4) - <-voteCh // precommit + ensureVote(voteCh, h, r, types.VoteTypePrecommit) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash()) @@ -1058,7 +1075,7 @@ func TestStateHalt1(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) re = <-newRoundCh rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) @@ -1069,14 +1086,14 @@ func TestStateHalt1(t *testing.T) { */ // go to prevote, prevote for locked block - <-voteCh // prevote + ensureVote(voteCh, h, r+1, types.VoteTypePrevote) validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash()) // now we receive the precommit from the previous round addVotes(cs1, precommit4) // receiving that precommit should take us straight to commit - <-newBlockCh + ensureNewBlock(newBlockCh) re = <-newRoundCh rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index 401c14871..9ff8ed81f 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -4,6 +4,7 @@ import ( "fmt" "sync" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 2eb210e3c..0e33adab9 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -756,7 +756,7 @@ func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) { func (ch *Channel) updateStats() { // Exponential decay of stats. // TODO: optimize. 
- atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent)) * 0.8)) + atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8)) } //---------------------------------------- diff --git a/p2p/peer.go b/p2p/peer.go index 064f91817..ba22695e7 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -102,7 +102,7 @@ type peer struct { // User data Data *cmn.CMap - metrics *Metrics + metrics *Metrics metricsTicker *time.Ticker } diff --git a/p2p/test_util.go b/p2p/test_util.go index 3d48aaac4..e35e0989f 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -28,7 +28,7 @@ func CreateRandomPeer(outbound bool) *peer { ID: netAddr.ID, ListenAddr: netAddr.DialString(), }, - mconn: &conn.MConnection{}, + mconn: &conn.MConnection{}, metrics: NopMetrics(), } p.SetLogger(log.TestingLogger().With("peer", addr)) From 303649818c0356e46f00db0d48a62e2846bc74c0 Mon Sep 17 00:00:00 2001 From: Zach Date: Thu, 4 Oct 2018 17:22:41 -0400 Subject: [PATCH 027/113] update docs links & sidebar (#2541) * docs: fix links * docs: add readme from each section to the sidebar --- docs/.vuepress/config.js | 10 +++++++--- docs/app-dev/getting-started.md | 3 +-- docs/app-dev/subscribing-to-events-via-websocket.md | 4 ++-- docs/introduction/README.md | 8 ++++---- docs/networks/README.md | 6 +++--- docs/spec/consensus/consensus.md | 2 +- docs/tendermint-core/README.md | 2 +- docs/tools/README.md | 6 +++--- 8 files changed, 22 insertions(+), 19 deletions(-) diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index b4e2c3fa2..ce9491cb9 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -14,6 +14,7 @@ module.exports = { title: "Introduction", collapsable: false, children: [ + "/introduction/", "/introduction/quick-start", "/introduction/install", "/introduction/what-is-tendermint" @@ -23,6 +24,7 @@ module.exports = { title: "Tendermint Core", collapsable: false, children: [ + "/tendermint-core/", "/tendermint-core/using-tendermint", "/tendermint-core/configuration", "/tendermint-core/rpc", @@ -40,14 +42,16 @@ module.exports = { title: "Tools", collapsable: false, children: [ - "tools/benchmarking", - "tools/monitoring" + "/tools/", + "/tools/benchmarking", + "/tools/monitoring" ] }, { title: "Networks", collapsable: false, children: [ + "/networks/", "/networks/docker-compose", "/networks/terraform-and-ansible", ] @@ -99,7 +103,7 @@ module.exports = { ] }, { - title: "ABCI Specification", + title: "ABCI Spec", collapsable: false, children: [ "/spec/abci/abci", diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 066deaacd..14aa0dc30 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -7,8 +7,7 @@ application you want to run. So, to run a complete blockchain that does something useful, you must start two programs: one is Tendermint Core, the other is your application, which can be written in any programming language. Recall from [the intro to -ABCI](../introduction/introduction.html#abci-overview) that Tendermint Core handles all -the p2p and consensus stuff, and just forwards transactions to the +ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the application when they need to be validated, or when they're ready to be committed to a block. 
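The paragraph above captures the whole ABCI division of labour, so a minimal, hypothetical application sketch may help make it concrete. Only the `Application` method signatures and the `abci/server` helper are Tendermint's; the app name and its trivial counting logic are invented for illustration:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/server"
	abci "github.com/tendermint/tendermint/abci/types"
)

// CountingApp is a toy state machine: Tendermint Core handles all the p2p and
// consensus work and calls into these methods over ABCI.
type CountingApp struct {
	abci.BaseApplication // no-op defaults for the methods we don't override
	committed int64
	pending   int64
}

// CheckTx is called when a transaction enters the mempool: validate only.
func (app *CountingApp) CheckTx(tx []byte) abci.ResponseCheckTx {
	return abci.ResponseCheckTx{Code: abci.CodeTypeOK}
}

// DeliverTx is called for every transaction in a decided block: apply it.
func (app *CountingApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
	app.pending++
	return abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}

// Commit persists whatever DeliverTx accumulated for this block.
func (app *CountingApp) Commit() abci.ResponseCommit {
	app.committed += app.pending
	app.pending = 0
	return abci.ResponseCommit{}
}

func main() {
	srv, err := server.NewServer("tcp://127.0.0.1:26658", "socket", &CountingApp{})
	if err != nil {
		panic(err)
	}
	if err := srv.Start(); err != nil {
		panic(err)
	}
	select {} // serve until the process is killed
}
```

A Tendermint node started separately with `--proxy_app=tcp://127.0.0.1:26658` would then drive this process for every transaction and block.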
diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md index 69ab59f50..499548094 100644 --- a/docs/app-dev/subscribing-to-events-via-websocket.md +++ b/docs/app-dev/subscribing-to-events-via-websocket.md @@ -20,7 +20,7 @@ method via Websocket. } ``` -Check out [API docs](https://tendermint.github.io/slate/#subscribe) for +Check out [API docs](https://tendermint.com/rpc/) for more information on query syntax and other options. You can also use tags, given you had included them into DeliverTx @@ -32,7 +32,7 @@ transactions](./indexing-transactions.md) for details. When validator set changes, ValidatorSetUpdates event is published. The event carries a list of pubkey/power pairs. The list is the same Tendermint receives from ABCI application (see [EndBlock -section](https://tendermint.com/docs/app-dev/abci-spec.html#endblock) in +section](../spec/abci/abci.md#endblock) in the ABCI spec). Response: diff --git a/docs/introduction/README.md b/docs/introduction/README.md index ad9a93dd9..7f3f97a27 100644 --- a/docs/introduction/README.md +++ b/docs/introduction/README.md @@ -1,15 +1,15 @@ -# Introduction +# Overview ## Quick Start -Get Tendermint up-and-running quickly with the [quick-start guide](quick-start.md)! +Get Tendermint up-and-running quickly with the [quick-start guide](./quick-start.md)! ## Install -Detailed [installation instructions](install.md). +Detailed [installation instructions](./install.md). ## What is Tendermint? -Dive into [what Tendermint is and why](what-is-tendermint.md)! +Dive into [what Tendermint is and why](./what-is-tendermint.md)! diff --git a/docs/networks/README.md b/docs/networks/README.md index b1ba27126..aa53afb08 100644 --- a/docs/networks/README.md +++ b/docs/networks/README.md @@ -1,9 +1,9 @@ -# Networks +# Overview -Use [Docker Compose](docker-compose.md) to spin up Tendermint testnets on your +Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your local machine. -Use [Terraform and Ansible](terraform-and-ansible.md) to deploy Tendermint +Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint testnets to the cloud. See the `tendermint testnet --help` command for more help initializing testnets. diff --git a/docs/spec/consensus/consensus.md b/docs/spec/consensus/consensus.md index b58184c7a..acd07397a 100644 --- a/docs/spec/consensus/consensus.md +++ b/docs/spec/consensus/consensus.md @@ -241,7 +241,7 @@ commit-set) are each justified in the JSet with no duplicitous vote signatures (by the committers). - **Lemma**: When a fork is detected by the existence of two - conflicting [commits](./validators.html#commiting-a-block), the + conflicting [commits](../blockchain/blockchain.md#commit), the union of the JSets for both commits (if they can be compiled) must include double-signing by at least 1/3+ of the validator set. **Proof**: The commit cannot be at the same round, because that diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index 7f5dc6772..88228a581 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -1,4 +1,4 @@ -# Tendermint Core +# Overview See the side-bar for details on the various features of Tendermint Core. 
diff --git a/docs/tools/README.md b/docs/tools/README.md index b08416bb3..ef1ae7c22 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -1,4 +1,4 @@ -# Tools +# Overview -Tendermint comes with some tools for [benchmarking](benchmarking.md) -and [monitoring](monitoring.md). +Tendermint comes with some tools for [benchmarking](./benchmarking.md) +and [monitoring](./monitoring.md). From f11aef20a0556da9e37799f8c6e1b737201c72d4 Mon Sep 17 00:00:00 2001 From: Jeremiah Andrews Date: Thu, 4 Oct 2018 14:54:45 -0700 Subject: [PATCH 028/113] Add ADR for Commit changes (#2374) --- docs/architecture/adr-025-commit.md | 75 +++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 docs/architecture/adr-025-commit.md diff --git a/docs/architecture/adr-025-commit.md b/docs/architecture/adr-025-commit.md new file mode 100644 index 000000000..3f2527951 --- /dev/null +++ b/docs/architecture/adr-025-commit.md @@ -0,0 +1,75 @@ +# ADR 025 Commit + +## Context +Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data. +In particular it contains an array of every precommit from the validators, which includes many copies of the same data. Such as `Height`, `Round`, `Type`, and `BlockID`. Also the `ValidatorIndex` could be derived from the vote's position in the array, and the `ValidatorAddress` could potentially be derived from runtime context. The only truely necessary data is the `Signature` and `Timestamp` associated with each `Vote`. + +``` +type Commit struct { + BlockID BlockID `json:"block_id"` + Precommits []*Vote `json:"precommits"` +} +type Vote struct { + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` + Height int64 `json:"height"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` + Type byte `json:"type"` + BlockID BlockID `json:"block_id"` + Signature []byte `json:"signature"` +} +``` +References: +[#1648](https://github.com/tendermint/tendermint/issues/1648) +[#2179](https://github.com/tendermint/tendermint/issues/2179) +[#2226](https://github.com/tendermint/tendermint/issues/2226) + +## Proposed Solution +We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself. +``` +type Commit struct { + Height int64 + Round int + BlockID BlockID `json:"block_id"` + Precommits []*CommitSig `json:"precommits"` +} +type CommitSig struct { + ValidatorAddress Address + Signature []byte + Timestamp time.Time +} +``` +Continuing to store the `ValidatorAddress` in the `CommitSig` takes up extra space, but simplifies the process and allows for easier debugging. + +## Status +Proposed + +## Consequences + +### Positive +The size of a `Commit` transmitted over the network goes from: + +|BlockID| + n * (|Address| + |ValidatorIndex| + |Height| + |Round| + |Timestamp| + |Type| + |BlockID| + |Signature|) + +to: + + +|BlockID|+|Height|+|Round| + n*(|Address| + |Signature| + |Timestamp|) + +This saves: + +n * (|BlockID| + |ValidatorIndex| + |Type|) + (n-1) * (Height + Round) + +In the current context, this would concretely be: +(assuming all ints are int64, and hashes are 32 bytes) + +n *(72 + 8 + 1 + 8 + 8) - 16 = n * 97 - 16 + +With 100 validators this is a savings of almost 10KB on every block. 
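To sanity-check that figure, here is a small sketch using the same assumed sizes as above (8-byte integers, a 1-byte vote type, a 72-byte BlockID); the address, signature, and timestamp terms drop out of the comparison because they appear in both encodings:

```go
package main

import "fmt"

func main() {
	const (
		blockIDSize = 72 // hash plus PartSetHeader, as assumed above
		intSize     = 8  // Height, Round, ValidatorIndex
		typeSize    = 1  // vote type byte
	)
	n := 100 // validators
	// savings = n*(|BlockID| + |ValidatorIndex| + |Type|) + (n-1)*(|Height| + |Round|)
	savings := n*(blockIDSize+intSize+typeSize) + (n-1)*(intSize+intSize)
	fmt.Printf("%d bytes saved per block\n", savings) // prints 9684 for n=100
}
```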
+ +### Negative +This would add some complexity to the processing and verification of blocks and commits, as votes would have to be reconstructed to be verified and gossiped. The reconstruction could be relatively straightforward, only requiring the copying of data from the `Commit` itself into the newly created `Vote`. + +### Neutral +This design leaves the `ValidatorAddress` in the `CommitSig` and in the `Vote`. These could be removed at some point for additional savings, but that would introduce more complexity, and make printing of `Commit` and `VoteSet` objects less informative, which could harm debugging efficiency and UI/UX. \ No newline at end of file From 5b1b1ea58a5515545e7efd668a58b2b4d9ba3968 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 5 Oct 2018 01:57:59 +0400 Subject: [PATCH 029/113] [libs/autofile] fix DATA RACE by removing openFile() call (#2539) There's a time window after we call RotateFile() where autofile#index+1 does not exist. It will be created during the next call to Write(). BUT if somebody calls NewReader() before Write(), it will fail with "open /tmp/wal#index+1/wal: no such file or directory" We must create file (either by calling gr.Head.openFile() or directly) during NewReader() to ensure read calls succeed. Closes #2538 --- libs/autofile/group.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/libs/autofile/group.go b/libs/autofile/group.go index 807f7e1ed..ea272b61a 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -280,7 +280,7 @@ func (g *Group) RotateFile() { headPath := g.Head.Path if err := g.headBuf.Flush(); err != nil { - panic(err) //panic is used for consistent with below + panic(err) } if err := g.Head.Sync(); err != nil { @@ -296,12 +296,6 @@ func (g *Group) RotateFile() { panic(err) } - //make sure head file exist, there is a window time between rename and next write - //when NewReader(maxIndex), lead to "open /tmp/wal058868562/wal: no such file or directory" - if err := g.Head.openFile(); err != nil { - panic(err) - } - g.maxIndex++ } @@ -684,7 +678,6 @@ func (gr *GroupReader) ReadLine() (string, error) { // IF index > gr.Group.maxIndex, returns io.EOF // CONTRACT: caller should hold gr.mtx func (gr *GroupReader) openFile(index int) error { - // Lock on Group to ensure that head doesn't move in the meanwhile. gr.Group.mtx.Lock() defer gr.Group.mtx.Unlock() @@ -694,7 +687,7 @@ func (gr *GroupReader) openFile(index int) error { } curFilePath := filePathForIndex(gr.Head.Path, index, gr.Group.maxIndex) - curFile, err := os.Open(curFilePath) + curFile, err := os.OpenFile(curFilePath, os.O_RDONLY|os.O_CREATE, autoFilePerms) if err != nil { return err } From be1760cc258605c7d729fc130097547c7773ab4d Mon Sep 17 00:00:00 2001 From: JamesRay <66258875@qq.com> Date: Wed, 19 Sep 2018 11:52:40 +0800 Subject: [PATCH 030/113] Create adr-021-check block txs before prevote.md --- .../adr-021-check block txs before prevote.md | 119 ++++++++++++++++++ 1 file changed, 119 insertions(+) create mode 100644 docs/architecture/adr-021-check block txs before prevote.md diff --git a/docs/architecture/adr-021-check block txs before prevote.md b/docs/architecture/adr-021-check block txs before prevote.md new file mode 100644 index 000000000..712e1a7fd --- /dev/null +++ b/docs/architecture/adr-021-check block txs before prevote.md @@ -0,0 +1,119 @@ +# ADR 021: check block txs before prevote + +## Changelog + +19-09-2018: Initial Draft + +## Context + +We currently check a tx's validity through 2 ways. + +1. 
Through checkTx in mempool connection. +2. Through deliverTx in consensus connection. + +The 1st is called when external tx comes in, so the node should be a proposer this time. The 2nd is called when external block comes in and reach the commit phase, the node doesn't need to be the proposer of the block, however it should check the txs in that block. + +In the 2nd situation, if there are many invalid txs in the block, it would be too late for all nodes to discover that most txs in the block are invalid, and we'd better not record invalid txs in the blockchain too. + +## Proposed solution + +Therefore, we should find a way to check the txs' validity before send out a prevote. Currently we have cs.isProposalComplete() to judge whether a block is complete. We can have + +``` +func (blockExec *BlockExecutor) CheckBlock(block *types.Block) error { + // check txs of block. + for _, tx := range block.Txs { + reqRes := blockExec.proxyApp.CheckTxAsync(tx) + reqRes.Wait() + if reqRes.Response == nil || reqRes.Response.GetCheckTx() == nil || reqRes.Response.GetCheckTx().Code != abci.CodeTypeOK { + return errors.Errorf("tx %v check failed. response: %v", tx, reqRes.Response) + } + } + return nil +} +``` + +such a method in BlockExecutor to check all txs' validity in that block. + +However, this method should not be implemented like that, because checkTx will share the same state used in mempool in the app. So we should define a new interface method checkBlock in Application to indicate it to use the same state as deliverTx. + +``` +type Application interface { + // Info/Query Connection + Info(RequestInfo) ResponseInfo // Return application info + SetOption(RequestSetOption) ResponseSetOption // Set application option + Query(RequestQuery) ResponseQuery // Query for state + + // Mempool Connection + CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool + + // Consensus Connection + InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore + CheckBlock(RequestCheckBlock) ResponseCheckBlock + BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block + DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing + EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set + Commit() ResponseCommit // Commit the state and return the application Merkle root hash +} +``` + +All app should implement that method. For example, counter: + +``` +func (app *CounterApplication) CheckBlock(block types.Request_CheckBlock) types.ResponseCheckBlock { + if app.serial { + app.originalTxCount = app.txCount //backup the txCount state + for _, tx := range block.CheckBlock.Block.Txs { + if len(tx) > 8 { + return types.ResponseCheckBlock{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))} + } + tx8 := make([]byte, 8) + copy(tx8[len(tx8)-len(tx):], tx) + txValue := binary.BigEndian.Uint64(tx8) + if txValue < uint64(app.txCount) { + return types.ResponseCheckBlock{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. 
Expected >= %v, got %v", app.txCount, txValue)} + } + app.txCount++ + } + } + return types.ResponseCheckBlock{Code: code.CodeTypeOK} +} +``` + +In BeginBlock, the app should restore the state to the orignal state before checking the block: + +``` +func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { + if app.serial { + app.txCount = app.originalTxCount //restore the txCount state + } + app.txCount++ + return types.ResponseDeliverTx{Code: code.CodeTypeOK} +} +``` + +The txCount is like the nonce in ethermint, it should be restored when entering the deliverTx phase. While some operation like checking the tx signature needs not to be done again. So the deliverTx can focus on how a tx can be applied, ignoring the checking of the tx, because all the checking has already been done in the checkBlock phase before. + +An optional optimization is alter the deliverTx to deliverBlock. For the block has already been checked by checkBlock, so all the txs in it are valid. So the app can cache the block, and in the deliverBlock phase, it just needs to apply the block in the cache. This optimization can save network current in deliverTx. + + + +## Status + +Proposed. + +## Consequences + +### Positive + +- more robust to defend the adversary to propose a block full of invalid txs. + +### Negative + +- add a new interface method. app logic needs to adjust to appeal to it. + +### Neutral From c15fc9ff63c3a96fe66cec33cd17944db4770d7b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 4 Oct 2018 20:11:21 -0400 Subject: [PATCH 031/113] adr-029: update CheckBlock --- ...prevote.md => adr-029-check-tx-consensus.md} | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) rename docs/architecture/{adr-021-check block txs before prevote.md => adr-029-check-tx-consensus.md} (90%) diff --git a/docs/architecture/adr-021-check block txs before prevote.md b/docs/architecture/adr-029-check-tx-consensus.md similarity index 90% rename from docs/architecture/adr-021-check block txs before prevote.md rename to docs/architecture/adr-029-check-tx-consensus.md index 712e1a7fd..c1b882c61 100644 --- a/docs/architecture/adr-021-check block txs before prevote.md +++ b/docs/architecture/adr-029-check-tx-consensus.md @@ -1,7 +1,9 @@ -# ADR 021: check block txs before prevote +# ADR 029: Check block txs before prevote ## Changelog +04-10-2018: Update with link to issue +[#2384](https://github.com/tendermint/tendermint/issues/2384) and reason for rejection 19-09-2018: Initial Draft ## Context @@ -13,11 +15,11 @@ We currently check a tx's validity through 2 ways. The 1st is called when external tx comes in, so the node should be a proposer this time. The 2nd is called when external block comes in and reach the commit phase, the node doesn't need to be the proposer of the block, however it should check the txs in that block. -In the 2nd situation, if there are many invalid txs in the block, it would be too late for all nodes to discover that most txs in the block are invalid, and we'd better not record invalid txs in the blockchain too. +In the 2nd situation, if there are many invalid txs in the block, it would be too late for all nodes to discover that most txs in the block are invalid, and we'd better not record invalid txs in the blockchain too. ## Proposed solution -Therefore, we should find a way to check the txs' validity before send out a prevote. Currently we have cs.isProposalComplete() to judge whether a block is complete. 
We can have +Therefore, we should find a way to check the txs' validity before send out a prevote. Currently we have cs.isProposalComplete() to judge whether a block is complete. We can have ``` func (blockExec *BlockExecutor) CheckBlock(block *types.Block) error { @@ -104,7 +106,11 @@ An optional optimization is alter the deliverTx to deliverBlock. For the block h ## Status -Proposed. +Rejected + +## Decision + +Performance impact is considered too great. See [#2384](https://github.com/tendermint/tendermint/issues/2384) ## Consequences @@ -115,5 +121,8 @@ Proposed. ### Negative - add a new interface method. app logic needs to adjust to appeal to it. +- sending all the tx data over the ABCI twice +- potentially redundant validations (eg. signature checks in both CheckBlock and + DeliverTx) ### Neutral From d2be7482e12835f36b2dfd5eea880f260dbfdc13 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Fri, 5 Oct 2018 02:28:27 +0200 Subject: [PATCH 032/113] [ADR][DRAFT] 024: SignBytes and validator types in privval (#2445) * first draft for ADR summarizing discussion from: https://github.com/tendermint/tendermint/issues/1622 * fix link and add comment about pub-key per message and fix link * fix link and add comment about pub-key per message; also: - fix link - add little diagram - fix typo * Add a slightly different approach * typo and ADR number --- docs/architecture/adr-024-sign-bytes.md | 234 ++++++++++++++++++++++++ 1 file changed, 234 insertions(+) create mode 100644 docs/architecture/adr-024-sign-bytes.md diff --git a/docs/architecture/adr-024-sign-bytes.md b/docs/architecture/adr-024-sign-bytes.md new file mode 100644 index 000000000..34bf6e51e --- /dev/null +++ b/docs/architecture/adr-024-sign-bytes.md @@ -0,0 +1,234 @@ +# ADR 024: SignBytes and validator types in privval + +## Context + +Currently, the messages exchanged between tendermint and a (potentially remote) signer/validator, +namely votes, proposals, and heartbeats, are encoded as a JSON string +(e.g., via `Vote.SignBytes(...)`) and then +signed . JSON encoding is sub-optimal for both, hardware wallets +and for usage in ethereum smart contracts. Both is laid down in detail in [issue#1622]. + +Also, there are currently no differences between sign-request and -replies. Also, there is no possibility +for a remote signer to include an error code or message in case something went wrong. +The messages exchanged between tendermint and a remote signer currently live in +[privval/socket.go] and encapsulate the corresponding types in [types]. + + +[privval/socket.go]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/privval/socket.go#L496-L502 +[issue#1622]: https://github.com/tendermint/tendermint/issues/1622 +[types]: https://github.com/tendermint/tendermint/tree/master/types + + +## Decision + +- restructure vote, proposal, and heartbeat such that their encoding is easily parseable by +hardware devices and smart contracts using a binary encoding format ([amino] in this case) +- split up the messages exchanged between tendermint and remote signers into requests and +responses (see details below) +- include an error type in responses + +### Overview +``` ++--------------+ +----------------+ +| | SignXRequest | | +|Remote signer |<---------------------+ tendermint | +| (e.g. 
KMS) | | | +| +--------------------->| | ++--------------+ SignedXReply +----------------+ + + +SignXRequest { + x: X +} + +SignedXReply { + x: X + sig: Signature // []byte + err: Error{ + code: int + desc: string + } +} +``` + +TODO: Alternatively, the type `X` might directly include the signature. A lot of places expect a vote with a +signature and do not necessarily deal with "Replies". +Still exploring what would work best here. +This would look like (exemplified using X = Vote): +``` +Vote { + // all fields besides signature +} + +SignedVote { + Vote Vote + Signature []byte +} + +SignVoteRequest { + Vote Vote +} + +SignedVoteReply { + Vote SignedVote + Err Error +} +``` + +**Note:** There was a related discussion around including a fingerprint of, or, the whole public-key +into each sign-request to tell the signer which corresponding private-key to +use to sign the message. This is particularly relevant in the context of the KMS +but is currently not considered in this ADR. + + +[amino]: https://github.com/tendermint/go-amino/ + +### Vote + +As explained in [issue#1622] `Vote` will be changed to contain the following fields +(notation in protobuf-like syntax for easy readability): + +```proto +// vanilla protobuf / amino encoded +message Vote { + Version fixed32 + Height sfixed64 + Round sfixed32 + VoteType fixed32 + Timestamp Timestamp // << using protobuf definition + BlockID BlockID // << as already defined + ChainID string // at the end because length could vary a lot +} + +// this is an amino registered type; like currently privval.SignVoteMsg: +// registered with "tendermint/socketpv/SignVoteRequest" +message SignVoteRequest { + Vote vote +} + +// amino registered type +// registered with "tendermint/socketpv/SignedVoteReply" +message SignedVoteReply { + Vote Vote + Signature Signature + Err Error +} + +// we will use this type everywhere below +message Error { + Type uint // error code + Description string // optional description +} + +``` + +The `ChainID` gets moved into the vote message directly. Previously, it was injected +using the [Signable] interface method `SignBytes(chainID string) []byte`. Also, the +signature won't be included directly, only in the corresponding `SignedVoteReply` message. + +[Signable]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/types/signable.go#L9-L11 + +### Proposal + +```proto +// vanilla protobuf / amino encoded +message Proposal { + Height sfixed64 + Round sfixed32 + Timestamp Timestamp // << using protobuf definition + BlockPartsHeader PartSetHeader // as already defined + POLRound sfixed32 + POLBlockID BlockID // << as already defined +} + +// amino registered with "tendermint/socketpv/SignProposalRequest" +message SignProposalRequest { + Proposal proposal +} + +// amino registered with "tendermint/socketpv/SignProposalReply" +message SignProposalReply { + Prop Proposal + Sig Signature + Err Error // as defined above +} +``` + +### Heartbeat + +**TODO**: clarify if heartbeat also needs a fixed offset and update the fields accordingly: + +```proto +message Heartbeat { + ValidatorAddress Address + ValidatorIndex int + Height int64 + Round int + Sequence int +} +// amino registered with "tendermint/socketpv/SignHeartbeatRequest" +message SignHeartbeatRequest { + Hb Heartbeat +} + +// amino registered with "tendermint/socketpv/SignHeartbeatReply" +message SignHeartbeatReply { + Hb Heartbeat + Sig Signature + Err Error // as defined above +} + +``` + +## PubKey + +TBA - this needs further thoughts: e.g. 
what todo like in the case of the KMS which holds +several keys? How does it know with which key to reply? + +## SignBytes +`SignBytes` will not require a `ChainID` parameter: + +```golang +type Signable interface { + SignBytes() []byte +} + +``` +And the implementation for vote, heartbeat, proposal will look like: +```golang +// type T is one of vote, sign, proposal +func (tp *T) SignBytes() []byte { + bz, err := cdc.MarshalBinary(tp) + if err != nil { + panic(err) + } + return bz +} +``` + +## Status + +DRAFT + +## Consequences + + + +### Positive + +The most relevant positive effect is that the signing bytes can easily be parsed by a +hardware module and a smart contract. Besides that: + +- clearer separation between requests and responses +- added error messages enable better error handling + + +### Negative + +- relatively huge change / refactoring touching quite some code +- lot's of places assume a `Vote` with a signature included -> they will need to +- need to modify some interfaces + +### Neutral + +not even the swiss are neutral From e6a55b7be00e46b7b202fe36f72d23e2f567153d Mon Sep 17 00:00:00 2001 From: Alexander Simmerl Date: Fri, 5 Oct 2018 02:35:35 +0200 Subject: [PATCH 033/113] consensus: Add ADR for first stage consensus refactor (#2462) --- .../adr-030-consensus-refactor.md | 152 ++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 docs/architecture/adr-030-consensus-refactor.md diff --git a/docs/architecture/adr-030-consensus-refactor.md b/docs/architecture/adr-030-consensus-refactor.md new file mode 100644 index 000000000..d48cfe103 --- /dev/null +++ b/docs/architecture/adr-030-consensus-refactor.md @@ -0,0 +1,152 @@ +# ADR 030: Consensus Refactor + +## Context + +One of the biggest challenges this project faces is to proof that the +implementations of the specifications are correct, much like we strive to +formaly verify our alogrithms and protocols we should work towards high +confidence about the correctness of our program code. One of those is the core +of Tendermint - Consensus - which currently resides in the `consensus` package. +Over time there has been high friction making changes to the package due to the +algorithm being scattered in a side-effectful container (the current +`ConsensusState`). In order to test the algorithm a large object-graph needs to +be set up and even than the non-deterministic parts of the container makes will +prevent high certainty. Where ideally we have a 1-to-1 representation of the +[spec](https://github.com/tendermint/spec), ready and easy to test for domain +experts. + +Addresses: + +- [#1495](https://github.com/tendermint/tendermint/issues/1495) +- [#1692](https://github.com/tendermint/tendermint/issues/1692) + +## Decision + +To remedy these issues we plan a gradual, non-invasive refactoring of the +`consensus` package. Starting of by isolating the consensus alogrithm into +a pure function and a finite state machine to address the most pressuring issue +of lack of confidence. Doing so while leaving the rest of the package in tact +and have follow-up optional changes to improve the sepration of concerns. + +### Implementation changes + +The core of Consensus can be modelled as a function with clear defined inputs: + +* `State` - data container for current round, height, etc. 
+* `Event`- significant events in the network + +producing clear outputs; + +* `State` - updated input +* `Message` - signal what actions to perform + +```go +type Event int + +const ( + EventUnknown Event = iota + EventProposal + Majority23PrevotesBlock + Majority23PrecommitBlock + Majority23PrevotesAny + Majority23PrecommitAny + TimeoutNewRound + TimeoutPropose + TimeoutPrevotes + TimeoutPrecommit +) + +type Message int + +const ( + MeesageUnknown Message = iota + MessageProposal + MessageVotes + MessageDecision +) + +type State struct { + height uint64 + round uint64 + step uint64 + lockedValue interface{} // TODO: Define proper type. + lockedRound interface{} // TODO: Define proper type. + validValue interface{} // TODO: Define proper type. + validRound interface{} // TODO: Define proper type. + // From the original notes: valid(v) + valid interface{} // TODO: Define proper type. + // From the original notes: proposer(h, r) + proposer interface{} // TODO: Define proper type. +} + +func Consensus(Event, State) (State, Message) { + // Consolidate implementation. +} +``` + +Tracking of relevant information to feed `Event` into the function and act on +the output is left to the `ConsensusExecutor` (formerly `ConsensusState`). + +Benefits for testing surfacing nicely as testing for a sequence of events +against algorithm could be as simple as the following example: + +``` go +func TestConsensusXXX(t *testing.T) { + type expected struct { + message Message + state State + } + + // Setup order of events, initial state and expectation. + var ( + events = []struct { + event Event + want expected + }{ + // ... + } + state = State{ + // ... + } + ) + + for _, e := range events { + sate, msg = Consensus(e.event, state) + + // Test message expectation. + if msg != e.want.message { + t.Fatalf("have %v, want %v", msg, e.want.message) + } + + // Test state expectation. + if !reflect.DeepEqual(state, e.want.state) { + t.Fatalf("have %v, want %v", state, e.want.state) + } + } +} +``` + +### Implementation roadmap + +* implement proposed implementation +* replace currently scattered calls in `ConsensusState` with calls to the new + `Consensus` function +* rename `ConsensusState` to `ConsensusExecutor` to avoid confusion +* propose design for improved separation and clear information flow between + `ConsensusExecutor` and `ConsensusReactor` + +## Status + +Draft. 
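Before weighing the consequences, a hypothetical sketch of the surrounding executor may help show the intended information flow; every name below is taken from the draft definitions above (or invented, like `nextEvent`), not from existing code:

```go
// Sketch only: the executor owns all side effects (gossip, timers, WAL) and
// repeatedly feeds events into the pure Consensus function, acting on the
// message it returns.
func (ex *ConsensusExecutor) loop() {
	for {
		event := ex.nextEvent() // a complete proposal, 2/3+ votes, a timeout, ...
		state, msg := Consensus(event, ex.state)
		ex.state = state
		switch msg {
		case MessageProposal:
			ex.gossipProposal()
		case MessageVotes:
			ex.signAndGossipVote()
		case MessageDecision:
			ex.commitBlock()
		}
	}
}
```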
+ +## Consequences + +### Positive + +- isolated implementation of the algorithm +- improved testability - simpler to proof correctness +- clearer separation of concerns - easier to reason + +### Negative + +### Neutral From 5b120d788ae2668372040a42002b801bbadbcb42 Mon Sep 17 00:00:00 2001 From: bradyjoestar Date: Thu, 4 Oct 2018 17:39:24 -0700 Subject: [PATCH 034/113] lite support maxOpenConnections (#2413) --- cmd/tendermint/commands/lite.go | 12 +++++++----- lite/proxy/proxy.go | 5 ++--- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index 150371d62..bc51d7de2 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -26,10 +26,11 @@ just with added trust and running locally.`, } var ( - listenAddr string - nodeAddr string - chainID string - home string + listenAddr string + nodeAddr string + chainID string + home string + maxOpenConnections int cacheSize int ) @@ -38,6 +39,7 @@ func init() { LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address") LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID") LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") + LiteCmd.Flags().IntVar(&maxOpenConnections,"max-open-connections",900,"Maximum number of simultaneous connections (including WebSocket).") LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size") } @@ -79,7 +81,7 @@ func runProxy(cmd *cobra.Command, args []string) error { sc := proxy.SecureClient(node, cert) logger.Info("Starting proxy...") - err = proxy.StartProxy(sc, listenAddr, logger) + err = proxy.StartProxy(sc, listenAddr, logger, maxOpenConnections) if err != nil { return cmn.ErrorWrap(err, "starting proxy") } diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 0294ddf68..ffd9db1d7 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -19,7 +19,7 @@ const ( // StartProxy will start the websocket manager on the client, // set up the rpc routes to proxy via the given client, // and start up an http/rpc server on the location given by bind (eg. :1234) -func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error { +func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger, maxOpenConnections int) error { err := c.Start() if err != nil { return err @@ -38,8 +38,7 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error core.SetLogger(logger) mux.HandleFunc(wsEndpoint, wm.WebsocketHandler) - // TODO: limit max number of open connections rpc.Config{MaxOpenConnections: X} - _, err = rpc.StartHTTPServer(listenAddr, mux, logger, rpc.Config{}) + _, err = rpc.StartHTTPServer(listenAddr, mux, logger, rpc.Config{MaxOpenConnections: maxOpenConnections}) return err } From c648c93807f306faaee7c327e6f9f59d933e41cf Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Fri, 5 Oct 2018 00:00:50 -0700 Subject: [PATCH 035/113] Fix random distribution in bitArray.PickRandom (#2534) * Fix random distribution in bitArray.PickRandom Previously it was very biased. 63 "_" followed by a single "x" had much greater odds of being chosen. Additionally, the last element was skewed. This fixes that by first preproccessing the set of all true indices, and then randomly selecting a single element from there. This commit also makes the code here significantly simpler, and improves test cases. 
* unlock mtx right after we select true indices --- CHANGELOG_PENDING.md | 1 + libs/common/bit_array.go | 66 +++++++++++++++++++---------------- libs/common/bit_array_test.go | 33 ++++++++++++------ 3 files changed, 59 insertions(+), 41 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 2a0b58f60..450079072 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -45,3 +45,4 @@ timeoutPrecommit before starting next round - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function - [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar) +- [common] \#2534 make bit array's PickRandom choose uniformly from true bits diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go index 161f21fce..ebd6cc4a0 100644 --- a/libs/common/bit_array.go +++ b/libs/common/bit_array.go @@ -234,49 +234,53 @@ func (bA *BitArray) IsFull() bool { return (lastElem+1)&((uint64(1)< 0 { - randBitStart := RandIntn(64) - for j := 0; j < 64; j++ { - bitIdx := ((j + randBitStart) % 64) - if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { - return 64*elemIdx + bitIdx, true - } - } - PanicSanity("should not happen") - } - } else { - // Special case for last elem, to ignore straggler bits - elemBits := bA.Bits % 64 - if elemBits == 0 { - elemBits = 64 - } - randBitStart := RandIntn(elemBits) - for j := 0; j < elemBits; j++ { - bitIdx := ((j + randBitStart) % elemBits) - if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { - return 64*elemIdx + bitIdx, true - } + + return trueIndices[RandIntn(len(trueIndices))], true +} + +func (bA *BitArray) getTrueIndices() []int { + trueIndices := make([]int, 0, bA.Bits) + curBit := 0 + numElems := len(bA.Elems) + // set all true indices + for i := 0; i < numElems-1; i++ { + elem := bA.Elems[i] + if elem == 0 { + curBit += 64 + continue + } + for j := 0; j < 64; j++ { + if (elem & (uint64(1) << uint64(j))) > 0 { + trueIndices = append(trueIndices, curBit) } + curBit++ + } + } + // handle last element + lastElem := bA.Elems[numElems-1] + numFinalBits := bA.Bits - curBit + for i := 0; i < numFinalBits; i++ { + if (lastElem & (uint64(1) << uint64(i))) > 0 { + trueIndices = append(trueIndices, curBit) } + curBit++ } - return 0, false + return trueIndices } // String returns a string representation of BitArray: BA{}, diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index bc117b2a0..09ec8af25 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -107,16 +107,29 @@ func TestSub(t *testing.T) { } func TestPickRandom(t *testing.T) { - for idx := 0; idx < 123; idx++ { - bA1 := NewBitArray(123) - bA1.SetIndex(idx, true) - index, ok := bA1.PickRandom() - if !ok { - t.Fatal("Expected to pick element but got none") - } - if index != idx { - t.Fatalf("Expected to pick element at %v but got wrong index", idx) - } + empty16Bits := "________________" + empty64Bits := empty16Bits + empty16Bits + empty16Bits + empty16Bits + testCases := []struct { + bA string + ok bool + }{ + {`null`, false}, + {`"x"`, true}, + {`"` + empty16Bits + `"`, false}, + {`"x` + empty16Bits + `"`, true}, + {`"` + empty16Bits + `x"`, true}, + {`"x` + empty16Bits + `x"`, true}, + {`"` + empty64Bits + `"`, false}, + {`"x` + empty64Bits + `"`, true}, + {`"` + empty64Bits + `x"`, true}, + {`"x` + empty64Bits + `x"`, true}, + } + for _, tc := range testCases { + var bitArr *BitArray + err := json.Unmarshal([]byte(tc.bA), &bitArr) + require.NoError(t, err) + _, ok := 
bitArr.PickRandom() + require.Equal(t, tc.ok, ok, "PickRandom got an unexpected result on input %s", tc.bA) } } From 6e5f58191e5bbe1140f238f735d1a33f3a757440 Mon Sep 17 00:00:00 2001 From: Zach Date: Fri, 5 Oct 2018 09:51:23 -0400 Subject: [PATCH 036/113] add spec/abci/readme to sidebar (#2551) --- docs/.vuepress/config.js | 1 + docs/spec/abci/README.md | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index ce9491cb9..342c5eac3 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -106,6 +106,7 @@ module.exports = { title: "ABCI Spec", collapsable: false, children: [ + "/spec/abci/", "/spec/abci/abci", "/spec/abci/apps", "/spec/abci/client-server" diff --git a/docs/spec/abci/README.md b/docs/spec/abci/README.md index 02e369bf7..bb1c38b6e 100644 --- a/docs/spec/abci/README.md +++ b/docs/spec/abci/README.md @@ -1,4 +1,4 @@ -# ABCI +# Overview ABCI is the interface between Tendermint (a state-machine replication engine) and your application (the actual state machine). It consists of a set of @@ -11,9 +11,9 @@ This allows Tendermint to run applications written in any programming language. This specification is split as follows: -- [Methods and Types](abci.md) - complete details on all ABCI methods and +- [Methods and Types](./abci.md) - complete details on all ABCI methods and message types -- [Applications](apps.md) - how to manage ABCI application state and other +- [Applications](./apps.md) - how to manage ABCI application state and other details about building ABCI applications -- [Client and Server](client-server.md) - for those looking to implement their +- [Client and Server](./client-server.md) - for those looking to implement their own ABCI application servers From dfda7b442f5605acef89d751427b943d16303b84 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Fri, 5 Oct 2018 16:26:52 -0700 Subject: [PATCH 037/113] types: Remove pubkey from validator hash (#2512) * types: Remove pubkey from validator hash * undo lock file change * Update Spec --- CHANGELOG_PENDING.md | 1 + docs/spec/blockchain/state.md | 5 ++++- types/validator.go | 2 -- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 450079072..740969625 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -26,6 +26,7 @@ BREAKING CHANGES: * Blockchain Protocol * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. + * [types] \#2512 Remove the pubkey field from the validator hash * P2P Protocol diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md index 349fd4223..e904bb339 100644 --- a/docs/spec/blockchain/state.md +++ b/docs/spec/blockchain/state.md @@ -45,7 +45,7 @@ processing transactions, like gas variables and tags - see ### Validator A validator is an active participant in the consensus with a public key and a voting power. -Validator's also contain an address which is derived from the PubKey: +Validator's also contain an address field, which is a hash digest of the PubKey. ```go type Validator struct { @@ -55,6 +55,9 @@ type Validator struct { } ``` +When hashing the Validator struct, the pubkey is not hashed, +because the address is already the hash of the pubkey. + The `state.Validators`, `state.LastValidators`, and `state.NextValidators`, must always by sorted by validator address, so that there is a canonical order for computing the SimpleMerkleRoot. 
diff --git a/types/validator.go b/types/validator.go index e43acf09d..46d1a7a9f 100644 --- a/types/validator.go +++ b/types/validator.go @@ -73,11 +73,9 @@ func (v *Validator) String() string { func (v *Validator) Hash() []byte { return aminoHash(struct { Address Address - PubKey crypto.PubKey VotingPower int64 }{ v.Address, - v.PubKey, v.VotingPower, }) } From 2d726a620ba1e8e64ee21d7d686e7a594567f806 Mon Sep 17 00:00:00 2001 From: Joon Date: Sat, 6 Oct 2018 12:44:53 +0900 Subject: [PATCH 038/113] add adr (#2553) --- .../adr-026-general-merkle-proof.md | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 docs/architecture/adr-026-general-merkle-proof.md diff --git a/docs/architecture/adr-026-general-merkle-proof.md b/docs/architecture/adr-026-general-merkle-proof.md new file mode 100644 index 000000000..af81947cb --- /dev/null +++ b/docs/architecture/adr-026-general-merkle-proof.md @@ -0,0 +1,47 @@ +# ADR 026: General Merkle Proof + +## Context + +We are using raw `[]byte` for merkle proofs in `abci.ResponseQuery`. It makes hard to handle multilayer merkle proofs and general cases. Here, new interface `ProofOperator` is defined. The users can defines their own Merkle proof format and layer them easily. + +Goals: +- Layer Merkle proofs without decoding/reencoding +- Provide general way to chain proofs +- Make the proof format extensible, allowing thirdparty proof types + +## Decision + +### ProofOperator + +`type ProofOperator` is an interface for Merkle proofs. The definition is: + +```go +type ProofOperator interface { + Run([][]byte) ([][]byte, error) + GetKey() []byte + ProofOp() ProofOp +} +``` + +Since a proof can treat various data type, `Run()` takes `[][]byte` as the argument, not `[]byte`. For example, a range proof's `Run()` can take multiple key-values as its argument. It will then return the root of the tree for the further process, calculated with the input value. + +`ProofOperator` does not have to be a Merkle proof - it can be a function that transforms the argument for intermediate process e.g. prepending the length to the `[]byte`. + +### ProofOp + +`type ProofOp` is a protobuf message which is a triple of `Type string`, `Key []byte`, and `Data []byte`. `ProofOperator` and `ProofOp`are interconvertible, using `ProofOperator.ProofOp()` and `OpDecoder()`, where `OpDecoder` is a function that each proof type can register for their own encoding scheme. For example, we can add an byte for encoding scheme before the serialized proof, supporting JSON decoding. 
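As a rough illustration of the layering (a sketch against the interface above, not a final implementation), verification could thread the leaf value through each operator in turn and compare the final output with a trusted root:

```go
package merkle

import (
	"bytes"
	"fmt"
)

// verifyChained assumes ops are ordered from the innermost tree (e.g. a store's
// range proof) to the outermost one, so no decoding/re-encoding happens between
// layers: each operator's output is simply the next operator's input.
func verifyChained(ops []ProofOperator, root []byte, leaf [][]byte) error {
	args := leaf
	for _, op := range ops {
		var err error
		if args, err = op.Run(args); err != nil {
			return fmt.Errorf("proof op for key %X failed: %v", op.GetKey(), err)
		}
	}
	if len(args) != 1 {
		return fmt.Errorf("expected a single computed root, got %d values", len(args))
	}
	if !bytes.Equal(args[0], root) {
		return fmt.Errorf("invalid proof: computed root %X, expected %X", args[0], root)
	}
	return nil
}
```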
+ +## Status + +## Consequences + +### Positive + +- Layering becomes easier (no encoding/decoding at each step) +- Thirdparty proof format is available + +### Negative + +- Larger size for abci.ResponseQuery +- Unintuitive proof chaining(it is not clear what `Run()` is doing) +- Additional codes for registering `OpDecoder`s From f471fc4963d41abdc3fac42f7107452157625a21 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 6 Oct 2018 09:20:15 -0400 Subject: [PATCH 039/113] abci: codespace (#2557) * abci: codespace * changelog --- CHANGELOG_PENDING.md | 5 +- abci/types/types.pb.go | 508 +++++++++++++++++++++++++++-------------- abci/types/types.proto | 3 + docs/spec/abci/abci.md | 8 + 4 files changed, 345 insertions(+), 179 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 740969625..bed71d1f3 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -26,12 +26,13 @@ BREAKING CHANGES: * Blockchain Protocol * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. - * [types] \#2512 Remove the pubkey field from the validator hash + * [types] \#2512 Remove the pubkey field from the validator hash * P2P Protocol FEATURES: - [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together +- [abci] \#2557 Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics @@ -41,7 +42,7 @@ IMPROVEMENTS: BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time -- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) wait for +- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) wait for timeoutPrecommit before starting next round - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 427315df3..1ec516024 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -61,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{0} + return fileDescriptor_types_4a7ab597ee120b05, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{1} + return fileDescriptor_types_4a7ab597ee120b05, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{2} + return fileDescriptor_types_4a7ab597ee120b05, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -569,7 +569,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m 
*RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{3} + return fileDescriptor_types_4a7ab597ee120b05, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -618,7 +618,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{4} + return fileDescriptor_types_4a7ab597ee120b05, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -676,7 +676,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{5} + return fileDescriptor_types_4a7ab597ee120b05, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -754,7 +754,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{6} + return fileDescriptor_types_4a7ab597ee120b05, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -826,7 +826,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{7} + return fileDescriptor_types_4a7ab597ee120b05, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -894,7 +894,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{8} + return fileDescriptor_types_4a7ab597ee120b05, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -941,7 +941,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{9} + return fileDescriptor_types_4a7ab597ee120b05, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -988,7 +988,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{10} + return fileDescriptor_types_4a7ab597ee120b05, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1034,7 +1034,7 
@@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{11} + return fileDescriptor_types_4a7ab597ee120b05, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1087,7 +1087,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{12} + return fileDescriptor_types_4a7ab597ee120b05, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1540,7 +1540,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{13} + return fileDescriptor_types_4a7ab597ee120b05, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1587,7 +1587,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{14} + return fileDescriptor_types_4a7ab597ee120b05, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1633,7 +1633,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{15} + return fileDescriptor_types_4a7ab597ee120b05, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1676,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{16} + return fileDescriptor_types_4a7ab597ee120b05, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1748,7 +1748,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{17} + return fileDescriptor_types_4a7ab597ee120b05, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1810,7 +1810,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{18} + return fileDescriptor_types_4a7ab597ee120b05, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error 
{ return m.Unmarshal(b) @@ -1863,6 +1863,7 @@ type ResponseQuery struct { Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` Proof *merkle.Proof `protobuf:"bytes,8,opt,name=proof" json:"proof,omitempty"` Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1872,7 +1873,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{19} + return fileDescriptor_types_4a7ab597ee120b05, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1957,6 +1958,13 @@ func (m *ResponseQuery) GetHeight() int64 { return 0 } +func (m *ResponseQuery) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + type ResponseBeginBlock struct { Tags []common.KVPair `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1968,7 +1976,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{20} + return fileDescriptor_types_4a7ab597ee120b05, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,6 +2020,7 @@ type ResponseCheckTx struct { GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2021,7 +2030,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{21} + return fileDescriptor_types_4a7ab597ee120b05, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2099,6 +2108,13 @@ func (m *ResponseCheckTx) GetTags() []common.KVPair { return nil } +func (m *ResponseCheckTx) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + type ResponseDeliverTx struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2107,6 +2123,7 @@ type ResponseDeliverTx struct { GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` + Codespace string 
`protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2116,7 +2133,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{22} + return fileDescriptor_types_4a7ab597ee120b05, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2194,6 +2211,13 @@ func (m *ResponseDeliverTx) GetTags() []common.KVPair { return nil } +func (m *ResponseDeliverTx) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + type ResponseEndBlock struct { ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` @@ -2207,7 +2231,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{23} + return fileDescriptor_types_4a7ab597ee120b05, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2269,7 +2293,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{24} + return fileDescriptor_types_4a7ab597ee120b05, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2319,7 +2343,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{25} + return fileDescriptor_types_4a7ab597ee120b05, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2377,7 +2401,7 @@ func (m *BlockSize) Reset() { *m = BlockSize{} } func (m *BlockSize) String() string { return proto.CompactTextString(m) } func (*BlockSize) ProtoMessage() {} func (*BlockSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{26} + return fileDescriptor_types_4a7ab597ee120b05, []int{26} } func (m *BlockSize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2433,7 +2457,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{27} + return fileDescriptor_types_4a7ab597ee120b05, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2481,7 +2505,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return 
proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{28} + return fileDescriptor_types_4a7ab597ee120b05, []int{28} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2554,7 +2578,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{29} + return fileDescriptor_types_4a7ab597ee120b05, []int{29} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2700,7 +2724,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{30} + return fileDescriptor_types_4a7ab597ee120b05, []int{30} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2755,7 +2779,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{31} + return fileDescriptor_types_4a7ab597ee120b05, []int{31} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2812,7 +2836,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{32} + return fileDescriptor_types_4a7ab597ee120b05, []int{32} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2868,7 +2892,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{33} + return fileDescriptor_types_4a7ab597ee120b05, []int{33} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2924,7 +2948,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{34} + return fileDescriptor_types_4a7ab597ee120b05, []int{34} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2979,7 +3003,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, []int{35} + return fileDescriptor_types_4a7ab597ee120b05, []int{35} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3037,7 +3061,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_03c41ca87033c976, 
[]int{36} + return fileDescriptor_types_4a7ab597ee120b05, []int{36} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4354,6 +4378,9 @@ func (this *ResponseQuery) Equal(that interface{}) bool { if this.Height != that1.Height { return false } + if this.Codespace != that1.Codespace { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4436,6 +4463,9 @@ func (this *ResponseCheckTx) Equal(that interface{}) bool { return false } } + if this.Codespace != that1.Codespace { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4486,6 +4516,9 @@ func (this *ResponseDeliverTx) Equal(that interface{}) bool { return false } } + if this.Codespace != that1.Codespace { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -6393,6 +6426,12 @@ func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } + if len(m.Codespace) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6492,6 +6531,12 @@ func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Codespace) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6558,6 +6603,12 @@ func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Codespace) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -7598,8 +7649,9 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { if r.Intn(2) == 0 { this.Height *= -1 } + this.Codespace = string(randStringTypes(r)) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 10) + this.XXX_unrecognized = randUnrecognizedTypes(r, 11) } return this } @@ -7646,8 +7698,9 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this.Tags[i] = *v22 } } + this.Codespace = string(randStringTypes(r)) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 8) + this.XXX_unrecognized = randUnrecognizedTypes(r, 9) } return this } @@ -7678,8 +7731,9 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this.Tags[i] = *v25 } } + this.Codespace = string(randStringTypes(r)) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 8) + this.XXX_unrecognized = randUnrecognizedTypes(r, 9) } return this } @@ -8572,6 +8626,10 @@ func (m *ResponseQuery) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8623,6 +8681,10 @@ func (m *ResponseCheckTx) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8659,6 +8721,10 @@ func (m *ResponseDeliverTx) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + 
sovTypes(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -11778,6 +11844,35 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12088,6 +12183,35 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12316,6 +12440,35 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -14328,142 +14481,143 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_03c41ca87033c976) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4a7ab597ee120b05) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_03c41ca87033c976) -} - -var fileDescriptor_types_03c41ca87033c976 = []byte{ - // 2089 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x73, 0x1b, 0x49, - 0x15, 0xf7, 0x48, 0xb2, 0xa4, 0x79, 0xb6, 0x25, 0xa7, 0x93, 0xd8, 0x8a, 0x00, 0x3b, 0x35, 0x0b, - 0xbb, 0x36, 0xeb, 0x95, 0xb7, 0xbc, 0x2c, 0xe5, 0x6c, 0x96, 0xad, 0xb2, 0x92, 0x80, 0x5d, 0xbb, - 0x80, 0x99, 0x24, 0xe6, 0x42, 0xd5, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x22, 0xcd, 0xcc, 0xce, 0xb4, - 0xbc, 0x72, 0x8e, 0x9c, 0xf7, 0xb0, 0x07, 0xaa, 0xf8, 0x0a, 0x7c, 0x04, 0x8e, 0x1c, 0x28, 0x6a, - 0x8f, 0x14, 0x05, 0xc5, 0x2d, 0x80, 0x29, 0x0e, 0xf0, 
0x09, 0x38, 0x52, 0xfd, 0xba, 0x7b, 0xfe, - 0x79, 0x14, 0x36, 0xe1, 0xc6, 0x45, 0xea, 0xee, 0xf7, 0x5e, 0x77, 0xbf, 0x37, 0xef, 0xbd, 0xdf, - 0x7b, 0x0d, 0x1b, 0x74, 0x30, 0xf4, 0xf6, 0xf9, 0x65, 0xc8, 0x62, 0xf9, 0xdb, 0x0b, 0xa3, 0x80, - 0x07, 0x64, 0x19, 0x27, 0xdd, 0x77, 0x46, 0x1e, 0x1f, 0xcf, 0x06, 0xbd, 0x61, 0x30, 0xdd, 0x1f, - 0x05, 0xa3, 0x60, 0x1f, 0xa9, 0x83, 0xd9, 0x39, 0xce, 0x70, 0x82, 0x23, 0x29, 0xd5, 0xdd, 0x1e, - 0x05, 0xc1, 0x68, 0xc2, 0x52, 0x2e, 0xee, 0x4d, 0x59, 0xcc, 0xe9, 0x34, 0x54, 0x0c, 0x87, 0x99, - 0xfd, 0x38, 0xf3, 0x5d, 0x16, 0x4d, 0x3d, 0x9f, 0x67, 0x87, 0x13, 0x6f, 0x10, 0xef, 0x0f, 0x83, - 0xe9, 0x34, 0xf0, 0xb3, 0x17, 0xea, 0xde, 0xff, 0xaf, 0x92, 0xc3, 0xe8, 0x32, 0xe4, 0xc1, 0xfe, - 0x94, 0x45, 0xcf, 0x26, 0x4c, 0xfd, 0x49, 0x61, 0xeb, 0x77, 0x35, 0x68, 0xd8, 0xec, 0xd3, 0x19, - 0x8b, 0x39, 0xd9, 0x81, 0x1a, 0x1b, 0x8e, 0x83, 0x4e, 0xe5, 0xae, 0xb1, 0xb3, 0x72, 0x40, 0x7a, - 0xf2, 0x10, 0x45, 0x7d, 0x34, 0x1c, 0x07, 0xc7, 0x4b, 0x36, 0x72, 0x90, 0xb7, 0x61, 0xf9, 0x7c, - 0x32, 0x8b, 0xc7, 0x9d, 0x2a, 0xb2, 0xde, 0xcc, 0xb3, 0x7e, 0x5f, 0x90, 0x8e, 0x97, 0x6c, 0xc9, - 0x23, 0xb6, 0xf5, 0xfc, 0xf3, 0xa0, 0x53, 0x2b, 0xdb, 0xf6, 0xc4, 0x3f, 0xc7, 0x6d, 0x05, 0x07, - 0x39, 0x04, 0x88, 0x19, 0x77, 0x82, 0x90, 0x7b, 0x81, 0xdf, 0x59, 0x46, 0xfe, 0xcd, 0x3c, 0xff, - 0x63, 0xc6, 0x7f, 0x8c, 0xe4, 0xe3, 0x25, 0xdb, 0x8c, 0xf5, 0x44, 0x48, 0x7a, 0xbe, 0xc7, 0x9d, - 0xe1, 0x98, 0x7a, 0x7e, 0xa7, 0x5e, 0x26, 0x79, 0xe2, 0x7b, 0xfc, 0x81, 0x20, 0x0b, 0x49, 0x4f, - 0x4f, 0x84, 0x2a, 0x9f, 0xce, 0x58, 0x74, 0xd9, 0x69, 0x94, 0xa9, 0xf2, 0x13, 0x41, 0x12, 0xaa, - 0x20, 0x0f, 0xb9, 0x0f, 0x2b, 0x03, 0x36, 0xf2, 0x7c, 0x67, 0x30, 0x09, 0x86, 0xcf, 0x3a, 0x4d, - 0x14, 0xe9, 0xe4, 0x45, 0xfa, 0x82, 0xa1, 0x2f, 0xe8, 0xc7, 0x4b, 0x36, 0x0c, 0x92, 0x19, 0x39, - 0x80, 0xe6, 0x70, 0xcc, 0x86, 0xcf, 0x1c, 0x3e, 0xef, 0x98, 0x28, 0x79, 0x3b, 0x2f, 0xf9, 0x40, - 0x50, 0x9f, 0xcc, 0x8f, 0x97, 0xec, 0xc6, 0x50, 0x0e, 0xc9, 0xfb, 0x60, 0x32, 0xdf, 0x55, 0xc7, - 0xad, 0xa0, 0xd0, 0x46, 0xe1, 0xbb, 0xf8, 0xae, 0x3e, 0xac, 0xc9, 0xd4, 0x98, 0xf4, 0xa0, 0x2e, - 0x1c, 0xc5, 0xe3, 0x9d, 0x55, 0x94, 0xb9, 0x55, 0x38, 0x08, 0x69, 0xc7, 0x4b, 0xb6, 0xe2, 0x12, - 0xe6, 0x73, 0xd9, 0xc4, 0xbb, 0x60, 0x91, 0xb8, 0xdc, 0xcd, 0x32, 0xf3, 0x3d, 0x94, 0x74, 0xbc, - 0x9e, 0xe9, 0xea, 0x49, 0xbf, 0x01, 0xcb, 0x17, 0x74, 0x32, 0x63, 0xd6, 0x5b, 0xb0, 0x92, 0xf1, - 0x14, 0xd2, 0x81, 0xc6, 0x94, 0xc5, 0x31, 0x1d, 0xb1, 0x8e, 0x71, 0xd7, 0xd8, 0x31, 0x6d, 0x3d, - 0xb5, 0x5a, 0xb0, 0x9a, 0xf5, 0x93, 0x8c, 0xa0, 0xf0, 0x05, 0x21, 0x78, 0xc1, 0xa2, 0x58, 0x38, - 0x80, 0x12, 0x54, 0x53, 0xeb, 0x03, 0x58, 0x2f, 0x3a, 0x01, 0x59, 0x87, 0xea, 0x33, 0x76, 0xa9, - 0x38, 0xc5, 0x90, 0xdc, 0x52, 0x17, 0x42, 0x2f, 0x36, 0x6d, 0x75, 0xbb, 0x2f, 0x2a, 0x89, 0x70, - 0xe2, 0x07, 0xe4, 0x10, 0x6a, 0x22, 0x0a, 0x51, 0x7a, 0xe5, 0xa0, 0xdb, 0x93, 0x21, 0xda, 0xd3, - 0x21, 0xda, 0x7b, 0xa2, 0x43, 0xb4, 0xdf, 0xfc, 0xf2, 0xc5, 0xf6, 0xd2, 0x17, 0x7f, 0xd9, 0x36, - 0x6c, 0x94, 0x20, 0x77, 0xc4, 0xa7, 0xa4, 0x9e, 0xef, 0x78, 0xae, 0x3a, 0xa7, 0x81, 0xf3, 0x13, - 0x97, 0x1c, 0xc1, 0xfa, 0x30, 0xf0, 0x63, 0xe6, 0xc7, 0xb3, 0xd8, 0x09, 0x69, 0x44, 0xa7, 0xb1, - 0x8a, 0x12, 0xfd, 0xe1, 0x1e, 0x68, 0xf2, 0x29, 0x52, 0xed, 0xf6, 0x30, 0xbf, 0x40, 0x3e, 0x04, - 0xb8, 0xa0, 0x13, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x4e, 0xed, 0x6e, 0x35, 0x23, 0x7c, 0xa6, 0x09, - 0x4f, 0x43, 0x97, 0x72, 0xd6, 0xaf, 0x89, 0x9b, 0xd9, 0x19, 0x7e, 0xf2, 0x26, 0xb4, 0x69, 0x18, - 0x3a, 0x31, 0xa7, 0x9c, 0x39, 0x83, 0x4b, 0xce, 0x62, 0x8c, 0xa4, 0x55, 0x7b, 
0x8d, 0x86, 0xe1, - 0x63, 0xb1, 0xda, 0x17, 0x8b, 0x96, 0x9b, 0x7c, 0x07, 0x74, 0x72, 0x42, 0xa0, 0xe6, 0x52, 0x4e, - 0xd1, 0x1a, 0xab, 0x36, 0x8e, 0xc5, 0x5a, 0x48, 0xf9, 0x58, 0xe9, 0x88, 0x63, 0xb2, 0x01, 0xf5, - 0x31, 0xf3, 0x46, 0x63, 0x8e, 0x6a, 0x55, 0x6d, 0x35, 0x13, 0x86, 0x0f, 0xa3, 0xe0, 0x82, 0x61, - 0x9c, 0x37, 0x6d, 0x39, 0xb1, 0xfe, 0x61, 0xc0, 0x8d, 0x6b, 0x81, 0x21, 0xf6, 0x1d, 0xd3, 0x78, - 0xac, 0xcf, 0x12, 0x63, 0xf2, 0xb6, 0xd8, 0x97, 0xba, 0x2c, 0x52, 0xf9, 0x67, 0x4d, 0x69, 0x7c, - 0x8c, 0x8b, 0x4a, 0x51, 0xc5, 0x42, 0x1e, 0xc1, 0xfa, 0x84, 0xc6, 0xdc, 0x91, 0xfe, 0xeb, 0x60, - 0x7e, 0xa9, 0xe6, 0x62, 0xea, 0x13, 0xaa, 0xfd, 0x5c, 0xb8, 0x95, 0x12, 0x6f, 0x4d, 0x72, 0xab, - 0xe4, 0x18, 0x6e, 0x0d, 0x2e, 0x9f, 0x53, 0x9f, 0x7b, 0x3e, 0x73, 0xae, 0xd9, 0xbc, 0xad, 0xb6, - 0x7a, 0x74, 0xe1, 0xb9, 0xcc, 0x1f, 0x6a, 0x63, 0xdf, 0x4c, 0x44, 0x92, 0x8f, 0x11, 0x5b, 0x77, - 0xa1, 0x95, 0x8f, 0x62, 0xd2, 0x82, 0x0a, 0x9f, 0x2b, 0x0d, 0x2b, 0x7c, 0x6e, 0x59, 0x89, 0x07, - 0x26, 0xa1, 0x74, 0x8d, 0x67, 0x17, 0xda, 0x85, 0xb0, 0xce, 0x98, 0xdb, 0xc8, 0x9a, 0xdb, 0x6a, - 0xc3, 0x5a, 0x2e, 0x9a, 0xad, 0xcf, 0x97, 0xa1, 0x69, 0xb3, 0x38, 0x14, 0xce, 0x44, 0x0e, 0xc1, - 0x64, 0xf3, 0x21, 0x93, 0x89, 0xd4, 0x28, 0xa4, 0x29, 0xc9, 0xf3, 0x48, 0xd3, 0x45, 0x40, 0x27, - 0xcc, 0x64, 0x37, 0x07, 0x02, 0x37, 0x8b, 0x42, 0x59, 0x14, 0xd8, 0xcb, 0xa3, 0xc0, 0xad, 0x02, - 0x6f, 0x01, 0x06, 0x76, 0x73, 0x30, 0x50, 0xdc, 0x38, 0x87, 0x03, 0xf7, 0x4a, 0x70, 0xa0, 0x78, - 0xfd, 0x05, 0x40, 0x70, 0xaf, 0x04, 0x08, 0x3a, 0xd7, 0xce, 0x2a, 0x45, 0x82, 0xbd, 0x3c, 0x12, - 0x14, 0xd5, 0x29, 0x40, 0xc1, 0x87, 0x65, 0x50, 0x70, 0xa7, 0x20, 0xb3, 0x10, 0x0b, 0xde, 0xbb, - 0x86, 0x05, 0x1b, 0x05, 0xd1, 0x12, 0x30, 0xb8, 0x97, 0xcb, 0xd2, 0x50, 0xaa, 0x5b, 0x79, 0x9a, - 0x26, 0xdf, 0xbd, 0x8e, 0x23, 0x9b, 0xc5, 0x4f, 0x5b, 0x06, 0x24, 0xfb, 0x05, 0x20, 0xb9, 0x5d, - 0xbc, 0x65, 0x01, 0x49, 0x52, 0x3c, 0xd8, 0x15, 0x71, 0x5f, 0xf0, 0x34, 0x91, 0x23, 0x58, 0x14, - 0x05, 0x91, 0x4a, 0xd8, 0x72, 0x62, 0xed, 0x88, 0x4c, 0x94, 0xfa, 0xd7, 0x4b, 0xb0, 0x03, 0x9d, - 0x3e, 0xe3, 0x5d, 0xd6, 0x2f, 0x8d, 0x54, 0x16, 0x23, 0x3a, 0x9b, 0xc5, 0x4c, 0x95, 0xc5, 0x32, - 0x90, 0x52, 0xc9, 0x41, 0x0a, 0xf9, 0x36, 0xdc, 0xc0, 0x34, 0x82, 0x76, 0x71, 0x72, 0x69, 0xad, - 0x2d, 0x08, 0xd2, 0x20, 0x32, 0xbf, 0xbd, 0x03, 0x37, 0x33, 0xbc, 0x22, 0xc5, 0x62, 0x0a, 0xab, - 0x61, 0xf0, 0xae, 0x27, 0xdc, 0x47, 0x61, 0x78, 0x4c, 0xe3, 0xb1, 0xf5, 0xc3, 0x54, 0xff, 0x14, - 0xae, 0x08, 0xd4, 0x86, 0x81, 0x2b, 0xd5, 0x5a, 0xb3, 0x71, 0x2c, 0x20, 0x6c, 0x12, 0x8c, 0xf0, - 0x54, 0xd3, 0x16, 0x43, 0xc1, 0x95, 0x44, 0x8a, 0x29, 0x43, 0xc2, 0xfa, 0x85, 0x91, 0xee, 0x97, - 0x22, 0x58, 0x19, 0xd8, 0x18, 0xff, 0x0b, 0xd8, 0x54, 0x5e, 0x0d, 0x6c, 0xac, 0xdf, 0x1a, 0xe9, - 0x17, 0x49, 0x60, 0xe4, 0xf5, 0x54, 0x14, 0xce, 0xe1, 0xf9, 0x2e, 0x9b, 0x63, 0xc0, 0x57, 0x6d, - 0x39, 0xd1, 0x08, 0x5f, 0x47, 0x33, 0xe7, 0x11, 0xbe, 0x81, 0x6b, 0x72, 0x42, 0xde, 0x40, 0xf8, - 0x09, 0xce, 0x55, 0x24, 0xae, 0xf5, 0x54, 0x99, 0x7b, 0x2a, 0x16, 0x6d, 0x49, 0xcb, 0x24, 0x53, - 0x33, 0x97, 0x4c, 0x4f, 0x81, 0x5c, 0x0f, 0x59, 0xf2, 0x01, 0xd4, 0x38, 0x1d, 0x09, 0x8b, 0x0a, - 0xa3, 0xb4, 0x7a, 0xb2, 0xf6, 0xee, 0x7d, 0x7c, 0x76, 0x4a, 0xbd, 0xa8, 0xbf, 0x21, 0x8c, 0xf1, - 0xaf, 0x17, 0xdb, 0x2d, 0xc1, 0xb3, 0x17, 0x4c, 0x3d, 0xce, 0xa6, 0x21, 0xbf, 0xb4, 0x51, 0xc6, - 0xfa, 0x93, 0x21, 0x52, 0x79, 0x2e, 0x94, 0x4b, 0x4d, 0xa3, 0xfd, 0xb5, 0x92, 0x41, 0xdd, 0xaf, - 0x66, 0xae, 0x6f, 0x00, 0x8c, 0x68, 0xec, 0x7c, 0x46, 0x7d, 0xce, 0x5c, 0x65, 0x33, 0x73, 0x44, - 0xe3, 
0x9f, 0xe2, 0x82, 0x28, 0x51, 0x04, 0x79, 0x16, 0x33, 0x17, 0x8d, 0x57, 0xb5, 0x1b, 0x23, - 0x1a, 0x3f, 0x8d, 0x99, 0x9b, 0xe8, 0xd5, 0x78, 0x0d, 0xbd, 0xfe, 0x9c, 0xf1, 0xc3, 0x14, 0xc7, - 0xfe, 0x1f, 0x34, 0xfb, 0xa7, 0x21, 0x00, 0x3a, 0x9f, 0x0b, 0xc9, 0x09, 0xdc, 0x48, 0xbc, 0xdd, - 0x99, 0x61, 0x14, 0x68, 0x7f, 0x78, 0x79, 0x90, 0xac, 0x5f, 0xe4, 0x97, 0x63, 0xf2, 0x23, 0xd8, - 0x2c, 0xc4, 0x6a, 0xb2, 0x61, 0xe5, 0xa5, 0x21, 0x7b, 0x3b, 0x1f, 0xb2, 0x7a, 0x3f, 0xad, 0x6b, - 0xf5, 0x35, 0x74, 0xfd, 0xa6, 0xa8, 0x56, 0xb2, 0x19, 0xbc, 0xec, 0x6b, 0x59, 0x3f, 0x37, 0xa0, - 0x5d, 0xb8, 0x0c, 0xd9, 0x07, 0x90, 0x09, 0x30, 0xf6, 0x9e, 0xeb, 0xca, 0x79, 0x5d, 0x5d, 0x1c, - 0x4d, 0xf6, 0xd8, 0x7b, 0xce, 0x6c, 0x73, 0xa0, 0x87, 0xe4, 0x23, 0x68, 0x33, 0x55, 0x3f, 0xe9, - 0x0c, 0x55, 0xc9, 0x41, 0x89, 0xae, 0xae, 0x94, 0xb6, 0x2d, 0x96, 0x9b, 0x5b, 0x47, 0x60, 0x26, - 0xfb, 0x92, 0xaf, 0x81, 0x39, 0xa5, 0x73, 0x55, 0xd5, 0xca, 0x7a, 0xa8, 0x39, 0xa5, 0x73, 0x2c, - 0x68, 0xc9, 0x26, 0x34, 0x04, 0x71, 0x44, 0xe5, 0x09, 0x55, 0xbb, 0x3e, 0xa5, 0xf3, 0x1f, 0xd0, - 0xd8, 0xda, 0x85, 0x56, 0xfe, 0x10, 0xcd, 0xaa, 0x11, 0x46, 0xb2, 0x1e, 0x8d, 0x98, 0xf5, 0x18, - 0x5a, 0xf9, 0xc2, 0x51, 0x64, 0x9b, 0x28, 0x98, 0xf9, 0x2e, 0x32, 0x2e, 0xdb, 0x72, 0x22, 0xba, - 0xc6, 0x8b, 0x40, 0x7e, 0xba, 0x6c, 0xa5, 0x78, 0x16, 0x70, 0x96, 0x29, 0x37, 0x25, 0x8f, 0xf5, - 0x87, 0x1a, 0xd4, 0x65, 0x15, 0x4b, 0xde, 0xcc, 0x34, 0x0e, 0x08, 0x51, 0xfd, 0x95, 0xab, 0x17, - 0xdb, 0x0d, 0xcc, 0xe6, 0x27, 0x0f, 0xd3, 0x2e, 0x22, 0x4d, 0x54, 0x95, 0x5c, 0x91, 0xad, 0x5b, - 0x96, 0xea, 0x2b, 0xb7, 0x2c, 0x9b, 0xd0, 0xf0, 0x67, 0x53, 0x87, 0xcf, 0x63, 0x8c, 0xb5, 0xaa, - 0x5d, 0xf7, 0x67, 0xd3, 0x27, 0xf3, 0x58, 0xd8, 0x94, 0x07, 0x9c, 0x4e, 0x90, 0x24, 0x83, 0xad, - 0x89, 0x0b, 0x82, 0x78, 0x08, 0x6b, 0x19, 0xd0, 0xf3, 0x5c, 0x55, 0x51, 0xb5, 0xb2, 0x5f, 0xfc, - 0xe4, 0xa1, 0x52, 0x77, 0x25, 0x01, 0xc1, 0x13, 0x97, 0xec, 0xe4, 0x2b, 0x74, 0xc4, 0x4a, 0x99, - 0xb0, 0x33, 0x45, 0xb8, 0x40, 0x4a, 0x71, 0x01, 0xe1, 0x6e, 0x92, 0xa5, 0x89, 0x2c, 0x4d, 0xb1, - 0x80, 0xc4, 0xb7, 0xa0, 0x9d, 0xc2, 0x8d, 0x64, 0x31, 0xe5, 0x2e, 0xe9, 0x32, 0x32, 0xbe, 0x0b, - 0xb7, 0x7c, 0x36, 0xe7, 0x4e, 0x91, 0x1b, 0x90, 0x9b, 0x08, 0xda, 0x59, 0x5e, 0xe2, 0x5b, 0xd0, - 0x4a, 0x03, 0x12, 0x79, 0x57, 0x64, 0x9f, 0x94, 0xac, 0x22, 0xdb, 0x1d, 0x68, 0x26, 0x60, 0xbf, - 0x8a, 0x0c, 0x0d, 0x2a, 0x31, 0x3e, 0x29, 0x1f, 0x22, 0x16, 0xcf, 0x26, 0x5c, 0x6d, 0xb2, 0x86, - 0x3c, 0x58, 0x3e, 0xd8, 0x72, 0x1d, 0x79, 0xdf, 0x80, 0xb5, 0x24, 0x0e, 0x90, 0xaf, 0x85, 0x7c, - 0xab, 0x7a, 0x11, 0x99, 0x76, 0x61, 0x3d, 0x8c, 0x82, 0x30, 0x88, 0x59, 0xe4, 0x50, 0xd7, 0x8d, - 0x58, 0x1c, 0x77, 0xda, 0x72, 0x3f, 0xbd, 0x7e, 0x24, 0x97, 0xad, 0x9f, 0x41, 0x43, 0x59, 0xbf, - 0xb4, 0x9b, 0xfa, 0x1e, 0xac, 0x86, 0x34, 0x12, 0x77, 0xca, 0xf6, 0x54, 0xba, 0xa6, 0x3d, 0xa5, - 0x91, 0x68, 0xa2, 0x73, 0xad, 0xd5, 0x0a, 0xf2, 0xcb, 0x25, 0xeb, 0x1e, 0xac, 0xe5, 0x78, 0x44, - 0x18, 0xa0, 0x53, 0xe8, 0x30, 0xc0, 0x49, 0x72, 0x72, 0x25, 0x3d, 0xd9, 0xba, 0x0f, 0x66, 0x62, - 0x68, 0x51, 0x7a, 0x69, 0x3d, 0x0c, 0x65, 0x3b, 0x39, 0xc5, 0x76, 0x31, 0xf8, 0x8c, 0x45, 0xaa, - 0xdc, 0x92, 0x13, 0xeb, 0x29, 0xb4, 0x0b, 0xf9, 0x94, 0xec, 0x41, 0x23, 0x9c, 0x0d, 0x1c, 0xdd, - 0xe6, 0xa7, 0x8d, 0xe1, 0xe9, 0x6c, 0xf0, 0x31, 0xbb, 0xd4, 0x8d, 0x61, 0x88, 0xb3, 0x74, 0xdb, - 0x4a, 0x76, 0xdb, 0x09, 0x34, 0x75, 0x68, 0x92, 0xef, 0x80, 0x99, 0xf8, 0x48, 0x21, 0x81, 0x25, - 0x47, 0xab, 0x4d, 0x53, 0x46, 0xf1, 0xa9, 0x63, 0x6f, 0xe4, 0x33, 0xd7, 0x49, 0xe3, 0x01, 0xcf, - 0x68, 0xda, 0x6d, 0x49, 0xf8, 
0x44, 0x3b, 0xbf, 0xf5, 0x2e, 0xd4, 0xe5, 0xdd, 0x84, 0x7d, 0xc4, - 0xce, 0xba, 0x1a, 0x15, 0xe3, 0xd2, 0x4c, 0xfb, 0x47, 0x03, 0x9a, 0x3a, 0x45, 0x95, 0x0a, 0xe5, - 0x2e, 0x5d, 0xf9, 0xaa, 0x97, 0x5e, 0xd4, 0xaa, 0xeb, 0x2c, 0x52, 0x7b, 0xe5, 0x2c, 0xb2, 0x07, - 0x44, 0x26, 0x8b, 0x8b, 0x80, 0x7b, 0xfe, 0xc8, 0x91, 0xb6, 0x96, 0x59, 0x63, 0x1d, 0x29, 0x67, - 0x48, 0x38, 0x15, 0xeb, 0x07, 0x9f, 0x2f, 0x43, 0xfb, 0xa8, 0xff, 0xe0, 0xe4, 0x28, 0x0c, 0x27, - 0xde, 0x90, 0x62, 0x09, 0xbc, 0x0f, 0x35, 0x2c, 0xf2, 0x4b, 0x9e, 0x17, 0xbb, 0x65, 0xdd, 0x26, - 0x39, 0x80, 0x65, 0xac, 0xf5, 0x49, 0xd9, 0x2b, 0x63, 0xb7, 0xb4, 0xe9, 0x14, 0x87, 0xc8, 0x6e, - 0xe0, 0xfa, 0x63, 0x63, 0xb7, 0xac, 0xf3, 0x24, 0x1f, 0x81, 0x99, 0x56, 0xe9, 0x8b, 0x9e, 0x1c, - 0xbb, 0x0b, 0x7b, 0x50, 0x21, 0x9f, 0x56, 0x43, 0x8b, 0x5e, 0xce, 0xba, 0x0b, 0x9b, 0x35, 0x72, - 0x08, 0x0d, 0x5d, 0x25, 0x96, 0x3f, 0x0a, 0x76, 0x17, 0xf4, 0x87, 0xc2, 0x3c, 0xb2, 0xf0, 0x2e, - 0x7b, 0xb9, 0xec, 0x96, 0x36, 0xb1, 0xe4, 0x7d, 0xa8, 0x2b, 0xd8, 0x2f, 0x7d, 0x18, 0xec, 0x96, - 0x77, 0x79, 0x42, 0xc9, 0xb4, 0xf5, 0x58, 0xf4, 0xba, 0xda, 0x5d, 0xd8, 0x6d, 0x93, 0x23, 0x80, - 0x4c, 0x75, 0xbd, 0xf0, 0xd9, 0xb4, 0xbb, 0xb8, 0x8b, 0x26, 0xf7, 0xa1, 0x99, 0xbe, 0x8c, 0x94, - 0x3f, 0x84, 0x76, 0x17, 0x35, 0xb6, 0xfd, 0xaf, 0xff, 0xfb, 0x6f, 0x5b, 0xc6, 0xaf, 0xae, 0xb6, - 0x8c, 0x5f, 0x5f, 0x6d, 0x19, 0x5f, 0x5e, 0x6d, 0x19, 0xbf, 0xbf, 0xda, 0x32, 0xfe, 0x7a, 0xb5, - 0x65, 0xfc, 0xe6, 0xef, 0x5b, 0xc6, 0xa0, 0x8e, 0xee, 0xff, 0xde, 0x7f, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x51, 0x4f, 0x34, 0x66, 0xf8, 0x17, 0x00, 0x00, + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4a7ab597ee120b05) +} + +var fileDescriptor_types_4a7ab597ee120b05 = []byte{ + // 2107 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x73, 0x23, 0x47, + 0x15, 0xf7, 0x48, 0xb2, 0xa4, 0x79, 0xb6, 0x25, 0xa7, 0xd7, 0x6b, 0x6b, 0x45, 0xb0, 0xb7, 0x26, + 0x90, 0xd8, 0xc4, 0x91, 0x53, 0x0e, 0xa1, 0xbc, 0xd9, 0x90, 0x2a, 0x6b, 0x77, 0xc1, 0xae, 0x04, + 0x30, 0xb3, 0xbb, 0xe6, 0x42, 0xd5, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x56, 0x9a, 0x99, 0xcc, 0xb4, + 0x1c, 0x79, 0x8f, 0x9c, 0x73, 0xc8, 0x81, 0x2a, 0xfe, 0x05, 0xfe, 0x04, 0x8e, 0x9c, 0xa8, 0x1c, + 0x29, 0x8a, 0xf3, 0x02, 0xa6, 0x38, 0xc0, 0x95, 0xa2, 0x8a, 0x23, 0xd5, 0xaf, 0xbb, 0xe7, 0xcb, + 0xa3, 0x25, 0x1b, 0x6e, 0x5c, 0xa4, 0xee, 0xf7, 0xd1, 0x1f, 0x6f, 0xde, 0x7b, 0xbf, 0xf7, 0x1a, + 0x36, 0xe9, 0x60, 0xe8, 0x1d, 0xf0, 0xab, 0x90, 0xc5, 0xf2, 0xb7, 0x17, 0x46, 0x01, 0x0f, 0xc8, + 0x32, 0x4e, 0xba, 0xef, 0x8c, 0x3c, 0x3e, 0x9e, 0x0d, 0x7a, 0xc3, 0x60, 0x7a, 0x30, 0x0a, 0x46, + 0xc1, 0x01, 0x72, 0x07, 0xb3, 0x0b, 0x9c, 0xe1, 0x04, 0x47, 0x52, 0xab, 0xbb, 0x33, 0x0a, 0x82, + 0xd1, 0x84, 0xa5, 0x52, 0xdc, 0x9b, 0xb2, 0x98, 0xd3, 0x69, 0xa8, 0x04, 0x8e, 0x32, 0xeb, 0x71, + 0xe6, 0xbb, 0x2c, 0x9a, 0x7a, 0x3e, 0xcf, 0x0e, 0x27, 0xde, 0x20, 0x3e, 0x18, 0x06, 0xd3, 0x69, + 0xe0, 0x67, 0x0f, 0xd4, 0xbd, 0xff, 0x5f, 0x35, 0x87, 0xd1, 0x55, 0xc8, 0x83, 0x83, 0x29, 0x8b, + 0x9e, 0x4d, 0x98, 0xfa, 0x93, 0xca, 0xd6, 0xef, 0x6a, 0xd0, 0xb0, 0xd9, 0xa7, 0x33, 0x16, 0x73, + 0xb2, 0x0b, 0x35, 0x36, 0x1c, 0x07, 0x9d, 0xca, 0x5d, 0x63, 0x77, 0xe5, 0x90, 0xf4, 0xe4, 0x26, + 0x8a, 0xfb, 0x68, 0x38, 0x0e, 0x4e, 0x96, 0x6c, 0x94, 0x20, 0x6f, 0xc3, 0xf2, 0xc5, 0x64, 0x16, + 0x8f, 0x3b, 0x55, 0x14, 0xbd, 0x95, 0x17, 0xfd, 0x81, 0x60, 0x9d, 0x2c, 0xd9, 0x52, 0x46, 0x2c, + 0xeb, 0xf9, 0x17, 0x41, 0xa7, 0x56, 0xb6, 0xec, 0xa9, 0x7f, 0x81, 0xcb, 0x0a, 0x09, 0x72, 
0x04, + 0x10, 0x33, 0xee, 0x04, 0x21, 0xf7, 0x02, 0xbf, 0xb3, 0x8c, 0xf2, 0x5b, 0x79, 0xf9, 0xc7, 0x8c, + 0xff, 0x04, 0xd9, 0x27, 0x4b, 0xb6, 0x19, 0xeb, 0x89, 0xd0, 0xf4, 0x7c, 0x8f, 0x3b, 0xc3, 0x31, + 0xf5, 0xfc, 0x4e, 0xbd, 0x4c, 0xf3, 0xd4, 0xf7, 0xf8, 0x03, 0xc1, 0x16, 0x9a, 0x9e, 0x9e, 0x88, + 0xab, 0x7c, 0x3a, 0x63, 0xd1, 0x55, 0xa7, 0x51, 0x76, 0x95, 0x9f, 0x0a, 0x96, 0xb8, 0x0a, 0xca, + 0x90, 0xfb, 0xb0, 0x32, 0x60, 0x23, 0xcf, 0x77, 0x06, 0x93, 0x60, 0xf8, 0xac, 0xd3, 0x44, 0x95, + 0x4e, 0x5e, 0xa5, 0x2f, 0x04, 0xfa, 0x82, 0x7f, 0xb2, 0x64, 0xc3, 0x20, 0x99, 0x91, 0x43, 0x68, + 0x0e, 0xc7, 0x6c, 0xf8, 0xcc, 0xe1, 0xf3, 0x8e, 0x89, 0x9a, 0xb7, 0xf3, 0x9a, 0x0f, 0x04, 0xf7, + 0xc9, 0xfc, 0x64, 0xc9, 0x6e, 0x0c, 0xe5, 0x90, 0xbc, 0x0f, 0x26, 0xf3, 0x5d, 0xb5, 0xdd, 0x0a, + 0x2a, 0x6d, 0x16, 0xbe, 0x8b, 0xef, 0xea, 0xcd, 0x9a, 0x4c, 0x8d, 0x49, 0x0f, 0xea, 0xc2, 0x51, + 0x3c, 0xde, 0x59, 0x45, 0x9d, 0x8d, 0xc2, 0x46, 0xc8, 0x3b, 0x59, 0xb2, 0x95, 0x94, 0x30, 0x9f, + 0xcb, 0x26, 0xde, 0x25, 0x8b, 0xc4, 0xe1, 0x6e, 0x95, 0x99, 0xef, 0xa1, 0xe4, 0xe3, 0xf1, 0x4c, + 0x57, 0x4f, 0xfa, 0x0d, 0x58, 0xbe, 0xa4, 0x93, 0x19, 0xb3, 0xde, 0x82, 0x95, 0x8c, 0xa7, 0x90, + 0x0e, 0x34, 0xa6, 0x2c, 0x8e, 0xe9, 0x88, 0x75, 0x8c, 0xbb, 0xc6, 0xae, 0x69, 0xeb, 0xa9, 0xd5, + 0x82, 0xd5, 0xac, 0x9f, 0x64, 0x14, 0x85, 0x2f, 0x08, 0xc5, 0x4b, 0x16, 0xc5, 0xc2, 0x01, 0x94, + 0xa2, 0x9a, 0x5a, 0x1f, 0xc0, 0x7a, 0xd1, 0x09, 0xc8, 0x3a, 0x54, 0x9f, 0xb1, 0x2b, 0x25, 0x29, + 0x86, 0x64, 0x43, 0x1d, 0x08, 0xbd, 0xd8, 0xb4, 0xd5, 0xe9, 0xbe, 0xa8, 0x24, 0xca, 0x89, 0x1f, + 0x90, 0x23, 0xa8, 0x89, 0x28, 0x44, 0xed, 0x95, 0xc3, 0x6e, 0x4f, 0x86, 0x68, 0x4f, 0x87, 0x68, + 0xef, 0x89, 0x0e, 0xd1, 0x7e, 0xf3, 0xcb, 0x17, 0x3b, 0x4b, 0x5f, 0xfc, 0x69, 0xc7, 0xb0, 0x51, + 0x83, 0xdc, 0x11, 0x9f, 0x92, 0x7a, 0xbe, 0xe3, 0xb9, 0x6a, 0x9f, 0x06, 0xce, 0x4f, 0x5d, 0x72, + 0x0c, 0xeb, 0xc3, 0xc0, 0x8f, 0x99, 0x1f, 0xcf, 0x62, 0x27, 0xa4, 0x11, 0x9d, 0xc6, 0x2a, 0x4a, + 0xf4, 0x87, 0x7b, 0xa0, 0xd9, 0x67, 0xc8, 0xb5, 0xdb, 0xc3, 0x3c, 0x81, 0x7c, 0x08, 0x70, 0x49, + 0x27, 0x9e, 0x4b, 0x79, 0x10, 0xc5, 0x9d, 0xda, 0xdd, 0x6a, 0x46, 0xf9, 0x5c, 0x33, 0x9e, 0x86, + 0x2e, 0xe5, 0xac, 0x5f, 0x13, 0x27, 0xb3, 0x33, 0xf2, 0xe4, 0x4d, 0x68, 0xd3, 0x30, 0x74, 0x62, + 0x4e, 0x39, 0x73, 0x06, 0x57, 0x9c, 0xc5, 0x18, 0x49, 0xab, 0xf6, 0x1a, 0x0d, 0xc3, 0xc7, 0x82, + 0xda, 0x17, 0x44, 0xcb, 0x4d, 0xbe, 0x03, 0x3a, 0x39, 0x21, 0x50, 0x73, 0x29, 0xa7, 0x68, 0x8d, + 0x55, 0x1b, 0xc7, 0x82, 0x16, 0x52, 0x3e, 0x56, 0x77, 0xc4, 0x31, 0xd9, 0x84, 0xfa, 0x98, 0x79, + 0xa3, 0x31, 0xc7, 0x6b, 0x55, 0x6d, 0x35, 0x13, 0x86, 0x0f, 0xa3, 0xe0, 0x92, 0x61, 0x9c, 0x37, + 0x6d, 0x39, 0xb1, 0xfe, 0x66, 0xc0, 0x6b, 0x37, 0x02, 0x43, 0xac, 0x3b, 0xa6, 0xf1, 0x58, 0xef, + 0x25, 0xc6, 0xe4, 0x6d, 0xb1, 0x2e, 0x75, 0x59, 0xa4, 0xf2, 0xcf, 0x9a, 0xba, 0xf1, 0x09, 0x12, + 0xd5, 0x45, 0x95, 0x08, 0x79, 0x04, 0xeb, 0x13, 0x1a, 0x73, 0x47, 0xfa, 0xaf, 0x83, 0xf9, 0xa5, + 0x9a, 0x8b, 0xa9, 0x4f, 0xa8, 0xf6, 0x73, 0xe1, 0x56, 0x4a, 0xbd, 0x35, 0xc9, 0x51, 0xc9, 0x09, + 0x6c, 0x0c, 0xae, 0x9e, 0x53, 0x9f, 0x7b, 0x3e, 0x73, 0x6e, 0xd8, 0xbc, 0xad, 0x96, 0x7a, 0x74, + 0xe9, 0xb9, 0xcc, 0x1f, 0x6a, 0x63, 0xdf, 0x4a, 0x54, 0x92, 0x8f, 0x11, 0x5b, 0x77, 0xa1, 0x95, + 0x8f, 0x62, 0xd2, 0x82, 0x0a, 0x9f, 0xab, 0x1b, 0x56, 0xf8, 0xdc, 0xb2, 0x12, 0x0f, 0x4c, 0x42, + 0xe9, 0x86, 0xcc, 0x1e, 0xb4, 0x0b, 0x61, 0x9d, 0x31, 0xb7, 0x91, 0x35, 0xb7, 0xd5, 0x86, 0xb5, + 0x5c, 0x34, 0x5b, 0x9f, 0x2f, 0x43, 0xd3, 0x66, 0x71, 0x28, 0x9c, 0x89, 0x1c, 0x81, 0xc9, 0xe6, + 0x43, 0x26, 0x13, 
0xa9, 0x51, 0x48, 0x53, 0x52, 0xe6, 0x91, 0xe6, 0x8b, 0x80, 0x4e, 0x84, 0xc9, + 0x5e, 0x0e, 0x04, 0x6e, 0x15, 0x95, 0xb2, 0x28, 0xb0, 0x9f, 0x47, 0x81, 0x8d, 0x82, 0x6c, 0x01, + 0x06, 0xf6, 0x72, 0x30, 0x50, 0x5c, 0x38, 0x87, 0x03, 0xf7, 0x4a, 0x70, 0xa0, 0x78, 0xfc, 0x05, + 0x40, 0x70, 0xaf, 0x04, 0x08, 0x3a, 0x37, 0xf6, 0x2a, 0x45, 0x82, 0xfd, 0x3c, 0x12, 0x14, 0xaf, + 0x53, 0x80, 0x82, 0x0f, 0xcb, 0xa0, 0xe0, 0x4e, 0x41, 0x67, 0x21, 0x16, 0xbc, 0x77, 0x03, 0x0b, + 0x36, 0x0b, 0xaa, 0x25, 0x60, 0x70, 0x2f, 0x97, 0xa5, 0xa1, 0xf4, 0x6e, 0xe5, 0x69, 0x9a, 0x7c, + 0xef, 0x26, 0x8e, 0x6c, 0x15, 0x3f, 0x6d, 0x19, 0x90, 0x1c, 0x14, 0x80, 0xe4, 0x76, 0xf1, 0x94, + 0x05, 0x24, 0x49, 0xf1, 0x60, 0x4f, 0xc4, 0x7d, 0xc1, 0xd3, 0x44, 0x8e, 0x60, 0x51, 0x14, 0x44, + 0x2a, 0x61, 0xcb, 0x89, 0xb5, 0x2b, 0x32, 0x51, 0xea, 0x5f, 0x2f, 0xc1, 0x0e, 0x74, 0xfa, 0x8c, + 0x77, 0x59, 0xbf, 0x32, 0x52, 0x5d, 0x8c, 0xe8, 0x6c, 0x16, 0x33, 0x55, 0x16, 0xcb, 0x40, 0x4a, + 0x25, 0x07, 0x29, 0xe4, 0x3b, 0xf0, 0x1a, 0xa6, 0x11, 0xb4, 0x8b, 0x93, 0x4b, 0x6b, 0x6d, 0xc1, + 0x90, 0x06, 0x91, 0xf9, 0xed, 0x1d, 0xb8, 0x95, 0x91, 0x15, 0x29, 0x16, 0x53, 0x58, 0x0d, 0x83, + 0x77, 0x3d, 0x91, 0x3e, 0x0e, 0xc3, 0x13, 0x1a, 0x8f, 0xad, 0x1f, 0xa5, 0xf7, 0x4f, 0xe1, 0x8a, + 0x40, 0x6d, 0x18, 0xb8, 0xf2, 0x5a, 0x6b, 0x36, 0x8e, 0x05, 0x84, 0x4d, 0x82, 0x11, 0xee, 0x6a, + 0xda, 0x62, 0x28, 0xa4, 0x92, 0x48, 0x31, 0x65, 0x48, 0x58, 0xbf, 0x34, 0xd2, 0xf5, 0x52, 0x04, + 0x2b, 0x03, 0x1b, 0xe3, 0x7f, 0x01, 0x9b, 0xca, 0xab, 0x81, 0x8d, 0x75, 0x6d, 0xa4, 0x5f, 0x24, + 0x81, 0x91, 0xaf, 0x77, 0x45, 0xe1, 0x1c, 0x9e, 0xef, 0xb2, 0x39, 0x06, 0x7c, 0xd5, 0x96, 0x13, + 0x8d, 0xf0, 0x75, 0x34, 0x73, 0x1e, 0xe1, 0x1b, 0x48, 0x93, 0x13, 0xf2, 0x06, 0xc2, 0x4f, 0x70, + 0xa1, 0x22, 0x71, 0xad, 0xa7, 0xca, 0xdc, 0x33, 0x41, 0xb4, 0x25, 0x2f, 0x93, 0x4c, 0xcd, 0x1c, + 0x76, 0xbd, 0x0e, 0xa6, 0x38, 0x68, 0x1c, 0xd2, 0x21, 0xc3, 0xc0, 0x32, 0xed, 0x94, 0x60, 0x9d, + 0x01, 0xb9, 0x19, 0xd0, 0xe4, 0x03, 0xa8, 0x71, 0x3a, 0x12, 0xf6, 0x16, 0x26, 0x6b, 0xf5, 0x64, + 0x65, 0xde, 0xfb, 0xf8, 0xfc, 0x8c, 0x7a, 0x51, 0x7f, 0x53, 0x98, 0xea, 0x1f, 0x2f, 0x76, 0x5a, + 0x42, 0x66, 0x3f, 0x98, 0x7a, 0x9c, 0x4d, 0x43, 0x7e, 0x65, 0xa3, 0x8e, 0xf5, 0x4f, 0x43, 0x24, + 0xfa, 0x5c, 0xa0, 0x97, 0x1a, 0x4e, 0x7b, 0x73, 0x25, 0x83, 0xc9, 0x5f, 0xcd, 0x98, 0xdf, 0x04, + 0x18, 0xd1, 0xd8, 0xf9, 0x8c, 0xfa, 0x9c, 0xb9, 0xca, 0xa2, 0xe6, 0x88, 0xc6, 0x3f, 0x43, 0x82, + 0x28, 0x60, 0x04, 0x7b, 0x16, 0x33, 0x17, 0x4d, 0x5b, 0xb5, 0x1b, 0x23, 0x1a, 0x3f, 0x8d, 0x99, + 0x9b, 0xdc, 0xab, 0xf1, 0xea, 0xf7, 0xca, 0xdb, 0xb1, 0x59, 0xb4, 0xe3, 0xbf, 0x32, 0x3e, 0x9c, + 0x62, 0xe0, 0xff, 0xff, 0xbd, 0xff, 0x6e, 0x08, 0xe8, 0xcf, 0x67, 0x59, 0x72, 0x0a, 0xaf, 0x25, + 0x71, 0xe4, 0xcc, 0x30, 0xbe, 0xb4, 0x2f, 0xbd, 0x3c, 0xfc, 0xd6, 0x2f, 0xf3, 0xe4, 0x98, 0xfc, + 0x18, 0xb6, 0x0a, 0x59, 0x20, 0x59, 0xb0, 0xf2, 0xd2, 0x64, 0x70, 0x3b, 0x9f, 0x0c, 0xf4, 0x7a, + 0xda, 0x12, 0xd5, 0xaf, 0xe1, 0xd9, 0xdf, 0x12, 0x75, 0x50, 0x16, 0x1b, 0xca, 0xbe, 0xa5, 0xf5, + 0x0b, 0x03, 0xda, 0x85, 0xc3, 0x90, 0x03, 0x00, 0x99, 0x5a, 0x63, 0xef, 0xb9, 0xae, 0xc9, 0xd7, + 0xd5, 0xc1, 0xd1, 0x64, 0x8f, 0xbd, 0xe7, 0xcc, 0x36, 0x07, 0x7a, 0x48, 0x3e, 0x82, 0x36, 0x53, + 0x95, 0x99, 0xce, 0x7d, 0x95, 0x1c, 0x48, 0xe9, 0xba, 0x4d, 0xdd, 0xb6, 0xc5, 0x72, 0x73, 0xeb, + 0x18, 0xcc, 0x64, 0x5d, 0xf2, 0x0d, 0x30, 0xa7, 0x74, 0xae, 0xea, 0x65, 0x59, 0x69, 0x35, 0xa7, + 0x74, 0x8e, 0xa5, 0x32, 0xd9, 0x82, 0x86, 0x60, 0x8e, 0xa8, 0xdc, 0xa1, 0x6a, 0xd7, 0xa7, 0x74, + 0xfe, 0x43, 0x1a, 0x5b, 0x7b, 0xd0, 0xca, 
0x6f, 0xa2, 0x45, 0x35, 0x76, 0x49, 0xd1, 0xe3, 0x11, + 0xb3, 0x1e, 0x43, 0x2b, 0x5f, 0x92, 0x8a, 0x3c, 0x16, 0x05, 0x33, 0xdf, 0x45, 0xc1, 0x65, 0x5b, + 0x4e, 0x44, 0x3f, 0x7a, 0x19, 0xc8, 0x4f, 0x97, 0xad, 0x41, 0xcf, 0x03, 0xce, 0x32, 0x85, 0xac, + 0x94, 0xb1, 0xfe, 0x50, 0x83, 0xba, 0xac, 0x8f, 0xc9, 0x9b, 0x99, 0x96, 0x04, 0xc1, 0xaf, 0xbf, + 0x72, 0xfd, 0x62, 0xa7, 0x81, 0x38, 0x71, 0xfa, 0x30, 0xed, 0x4f, 0xd2, 0x14, 0x58, 0xc9, 0xa5, + 0x40, 0xdd, 0x0c, 0x55, 0x5f, 0xb9, 0x19, 0xda, 0x82, 0x86, 0x3f, 0x9b, 0x3a, 0x7c, 0x1e, 0x63, + 0x24, 0x56, 0xed, 0xba, 0x3f, 0x9b, 0x3e, 0x99, 0xc7, 0xc2, 0xa6, 0x3c, 0xe0, 0x74, 0x82, 0x2c, + 0x19, 0x8a, 0x4d, 0x24, 0x08, 0xe6, 0x11, 0xac, 0x65, 0xe0, 0xd4, 0x73, 0x55, 0xad, 0xd6, 0xca, + 0x7e, 0xf1, 0xd3, 0x87, 0xea, 0xba, 0x2b, 0x09, 0xbc, 0x9e, 0xba, 0x64, 0x37, 0x5f, 0xfb, 0x23, + 0x0a, 0x4b, 0x28, 0xc8, 0x94, 0xf7, 0x02, 0x83, 0xc5, 0x01, 0x84, 0xbb, 0x49, 0x91, 0x26, 0x8a, + 0x34, 0x05, 0x01, 0x99, 0x6f, 0x41, 0x3b, 0x05, 0x32, 0x29, 0x62, 0xca, 0x55, 0x52, 0x32, 0x0a, + 0xbe, 0x0b, 0x1b, 0x3e, 0x9b, 0x73, 0xa7, 0x28, 0x0d, 0x28, 0x4d, 0x04, 0xef, 0x3c, 0xaf, 0xf1, + 0x6d, 0x68, 0xa5, 0x01, 0x89, 0xb2, 0x2b, 0xb2, 0x03, 0x4b, 0xa8, 0x28, 0x76, 0x07, 0x9a, 0x49, + 0x19, 0xb1, 0x8a, 0x02, 0x0d, 0x2a, 0xab, 0x87, 0xa4, 0x30, 0x89, 0x58, 0x3c, 0x9b, 0x70, 0xb5, + 0xc8, 0x1a, 0xca, 0x60, 0x61, 0x62, 0x4b, 0x3a, 0xca, 0xbe, 0x01, 0x6b, 0x49, 0x1c, 0xa0, 0x5c, + 0x0b, 0xe5, 0x56, 0x35, 0x11, 0x85, 0xf6, 0x60, 0x3d, 0x8c, 0x82, 0x30, 0x88, 0x59, 0xe4, 0x50, + 0xd7, 0x8d, 0x58, 0x1c, 0x77, 0xda, 0x72, 0x3d, 0x4d, 0x3f, 0x96, 0x64, 0xeb, 0xe7, 0xd0, 0x50, + 0xd6, 0x2f, 0xed, 0xd3, 0xbe, 0x0f, 0xab, 0x21, 0x8d, 0xc4, 0x99, 0xb2, 0xdd, 0x9a, 0xae, 0x96, + 0xcf, 0x68, 0x24, 0xda, 0xf3, 0x5c, 0xd3, 0xb6, 0x82, 0xf2, 0x92, 0x64, 0xdd, 0x83, 0xb5, 0x9c, + 0x8c, 0x08, 0x03, 0x74, 0x0a, 0x1d, 0x06, 0x38, 0x49, 0x76, 0xae, 0xa4, 0x3b, 0x5b, 0xf7, 0xc1, + 0x4c, 0x0c, 0x2d, 0x8a, 0x3a, 0x7d, 0x0f, 0x43, 0xd9, 0x4e, 0x4e, 0xb1, 0x11, 0x0d, 0x3e, 0x63, + 0x91, 0x2a, 0xe4, 0xe4, 0xc4, 0x7a, 0x0a, 0xed, 0x42, 0x3e, 0x25, 0xfb, 0xd0, 0x08, 0x67, 0x03, + 0x47, 0x3f, 0x20, 0xa4, 0x2d, 0xe7, 0xd9, 0x6c, 0xf0, 0x31, 0xbb, 0xd2, 0x2d, 0x67, 0x88, 0xb3, + 0x74, 0xd9, 0x4a, 0x76, 0xd9, 0x09, 0x34, 0x75, 0x68, 0x92, 0xef, 0x82, 0x99, 0xf8, 0x48, 0x21, + 0x81, 0x25, 0x5b, 0xab, 0x45, 0x53, 0x41, 0xf1, 0xa9, 0x63, 0x6f, 0xe4, 0x33, 0xd7, 0x49, 0xe3, + 0x01, 0xf7, 0x68, 0xda, 0x6d, 0xc9, 0xf8, 0x44, 0x3b, 0xbf, 0xf5, 0x2e, 0xd4, 0xe5, 0xd9, 0x84, + 0x7d, 0xc4, 0xca, 0xba, 0xce, 0x15, 0xe3, 0xd2, 0x4c, 0xfb, 0x47, 0x03, 0x9a, 0x3a, 0x45, 0x95, + 0x2a, 0xe5, 0x0e, 0x5d, 0xf9, 0xaa, 0x87, 0x5e, 0xf4, 0x08, 0xa0, 0xb3, 0x48, 0xed, 0x95, 0xb3, + 0xc8, 0x3e, 0x10, 0x99, 0x2c, 0x2e, 0x03, 0xee, 0xf9, 0x23, 0x47, 0xda, 0x5a, 0x66, 0x8d, 0x75, + 0xe4, 0x9c, 0x23, 0xe3, 0x4c, 0xd0, 0x0f, 0x3f, 0x5f, 0x86, 0xf6, 0x71, 0xff, 0xc1, 0xe9, 0x71, + 0x18, 0x4e, 0xbc, 0x21, 0xc5, 0xe2, 0xfa, 0x00, 0x6a, 0xd8, 0x3e, 0x94, 0x3c, 0x5c, 0x76, 0xcb, + 0xfa, 0x58, 0x72, 0x08, 0xcb, 0xd8, 0x45, 0x90, 0xb2, 0xf7, 0xcb, 0x6e, 0x69, 0x3b, 0x2b, 0x36, + 0x91, 0x7d, 0xc6, 0xcd, 0x67, 0xcc, 0x6e, 0x59, 0x4f, 0x4b, 0x3e, 0x02, 0x33, 0xad, 0xff, 0x17, + 0x3d, 0x66, 0x76, 0x17, 0x76, 0xb7, 0x42, 0x3f, 0xad, 0x95, 0x16, 0xbd, 0xc9, 0x75, 0x17, 0xb6, + 0x81, 0xe4, 0x08, 0x1a, 0xba, 0xc2, 0x2c, 0x7f, 0x6e, 0xec, 0x2e, 0xe8, 0x3c, 0x85, 0x79, 0x64, + 0x49, 0x5f, 0xf6, 0x26, 0xda, 0x2d, 0x6d, 0x8f, 0xc9, 0xfb, 0x50, 0x57, 0xb0, 0x5f, 0xfa, 0xe4, + 0xd8, 0x2d, 0xef, 0x1f, 0xc5, 0x25, 0xd3, 0xa6, 0x66, 0xd1, 0xbb, 
0x6d, 0x77, 0x61, 0x1f, 0x4f, + 0x8e, 0x01, 0x32, 0x95, 0xf9, 0xc2, 0x07, 0xd9, 0xee, 0xe2, 0xfe, 0x9c, 0xdc, 0x87, 0x66, 0xfa, + 0xe6, 0x52, 0xfe, 0xc4, 0xda, 0x5d, 0xd4, 0x32, 0xf7, 0x5f, 0xff, 0xf7, 0x5f, 0xb6, 0x8d, 0x5f, + 0x5f, 0x6f, 0x1b, 0xbf, 0xb9, 0xde, 0x36, 0xbe, 0xbc, 0xde, 0x36, 0x7e, 0x7f, 0xbd, 0x6d, 0xfc, + 0xf9, 0x7a, 0xdb, 0xf8, 0xed, 0x5f, 0xb7, 0x8d, 0x41, 0x1d, 0xdd, 0xff, 0xbd, 0xff, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x2c, 0x0a, 0x65, 0x88, 0x52, 0x18, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index b62162c47..39c96e0e3 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -157,6 +157,7 @@ message ResponseQuery { bytes value = 7; merkle.Proof proof = 8; int64 height = 9; + string codespace = 10; } message ResponseBeginBlock { @@ -171,6 +172,7 @@ message ResponseCheckTx { int64 gas_wanted = 5; int64 gas_used = 6; repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; + string codespace = 8; } message ResponseDeliverTx { @@ -181,6 +183,7 @@ message ResponseDeliverTx { int64 gas_wanted = 5; int64 gas_used = 6; repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; + string codespace = 8; } message ResponseEndBlock { diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index 1306128f6..15e246249 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -29,10 +29,15 @@ Some methods (`Echo, Info, InitChain, BeginBlock, EndBlock, Commit`), don't return errors because an error would indicate a critical failure in the application and there's nothing Tendermint can do. The problem should be addressed and both Tendermint and the application restarted. + All other methods (`SetOption, Query, CheckTx, DeliverTx`) return an application-specific response `Code uint32`, where only `0` is reserved for `OK`. +Finally, `Query`, `CheckTx`, and `DeliverTx` include a `Codespace string`, whose +intended use is to disambiguate `Code` values returned by different domains of the +application. The `Codespace` is a namespace for the `Code`. + ## Tags Some methods (`CheckTx, BeginBlock, DeliverTx, EndBlock`) @@ -215,6 +220,7 @@ Commit are included in the header of the next block. Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 + - `Codespace (string)`: Namespace for the `Code`. - **Usage**: - Query for data from the application at current or past height. - Optionally return Merkle proof. @@ -257,6 +263,7 @@ Commit are included in the header of the next block. - `GasUsed (int64)`: Amount of gas consumed by transaction. - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing transactions (eg. by account). + - `Codespace (string)`: Namespace for the `Code`. - **Usage**: - Technically optional - not involved in processing blocks. - Guardian of the mempool: every node runs CheckTx before letting a @@ -284,6 +291,7 @@ Commit are included in the header of the next block. - `GasUsed (int64)`: Amount of gas consumed by transaction. - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing transactions (eg. by account). + - `Codespace (string)`: Namespace for the `Code`. - **Usage**: - The workhorse of the application - non-optional. - Execute the transaction in full. 
From 1d8348d70750fe5a942f5ed3997a13bf68c463f0 Mon Sep 17 00:00:00 2001 From: Pierrick Hymbert Date: Sat, 6 Oct 2018 15:53:52 +0200 Subject: [PATCH 040/113] [p2p] Malformed external address causes SIGSEGV (if URL has empty host) (#2564) fix #2071 Signed-off-by: phymbert --- p2p/netaddress.go | 16 ++++++++++------ p2p/netaddress_test.go | 1 + 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/p2p/netaddress.go b/p2p/netaddress.go index a42f0fdde..f848b7a5a 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "errors" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -97,16 +98,19 @@ func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) { if err != nil { return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} } + if len(host) == 0 { + return nil, ErrNetAddressInvalid{ + addrWithoutProtocol, + errors.New("host is empty")} + } ip := net.ParseIP(host) if ip == nil { - if len(host) > 0 { - ips, err := net.LookupIP(host) - if err != nil { - return nil, ErrNetAddressLookup{host, err} - } - ip = ips[0] + ips, err := net.LookupIP(host) + if err != nil { + return nil, ErrNetAddressLookup{host, err} } + ip = ips[0] } port, err := strconv.ParseUint(portStr, 10, 16) diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go index 653b436a6..7d806dbd8 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddress_test.go @@ -49,6 +49,7 @@ func TestNewNetAddressStringWithOptionalID(t *testing.T) { {"tcp://@127.0.0.1:8080", "", false}, {"tcp://@", "", false}, + {"tcp://:26656", "", false}, {"", "", false}, {"@", "", false}, {" @", "", false}, From 5f88fe0e9bbae465666355b981f57e17941f4b25 Mon Sep 17 00:00:00 2001 From: goolAdapter <267310165@qq.com> Date: Mon, 8 Oct 2018 14:05:12 +0800 Subject: [PATCH 041/113] fix p2p switch FlushThrottle value (#2569) --- CHANGELOG_PENDING.md | 1 + p2p/switch.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index bed71d1f3..6a3d6f00d 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -48,3 +48,4 @@ timeoutPrecommit before starting next round - [common/bit_array] Fixed a bug in the `Or` function - [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar) - [common] \#2534 make bit array's PickRandom choose uniformly from true bits +- [p2p] \#2555 fix p2p switch FlushThrottle value (@goolAdapter) diff --git a/p2p/switch.go b/p2p/switch.go index dbef56ebf..8325d7e82 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -103,7 +103,7 @@ func NewSwitch( sw.rng = cmn.NewRand() mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond + mConfig.FlushThrottle = cfg.FlushThrottleTimeout mConfig.SendRate = cfg.SendRate mConfig.RecvRate = cfg.RecvRate mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize From b8556b97b82c01c422bfec31481d28dfab1297be Mon Sep 17 00:00:00 2001 From: Zach Date: Mon, 8 Oct 2018 02:49:50 -0400 Subject: [PATCH 042/113] circle: save p2p logs as artifacts (#2566) --- .circleci/config.yml | 2 ++ Makefile | 3 +++ test/p2p/circleci.sh | 6 +++++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9d284be6f..55a3da4f9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -301,6 +301,8 @@ jobs: - run: mkdir -p $GOPATH/src/github.com/tendermint - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint - run: bash test/p2p/circleci.sh + 
- store_artifacts: + path: /home/circleci/project/test/p2p/logs upload_coverage: <<: *defaults diff --git a/Makefile b/Makefile index 73bd67b0b..5711e4317 100644 --- a/Makefile +++ b/Makefile @@ -182,6 +182,9 @@ test_p2p: cd .. # requires 'tester' the image from above bash test/p2p/test.sh tester + # the `docker cp` takes a really long time; uncomment for debugging + # + # mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs test_integrations: make build_docker_test_image diff --git a/test/p2p/circleci.sh b/test/p2p/circleci.sh index 19200afbe..c548d5752 100644 --- a/test/p2p/circleci.sh +++ b/test/p2p/circleci.sh @@ -6,7 +6,7 @@ SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" -LOGS_DIR="$DIR/../logs" +LOGS_DIR="$DIR/logs" echo echo "* [$(date +"%T")] cleaning up $LOGS_DIR" rm -rf "$LOGS_DIR" @@ -33,3 +33,7 @@ fi echo echo "* [$(date +"%T")] running p2p tests on a local docker network" bash "$DIR/../p2p/test.sh" tester + +echo +echo "* [$(date +"%T")] copying log files out of docker container into $LOGS_DIR" +docker cp rsyslog:/var/log $LOGS_DIR From 4c0c6e01168d4d02951ecb2e5eee5a5d2c987ab7 Mon Sep 17 00:00:00 2001 From: bradyjoestar Date: Mon, 8 Oct 2018 00:06:01 -0700 Subject: [PATCH 043/113] [tm-bench] exit on CTRL-C (#2405) --- tools/tm-bench/main.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tools/tm-bench/main.go b/tools/tm-bench/main.go index a418e0363..a7c427c03 100644 --- a/tools/tm-bench/main.go +++ b/tools/tm-bench/main.go @@ -12,6 +12,8 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrpc "github.com/tendermint/tendermint/rpc/client" + "os/signal" + "syscall" ) var logger = log.NewNopLogger() @@ -101,6 +103,19 @@ Examples: "broadcast_tx_"+broadcastTxMethod, ) + //catch Interrupt and quit tm-bench + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + for sig := range c { + fmt.Printf("captured %v, exiting...\n", sig) + for _, t := range transacters { + t.Stop() + } + os.Exit(1) + } + }() + // Wait until transacters have begun until we get the start time timeStart := time.Now() logger.Info("Time last transacter started", "t", timeStart) From 35b671214cae5cca4f1f47efa3b22e0615e30940 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 8 Oct 2018 17:03:38 +0400 Subject: [PATCH 044/113] tools: Refactor tm-bench (#2570) * specify time unit for FlushThrottleTimeout in TestP2PConfig Refs #2555 * [tm-bench] refactor code https://github.com/tendermint/tendermint/pull/2405#pullrequestreview-157166387 --- config/config.go | 2 +- tools/tm-bench/main.go | 35 +++++++++++++++-------------------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/config/config.go b/config/config.go index 1f9ff3e13..f2bac5c6f 100644 --- a/config/config.go +++ b/config/config.go @@ -405,7 +405,7 @@ func DefaultP2PConfig() *P2PConfig { func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://0.0.0.0:36656" - cfg.FlushThrottleTimeout = 10 + cfg.FlushThrottleTimeout = 10 * time.Millisecond cfg.AllowDuplicateIP = true return cfg } diff --git a/tools/tm-bench/main.go b/tools/tm-bench/main.go index a7c427c03..87f12ef34 100644 --- a/tools/tm-bench/main.go +++ b/tools/tm-bench/main.go @@ -4,16 +4,16 @@ import ( "flag" "fmt" "os" + "os/signal" "strings" "sync" + "syscall" "time" "github.com/go-kit/kit/log/term" "github.com/tendermint/tendermint/libs/log" tmrpc 
"github.com/tendermint/tendermint/rpc/client" - "os/signal" - "syscall" ) var logger = log.NewNopLogger() @@ -53,8 +53,7 @@ Examples: if verbose { if outputFormat == "json" { - fmt.Fprintln(os.Stderr, "Verbose mode not supported with json output.") - os.Exit(1) + printErrorAndExit("Verbose mode not supported with json output.") } // Color errors red colorFn := func(keyvals ...interface{}) term.FgBgColor { @@ -71,21 +70,13 @@ Examples: } if txSize < 40 { - fmt.Fprintln( - os.Stderr, - "The size of a transaction must be greater than or equal to 40.", - ) - os.Exit(1) + printErrorAndExit("The size of a transaction must be greater than or equal to 40.") } if broadcastTxMethod != "async" && broadcastTxMethod != "sync" && broadcastTxMethod != "commit" { - fmt.Fprintln( - os.Stderr, - "broadcast-tx-method should be either 'sync', 'async' or 'commit'.", - ) - os.Exit(1) + printErrorAndExit("broadcast-tx-method should be either 'sync', 'async' or 'commit'.") } var ( @@ -103,10 +94,10 @@ Examples: "broadcast_tx_"+broadcastTxMethod, ) - //catch Interrupt and quit tm-bench + // Quit when interrupted or received SIGTERM. + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) for sig := range c { fmt.Printf("captured %v, exiting...\n", sig) for _, t := range transacters { @@ -116,7 +107,7 @@ Examples: } }() - // Wait until transacters have begun until we get the start time + // Wait until transacters have begun until we get the start time. timeStart := time.Now() logger.Info("Time last transacter started", "t", timeStart) @@ -143,8 +134,7 @@ Examples: durationInt, ) if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) + printErrorAndExit(err.Error()) } printStatistics(stats, outputFormat) @@ -196,3 +186,8 @@ func startTransacters( return transacters } + +func printErrorAndExit(err string) { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) +} From b1e7fac787923bb92a2e720b45edf5ca5971b84f Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Mon, 8 Oct 2018 06:15:56 -0700 Subject: [PATCH 045/113] crypto/random: Use chacha20, add forward secrecy (#2562) Ref #2099 --- CHANGELOG_PENDING.md | 1 + crypto/random.go | 61 +++++++++++++++++++++++++------------------ crypto/random_test.go | 23 ++++++++++++++++ 3 files changed, 60 insertions(+), 25 deletions(-) create mode 100644 crypto/random_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 6a3d6f00d..1a927a329 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -38,6 +38,7 @@ IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [config] \#2232 added ValidateBasic method, which performs basic checks +- [crypto] \#2099 make crypto random use chacha, and have forward secrecy of generated randomness BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) diff --git a/crypto/random.go b/crypto/random.go index 5c5057d30..af3286427 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -1,7 +1,6 @@ package crypto import ( - "crypto/aes" "crypto/cipher" crand "crypto/rand" "crypto/sha256" @@ -9,9 +8,17 @@ import ( "io" "sync" + "golang.org/x/crypto/chacha20poly1305" + . "github.com/tendermint/tendermint/libs/common" ) +// The randomness here is derived from xoring a chacha20 keystream with +// output from crypto/rand's OS Entropy Reader. 
(Due to fears of the OS' +// entropy being backdoored) +// +// For forward secrecy of produced randomness, the internal chacha key is hashed +// and thereby rotated after each call. var gRandInfo *randInfo func init() { @@ -61,11 +68,10 @@ func CReader() io.Reader { //-------------------------------------------------------------------------------- type randInfo struct { - mtx sync.Mutex - seedBytes [32]byte - cipherAES256 cipher.Block - streamAES256 cipher.Stream - reader io.Reader + mtx sync.Mutex + seedBytes [chacha20poly1305.KeySize]byte + chacha cipher.AEAD + reader io.Reader } // You can call this as many times as you'd like. @@ -79,30 +85,35 @@ func (ri *randInfo) MixEntropy(seedBytes []byte) { h.Write(seedBytes) h.Write(ri.seedBytes[:]) hashBytes := h.Sum(nil) - hashBytes32 := [32]byte{} - copy(hashBytes32[:], hashBytes) - ri.seedBytes = xorBytes32(ri.seedBytes, hashBytes32) - // Create new cipher.Block - var err error - ri.cipherAES256, err = aes.NewCipher(ri.seedBytes[:]) + copy(ri.seedBytes[:], hashBytes) + chacha, err := chacha20poly1305.New(ri.seedBytes[:]) if err != nil { - PanicSanity("Error creating AES256 cipher: " + err.Error()) + panic("Initializing chacha20 failed") } - // Create new stream - ri.streamAES256 = cipher.NewCTR(ri.cipherAES256, randBytes(aes.BlockSize)) + ri.chacha = chacha // Create new reader - ri.reader = &cipher.StreamReader{S: ri.streamAES256, R: crand.Reader} + ri.reader = &cipher.StreamReader{S: ri, R: crand.Reader} } -func (ri *randInfo) Read(b []byte) (n int, err error) { - ri.mtx.Lock() - defer ri.mtx.Unlock() - return ri.reader.Read(b) +func (ri *randInfo) XORKeyStream(dst, src []byte) { + // nonce being 0 is safe due to never re-using a key. + emptyNonce := make([]byte, 12) + tmpDst := ri.chacha.Seal([]byte{}, emptyNonce, src, []byte{0}) + // this removes the poly1305 tag as well, since chacha is a stream cipher + // and we truncate at input length. + copy(dst, tmpDst[:len(src)]) + // hash seedBytes for forward secrecy, and initialize new chacha instance + newSeed := sha256.Sum256(ri.seedBytes[:]) + chacha, err := chacha20poly1305.New(newSeed[:]) + if err != nil { + panic("Initializing chacha20 failed") + } + ri.chacha = chacha } -func xorBytes32(bytesA [32]byte, bytesB [32]byte) (res [32]byte) { - for i, b := range bytesA { - res[i] = b ^ bytesB[i] - } - return res +func (ri *randInfo) Read(b []byte) (n int, err error) { + ri.mtx.Lock() + n, err = ri.reader.Read(b) + ri.mtx.Unlock() + return } diff --git a/crypto/random_test.go b/crypto/random_test.go new file mode 100644 index 000000000..34f7372fe --- /dev/null +++ b/crypto/random_test.go @@ -0,0 +1,23 @@ +package crypto_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" +) + +// the purpose of this test is primarily to ensure that the randomness +// generation won't error. 
+func TestRandomConsistency(t *testing.T) { + x1 := crypto.CRandBytes(256) + x2 := crypto.CRandBytes(256) + x3 := crypto.CRandBytes(256) + x4 := crypto.CRandBytes(256) + x5 := crypto.CRandBytes(256) + require.NotEqual(t, x1, x2) + require.NotEqual(t, x3, x4) + require.NotEqual(t, x4, x5) + require.NotEqual(t, x1, x5) +} From c17547ac2f9be420aefedd0a054ccc5bd1f78c0d Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Mon, 8 Oct 2018 06:20:41 -0700 Subject: [PATCH 046/113] Switch nodeID to use tmhash.Size, add test names for net addr tests (#2559) * Switch nodeID to be tmhash.Size, add test names for net addr tests Both of these came up when locally trying to change tmhash size. * fix error introduced by merge --- p2p/key.go | 3 +- p2p/netaddress_test.go | 71 ++++++++++++++++++++++-------------------- 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/p2p/key.go b/p2p/key.go index 4d1ecd82f..3f38b48a9 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -8,6 +8,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -16,7 +17,7 @@ type ID string // IDByteLength is the length of a crypto.Address. Currently only 20. // TODO: support other length addresses ? -const IDByteLength = 20 +const IDByteLength = tmhash.Size //------------------------------------------------------------------------------ // Persistent peer ID diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go index 7d806dbd8..e7b184a76 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddress_test.go @@ -22,49 +22,52 @@ func TestNewNetAddress(t *testing.T) { func TestNewNetAddressStringWithOptionalID(t *testing.T) { testCases := []struct { + name string addr string expected string correct bool }{ - {"127.0.0.1:8080", "127.0.0.1:8080", true}, - {"tcp://127.0.0.1:8080", "127.0.0.1:8080", true}, - {"udp://127.0.0.1:8080", "127.0.0.1:8080", true}, - {"udp//127.0.0.1:8080", "", false}, + {"no node id, no protocol", "127.0.0.1:8080", "127.0.0.1:8080", true}, + {"no node id, tcp input", "tcp://127.0.0.1:8080", "127.0.0.1:8080", true}, + {"no node id, udp input", "udp://127.0.0.1:8080", "127.0.0.1:8080", true}, + {"malformed udp input", "udp//127.0.0.1:8080", "", false}, // {"127.0.0:8080", false}, - {"notahost", "", false}, - {"127.0.0.1:notapath", "", false}, - {"notahost:8080", "", false}, - {"8082", "", false}, - {"127.0.0:8080000", "", false}, - - {"deadbeef@127.0.0.1:8080", "", false}, - {"this-isnot-hex@127.0.0.1:8080", "", false}, - {"xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - - {"tcp://deadbeef@127.0.0.1:8080", "", false}, - {"tcp://this-isnot-hex@127.0.0.1:8080", "", false}, - {"tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - - {"tcp://@127.0.0.1:8080", "", false}, - {"tcp://@", "", false}, - {"tcp://:26656", "", false}, - {"", "", false}, - {"@", "", false}, - {" @", "", false}, - {" @ ", "", false}, + {"invalid host", "notahost", "", false}, + {"invalid port", "127.0.0.1:notapath", "", false}, + {"invalid host w/ port", "notahost:8080", "", false}, + {"just a port", "8082", "", false}, + {"non-existent port", "127.0.0:8080000", "", false}, + + {"too 
short nodeId", "deadbeef@127.0.0.1:8080", "", false}, + {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, + {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"correct nodeId", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, + + {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, + {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, + {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"correct nodeId w/tcp", "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, + + {"no node id when expected", "tcp://@127.0.0.1:8080", "", false}, + {"no node id or IP", "tcp://@", "", false}, + {"tcp no host, w/ port", "tcp://:26656", "", false}, + {"empty", "", "", false}, + {"node id delimiter 1", "@", "", false}, + {"node id delimiter 2", " @", "", false}, + {"node id delimiter 3", " @ ", "", false}, } for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) + t.Run(tc.name, func(t *testing.T) { + addr, err := NewNetAddressStringWithOptionalID(tc.addr) + if tc.correct { + if assert.Nil(t, err, tc.addr) { + assert.Equal(t, tc.expected, addr.String()) + } + } else { + assert.NotNil(t, err, tc.addr) } - } else { - assert.NotNil(t, err, tc.addr) - } + }) } } From 4b2bf023dd37c382b14663840f8284be23a7ef31 Mon Sep 17 00:00:00 2001 From: goolAdapter <267310165@qq.com> Date: Mon, 8 Oct 2018 21:36:31 +0800 Subject: [PATCH 047/113] libs: Fix event concurrency flaw (#2519) * fix event concurrency flaw * modify changelog * fix a mistake * fix a lint issue * modify changelog * modify for review issue * modify for review issue * modify for review issue --- CHANGELOG_PENDING.md | 1 + libs/events/events.go | 35 ++++++++++++++++++------ libs/events/events_test.go | 55 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 8 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 1a927a329..0cd8bd2fe 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -50,3 +50,4 @@ timeoutPrecommit before starting next round - [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar) - [common] \#2534 make bit array's PickRandom choose uniformly from true bits - [p2p] \#2555 fix p2p switch FlushThrottle value (@goolAdapter) +- [libs/event] \#2518 fix event concurrency flaw (@goolAdapter) diff --git a/libs/events/events.go b/libs/events/events.go index 9c7f0fd05..864365563 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -4,15 +4,23 @@ Pub-Sub in go with event caching package events import ( + "fmt" "sync" cmn "github.com/tendermint/tendermint/libs/common" ) +type ErrListenerWasRemoved struct { + listener string +} + +func (e ErrListenerWasRemoved) Error() string { + return fmt.Sprintf("listener %s was removed", e.listener) +} + // Generic event data can be typed and registered with tendermint/go-amino // via concrete implementation of this interface type EventData interface { - //AssertIsEventData() } // reactors and other modules should export @@ -30,7 +38,7 @@ type EventSwitch interface { cmn.Service Fireable - AddListenerForEvent(listenerID, event string, cb EventCallback) + AddListenerForEvent(listenerID, 
event string, cb EventCallback) error RemoveListenerForEvent(event string, listenerID string) RemoveListener(listenerID string) } @@ -58,7 +66,7 @@ func (evsw *eventSwitch) OnStart() error { func (evsw *eventSwitch) OnStop() {} -func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) { +func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) error { // Get/Create eventCell and listener evsw.mtx.Lock() eventCell := evsw.eventCells[event] @@ -74,8 +82,12 @@ func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventC evsw.mtx.Unlock() // Add event and listener - eventCell.AddListener(listenerID, cb) - listener.AddEvent(event) + err := listener.AddEvent(event) + if err == nil { + eventCell.AddListener(listenerID, cb) + } + + return err } func (evsw *eventSwitch) RemoveListener(listenerID string) { @@ -168,10 +180,15 @@ func (cell *eventCell) RemoveListener(listenerID string) int { func (cell *eventCell) FireEvent(data EventData) { cell.mtx.RLock() + var listenerCopy []EventCallback for _, listener := range cell.listeners { - listener(data) + listenerCopy = append(listenerCopy, listener) } cell.mtx.RUnlock() + + for _, listener := range listenerCopy { + listener(data) + } } //----------------------------------------------------------------------------- @@ -194,14 +211,16 @@ func newEventListener(id string) *eventListener { } } -func (evl *eventListener) AddEvent(event string) { +func (evl *eventListener) AddEvent(event string) error { evl.mtx.Lock() defer evl.mtx.Unlock() if evl.removed { - return + return ErrListenerWasRemoved{listener: evl.id} } + evl.events = append(evl.events, event) + return nil } func (evl *eventListener) GetEvents() []string { diff --git a/libs/events/events_test.go b/libs/events/events_test.go index a01fbbb77..02ec44c4f 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -17,6 +17,7 @@ func TestAddListenerForEventFireOnce(t *testing.T) { if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } + defer evsw.Stop() messages := make(chan EventData) evsw.AddListenerForEvent("listener", "event", func(data EventData) { @@ -37,6 +38,7 @@ func TestAddListenerForEventFireMany(t *testing.T) { if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } + defer evsw.Stop() doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) @@ -66,6 +68,7 @@ func TestAddListenerForDifferentEvents(t *testing.T) { if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } + defer evsw.Stop() doneSum := make(chan uint64) doneSending1 := make(chan uint64) doneSending2 := make(chan uint64) @@ -111,6 +114,7 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } + defer evsw.Stop() doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -162,6 +166,54 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { } } +func TestAddAndRemoveListenerConcurrency(t *testing.T) { + var ( + stopInputEvent = false + roundCount = 2000 + ) + + evsw := NewEventSwitch() + err := evsw.Start() + if err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + defer evsw.Stop() + + done1 := make(chan struct{}) + done2 := make(chan struct{}) + + go func() { + for i := 0; i < roundCount; i++ { + evsw.RemoveListener("listener") + } + done1 <- struct{}{} + }() + + go func() { + for i 
:= 0; i < roundCount; i++ { + index := i //it necessary for closure + evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), + func(data EventData) { + t.Errorf("should not run callback for %d.\n", index) + stopInputEvent = true + }) + } + done2 <- struct{}{} + }() + + <-done1 + <-done2 + + close(done1) + close(done2) + + evsw.RemoveListener("listener") // make sure remove last + + for i := 0; i < roundCount && !stopInputEvent; i++ { + evsw.FireEvent(fmt.Sprintf("event%d", i), uint64(1001)) + } +} + // TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to // two events, fires a thousand integers for the first event, then unsubscribes // the listener and fires a thousand integers for the second event. @@ -171,6 +223,7 @@ func TestAddAndRemoveListener(t *testing.T) { if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } + defer evsw.Stop() doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -216,6 +269,7 @@ func TestRemoveListener(t *testing.T) { if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } + defer evsw.Stop() count := 10 sum1, sum2 := 0, 0 // add some listeners and make sure they work @@ -269,6 +323,7 @@ func TestRemoveListenersAsync(t *testing.T) { if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } + defer evsw.Stop() doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) From 989a2f32b1411d159be436d64136248f02289fba Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 9 Oct 2018 15:09:40 +0400 Subject: [PATCH 048/113] libs: Refactor & document events code (#2576) * [libs/events] add more godoc comments * [libs/events] refactor code - improve var naming - improve code structure - do not use defers for unlocking mutexes (defer takes time) --- libs/events/events.go | 66 +++++++++++++++++++++----------------- libs/events/events_test.go | 59 +++++++++++++++------------------- 2 files changed, 63 insertions(+), 62 deletions(-) diff --git a/libs/events/events.go b/libs/events/events.go index 864365563..fb90bbea6 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -1,6 +1,4 @@ -/* -Pub-Sub in go with event caching -*/ +// Package events - Pub-Sub in go with event caching package events import ( @@ -10,30 +8,40 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) +// ErrListenerWasRemoved is returned by AddEvent if the listener was removed. type ErrListenerWasRemoved struct { - listener string + listenerID string } +// Error implements the error interface. func (e ErrListenerWasRemoved) Error() string { - return fmt.Sprintf("listener %s was removed", e.listener) + return fmt.Sprintf("listener #%s was removed", e.listenerID) } -// Generic event data can be typed and registered with tendermint/go-amino -// via concrete implementation of this interface -type EventData interface { -} +// EventData is a generic event data can be typed and registered with +// tendermint/go-amino via concrete implementation of this interface. +type EventData interface{} -// reactors and other modules should export -// this interface to become eventable +// Eventable is the interface reactors and other modules must export to become +// eventable. type Eventable interface { SetEventSwitch(evsw EventSwitch) } -// an event switch or cache implements fireable +// Fireable is the interface that wraps the FireEvent method. +// +// FireEvent fires an event with the given name and data. 
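// Editor's note: the short sketch below is an illustrative usage example added
// during editing; it is not part of the original commit. It only uses names
// defined in this package (NewEventSwitch, AddListenerForEvent, FireEvent,
// RemoveListener, EventData); the listener and event names are made up:
//
//	evsw := NewEventSwitch()
//	if err := evsw.Start(); err != nil {
//		// handle the start error
//	}
//	defer evsw.Stop()
//
//	// AddListenerForEvent now returns an error (e.g. ErrListenerWasRemoved)
//	// instead of failing silently.
//	if err := evsw.AddListenerForEvent("miner", "newBlock", func(data EventData) {
//		// react to the event
//	}); err != nil {
//		// handle the subscription error
//	}
//
//	evsw.FireEvent("newBlock", "block-data")
//	evsw.RemoveListener("miner")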
type Fireable interface { FireEvent(event string, data EventData) } +// EventSwitch is the interface for synchronous pubsub, where listeners +// subscribe to certain events and, when an event is fired (see Fireable), +// notified via a callback function. +// +// Listeners are added by calling AddListenerForEvent function. +// They can be removed by calling either RemoveListenerForEvent or +// RemoveListener (for all events). type EventSwitch interface { cmn.Service Fireable @@ -67,7 +75,7 @@ func (evsw *eventSwitch) OnStart() error { func (evsw *eventSwitch) OnStop() {} func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) error { - // Get/Create eventCell and listener + // Get/Create eventCell and listener. evsw.mtx.Lock() eventCell := evsw.eventCells[event] if eventCell == nil { @@ -81,17 +89,17 @@ func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventC } evsw.mtx.Unlock() - // Add event and listener - err := listener.AddEvent(event) - if err == nil { - eventCell.AddListener(listenerID, cb) + // Add event and listener. + if err := listener.AddEvent(event); err != nil { + return err } + eventCell.AddListener(listenerID, cb) - return err + return nil } func (evsw *eventSwitch) RemoveListener(listenerID string) { - // Get and remove listener + // Get and remove listener. evsw.mtx.RLock() listener := evsw.listeners[listenerID] evsw.mtx.RUnlock() @@ -180,14 +188,14 @@ func (cell *eventCell) RemoveListener(listenerID string) int { func (cell *eventCell) FireEvent(data EventData) { cell.mtx.RLock() - var listenerCopy []EventCallback - for _, listener := range cell.listeners { - listenerCopy = append(listenerCopy, listener) + var eventCallbacks []EventCallback + for _, cb := range cell.listeners { + eventCallbacks = append(eventCallbacks, cb) } cell.mtx.RUnlock() - for _, listener := range listenerCopy { - listener(data) + for _, cb := range eventCallbacks { + cb(data) } } @@ -213,27 +221,27 @@ func newEventListener(id string) *eventListener { func (evl *eventListener) AddEvent(event string) error { evl.mtx.Lock() - defer evl.mtx.Unlock() if evl.removed { - return ErrListenerWasRemoved{listener: evl.id} + evl.mtx.Unlock() + return ErrListenerWasRemoved{listenerID: evl.id} } evl.events = append(evl.events, event) + evl.mtx.Unlock() return nil } func (evl *eventListener) GetEvents() []string { evl.mtx.RLock() - defer evl.mtx.RUnlock() - events := make([]string, len(evl.events)) copy(events, evl.events) + evl.mtx.RUnlock() return events } func (evl *eventListener) SetRemoved() { evl.mtx.Lock() - defer evl.mtx.Unlock() evl.removed = true + evl.mtx.Unlock() } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 02ec44c4f..7530afa98 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -6,6 +6,8 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" ) @@ -14,10 +16,9 @@ import ( func TestAddListenerForEventFireOnce(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() + messages := make(chan EventData) evsw.AddListenerForEvent("listener", "event", func(data EventData) { @@ -35,10 +36,9 @@ func TestAddListenerForEventFireOnce(t *testing.T) { func TestAddListenerForEventFireMany(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to 
start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() + doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) @@ -65,10 +65,9 @@ func TestAddListenerForEventFireMany(t *testing.T) { func TestAddListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() + doneSum := make(chan uint64) doneSending1 := make(chan uint64) doneSending2 := make(chan uint64) @@ -111,10 +110,9 @@ func TestAddListenerForDifferentEvents(t *testing.T) { func TestAddDifferentListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() + doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -174,40 +172,38 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() done1 := make(chan struct{}) done2 := make(chan struct{}) + // Must be executed concurrently to uncover the data race. + // 1. RemoveListener go func() { for i := 0; i < roundCount; i++ { evsw.RemoveListener("listener") } - done1 <- struct{}{} + close(done1) }() + // 2. AddListenerForEvent go func() { for i := 0; i < roundCount; i++ { - index := i //it necessary for closure + index := i evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), func(data EventData) { t.Errorf("should not run callback for %d.\n", index) stopInputEvent = true }) } - done2 <- struct{}{} + close(done2) }() <-done1 <-done2 - close(done1) - close(done2) - - evsw.RemoveListener("listener") // make sure remove last + evsw.RemoveListener("listener") // remove the last listener for i := 0; i < roundCount && !stopInputEvent; i++ { evsw.FireEvent(fmt.Sprintf("event%d", i), uint64(1001)) @@ -220,10 +216,9 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { func TestAddAndRemoveListener(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() + doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -266,10 +261,9 @@ func TestAddAndRemoveListener(t *testing.T) { func TestRemoveListener(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() + count := 10 sum1, sum2 := 0, 0 // add some listeners and make sure they work @@ -320,10 +314,9 @@ func TestRemoveListener(t *testing.T) { func TestRemoveListenersAsync(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) defer evsw.Stop() + doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -406,7 +399,7 @@ func TestRemoveListenersAsync(t *testing.T) { // until the receiving channel `numbers` is closed; it then sends the sum // on `doneSum` and closes that channel. Expected to be run in a go-routine. 
func sumReceivedNumbers(numbers, doneSum chan uint64) { - var sum uint64 = 0 + var sum uint64 for { j, more := <-numbers sum += j @@ -425,7 +418,7 @@ func sumReceivedNumbers(numbers, doneSum chan uint64) { // the test to assert all events have also been received. func fireEvents(evsw EventSwitch, event string, doneChan chan uint64, offset uint64) { - var sentSum uint64 = 0 + var sentSum uint64 for i := offset; i <= offset+uint64(999); i++ { sentSum += i evsw.FireEvent(event, i) From 724e264ff587837b892dff73f1bbd908bbd5fb85 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 9 Oct 2018 16:10:05 +0400 Subject: [PATCH 049/113] separate mock evidence from real evidence (#2571) Closes #2525 --- consensus/reactor_test.go | 9 ++++++--- consensus/types/wire.go | 2 +- evidence/pool_test.go | 8 ++++++++ evidence/reactor_test.go | 6 ++---- types/block_test.go | 8 ++++++++ types/evidence.go | 3 ++- types/wire.go | 7 ++++++- 7 files changed, 33 insertions(+), 10 deletions(-) diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 41bddbd68..2758f3fab 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -11,6 +11,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" @@ -22,9 +25,6 @@ import ( "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func init() { @@ -97,6 +97,9 @@ func TestReactorBasic(t *testing.T) { // Ensure we can process blocks with evidence func TestReactorWithEvidence(t *testing.T) { + types.RegisterMockEvidences(cdc) + types.RegisterMockEvidences(types.GetCodec()) + nValidators := 4 testName := "consensus_reactor_test" tickerFunc := newMockTickerFunc(true) diff --git a/consensus/types/wire.go b/consensus/types/wire.go index db674816d..e8a05b355 100644 --- a/consensus/types/wire.go +++ b/consensus/types/wire.go @@ -1,7 +1,7 @@ package types import ( - "github.com/tendermint/go-amino" + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/types" ) diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 159ae7cd3..c3ed569e1 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -1,6 +1,7 @@ package evidence import ( + "os" "sync" "testing" @@ -14,6 +15,13 @@ import ( var mockState = sm.State{} +func TestMain(m *testing.M) { + types.RegisterMockEvidences(cdc) + + code := m.Run() + os.Exit(code) +} + func initializeValidatorState(valAddr []byte, height int64) dbm.DB { stateDB := dbm.NewMemDB() diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 23fd008af..ea9657d23 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -6,14 +6,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/go-kit/kit/log/term" + "github.com/stretchr/testify/assert" + cfg "github.com/tendermint/tendermint/config" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" - - cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) diff --git a/types/block_test.go b/types/block_test.go index 43366a63b..887f35a11 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -3,6 +3,7 @@ package types import ( 
"crypto/rand" "math" + "os" "testing" "time" @@ -13,6 +14,13 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) +func TestMain(m *testing.M) { + RegisterMockEvidences(cdc) + + code := m.Run() + os.Exit(code) +} + func TestBlockAddEvidence(t *testing.T) { txs := []Tx{Tx("foo"), Tx("bar")} lastID := makeBlockIDRandom() diff --git a/types/evidence.go b/types/evidence.go index 836a1a597..241e09391 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -46,8 +46,9 @@ type Evidence interface { func RegisterEvidences(cdc *amino.Codec) { cdc.RegisterInterface((*Evidence)(nil), nil) cdc.RegisterConcrete(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence", nil) +} - // mocks +func RegisterMockEvidences(cdc *amino.Codec) { cdc.RegisterConcrete(MockGoodEvidence{}, "tendermint/MockGoodEvidence", nil) cdc.RegisterConcrete(MockBadEvidence{}, "tendermint/MockBadEvidence", nil) } diff --git a/types/wire.go b/types/wire.go index c56089983..f3c314fa6 100644 --- a/types/wire.go +++ b/types/wire.go @@ -1,7 +1,7 @@ package types import ( - "github.com/tendermint/go-amino" + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto/encoding/amino" ) @@ -15,3 +15,8 @@ func RegisterBlockAmino(cdc *amino.Codec) { cryptoAmino.RegisterAmino(cdc) RegisterEvidences(cdc) } + +// GetCodec returns a codec used by the package. For testing purposes only. +func GetCodec() *amino.Codec { + return cdc +} From 561fc2d71733e58f9011e1c2e7c45e22dc568756 Mon Sep 17 00:00:00 2001 From: Overbool Date: Tue, 9 Oct 2018 20:19:00 +0800 Subject: [PATCH 050/113] test(db): Test itr.Value in checkValuePanics (#2580) Fixes #2573 --- libs/db/common_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/db/common_test.go b/libs/db/common_test.go index 13e6ed377..1e27a7cac 100644 --- a/libs/db/common_test.go +++ b/libs/db/common_test.go @@ -57,7 +57,7 @@ func checkKeyPanics(t *testing.T, itr Iterator) { } func checkValuePanics(t *testing.T, itr Iterator) { - assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") + assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't") } func newTempDB(t *testing.T, backend DBBackendType) (db DB, dbDir string) { From e7708850c02c0911cc033bae5b24fb98b8162d57 Mon Sep 17 00:00:00 2001 From: Joon Date: Tue, 9 Oct 2018 21:21:36 +0900 Subject: [PATCH 051/113] libs: Let prefixIterator implements Iterator correctly (#2581) Fixes #2577 --- libs/db/prefix_db.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go index 5bb53ebd9..9dc4ee97d 100644 --- a/libs/db/prefix_db.go +++ b/libs/db/prefix_db.go @@ -265,6 +265,8 @@ func (pb prefixBatch) WriteSync() { //---------------------------------------- // prefixIterator +var _ Iterator = (*prefixIterator)(nil) + // Strips prefix while iterating from Iterator. 
type prefixIterator struct { prefix []byte @@ -274,9 +276,9 @@ type prefixIterator struct { valid bool } -func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterator { +func newPrefixIterator(prefix, start, end []byte, source Iterator) *prefixIterator { if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) { - return prefixIterator{ + return &prefixIterator{ prefix: prefix, start: start, end: end, @@ -284,7 +286,7 @@ func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterato valid: false, } } else { - return prefixIterator{ + return &prefixIterator{ prefix: prefix, start: start, end: end, @@ -294,15 +296,15 @@ func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterato } } -func (itr prefixIterator) Domain() (start []byte, end []byte) { +func (itr *prefixIterator) Domain() (start []byte, end []byte) { return itr.start, itr.end } -func (itr prefixIterator) Valid() bool { +func (itr *prefixIterator) Valid() bool { return itr.valid && itr.source.Valid() } -func (itr prefixIterator) Next() { +func (itr *prefixIterator) Next() { if !itr.valid { panic("prefixIterator invalid, cannot call Next()") } @@ -314,21 +316,21 @@ func (itr prefixIterator) Next() { } } -func (itr prefixIterator) Key() (key []byte) { +func (itr *prefixIterator) Key() (key []byte) { if !itr.valid { panic("prefixIterator invalid, cannot call Key()") } return stripPrefix(itr.source.Key(), itr.prefix) } -func (itr prefixIterator) Value() (value []byte) { +func (itr *prefixIterator) Value() (value []byte) { if !itr.valid { panic("prefixIterator invalid, cannot call Value()") } return itr.source.Value() } -func (itr prefixIterator) Close() { +func (itr *prefixIterator) Close() { itr.source.Close() } From 8761b274896861aef79cd3341a84a65bfd905f44 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Tue, 9 Oct 2018 05:41:33 -0700 Subject: [PATCH 052/113] crypto: Add a way to go from pubkey to route (#2574) This is intended for use in a future PR for #2414 --- crypto/encoding/amino/amino.go | 23 ++++++++++++++++++++++- crypto/encoding/amino/encode_test.go | 22 ++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go index 7728e6afb..d0ae8ebf0 100644 --- a/crypto/encoding/amino/amino.go +++ b/crypto/encoding/amino/amino.go @@ -1,8 +1,11 @@ package cryptoAmino import ( - amino "github.com/tendermint/go-amino" + "errors" + + "reflect" + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/multisig" @@ -10,6 +13,7 @@ import ( ) var cdc = amino.NewCodec() +var routeTable = make(map[reflect.Type]string, 3) func init() { // NOTE: It's important that there be no conflicts here, @@ -19,6 +23,23 @@ func init() { // https://github.com/tendermint/go-amino/issues/9 // is resolved RegisterAmino(cdc) + + // TODO: Have amino provide a way to go from concrete struct to route directly. + // Its currently a private API + routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute + routeTable[reflect.TypeOf(secp256k1.PubKeySecp256k1{})] = secp256k1.PubKeyAminoRoute + routeTable[reflect.TypeOf(&multisig.PubKeyMultisigThreshold{})] = multisig.PubKeyMultisigThresholdAminoRoute +} + +// PubkeyAminoRoute returns the amino route of a pubkey +// cdc is currently passed in, as eventually this will not be using +// a package level codec. 
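// Editor's note: an illustrative call site (added for clarity, not part of the
// original commit), using only identifiers already present in this package and
// its imports (cdc, ed25519, PubkeyAminoRoute):
//
//	pub := ed25519.GenPrivKey().PubKey()
//	route, err := PubkeyAminoRoute(cdc, pub)
//	if err != nil {
//		// the concrete pubkey type is not registered in routeTable
//	}
//	// route == ed25519.PubKeyAminoRoute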
+func PubkeyAminoRoute(cdc *amino.Codec, key crypto.PubKey) (string, error) { + route, ok := routeTable[reflect.TypeOf(key)] + if !ok { + return "", errors.New("Pubkey type not known") + } + return route, nil } // RegisterAmino registers all crypto related types in the given (amino) codec. diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go index 7235ba694..80ed71459 100644 --- a/crypto/encoding/amino/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/multisig" "github.com/tendermint/tendermint/crypto/secp256k1" ) @@ -127,3 +128,24 @@ func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) { require.NotNil(t, err, "expecting a non-nil error") require.Nil(t, pk, "expecting an empty public key on error") } + +func TestPubkeyAminoRoute(t *testing.T) { + tests := []struct { + key crypto.PubKey + want string + wantErr bool + }{ + {ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoRoute, false}, + {secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoRoute, false}, + {&multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, false}, + } + for i, tc := range tests { + got, err := PubkeyAminoRoute(cdc, tc.key) + if tc.wantErr { + require.Error(t, err, "tc %d", i) + } else { + require.NoError(t, err, "tc %d", i) + require.Equal(t, tc.want, got, "tc %d", i) + } + } +} From 3fcb62b93107c17679047edcd87fc5020b197f6a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 9 Oct 2018 18:04:15 +0400 Subject: [PATCH 053/113] :tools: Update docs & fix build-docker Makefile target (#2584) bump alpine version to 3.8 --- tools/tm-bench/Dockerfile | 2 +- tools/tm-bench/Makefile | 4 +- tools/tm-bench/README.md | 85 ++++++++++++++++++++++++------------- tools/tm-monitor/Dockerfile | 2 +- tools/tm-monitor/Makefile | 4 +- tools/tm-monitor/README.md | 75 +++++++++++++++++++------------- 6 files changed, 106 insertions(+), 66 deletions(-) diff --git a/tools/tm-bench/Dockerfile b/tools/tm-bench/Dockerfile index 9adb2936e..d1069643a 100644 --- a/tools/tm-bench/Dockerfile +++ b/tools/tm-bench/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.7 +FROM alpine:3.8 WORKDIR /app COPY tm-bench /app/tm-bench diff --git a/tools/tm-bench/Makefile b/tools/tm-bench/Makefile index 79aaf0c99..8a395f98f 100644 --- a/tools/tm-bench/Makefile +++ b/tools/tm-bench/Makefile @@ -1,5 +1,5 @@ DIST_DIRS := find * -type d -exec -VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go) +VERSION := $(shell perl -ne '/^TMCoreSemVer = "([^"]+)"$$/ && print "v$$1\n"' ../../version/version.go) all: build test install @@ -37,7 +37,7 @@ dist: build-all build-docker: rm -f ./tm-bench - docker run -it --rm -v "$(PWD):/go/src/app" -w "/go/src/app" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-bench + docker run -it --rm -v "$(PWD)/../../:/go/src/github.com/tendermint/tendermint" -w "/go/src/github.com/tendermint/tendermint/tools/tm-bench" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-bench docker build -t "tendermint/bench" . 
clean: diff --git a/tools/tm-bench/README.md b/tools/tm-bench/README.md index 000f20f37..9159a7546 100644 --- a/tools/tm-bench/README.md +++ b/tools/tm-bench/README.md @@ -4,49 +4,72 @@ Tendermint blockchain benchmarking tool: - https://github.com/tendermint/tools/tree/master/tm-bench -For example, the following: - - tm-bench -T 10 -r 1000 localhost:26657 +For example, the following: `tm-bench -T 30 -r 10000 localhost:26657` will output: - Stats Avg StdDev Max Total - Txs/sec 818 532 1549 9000 - Blocks/sec 0.818 0.386 1 9 +``` +Stats Avg StdDev Max Total +Txs/sec 3981 1993 5000 119434 +Blocks/sec 0.800 0.400 1 24 +``` +NOTE: **tm-bench only works with build-in `kvstore` ABCI application**. For it +to work with your application, you will need to modify `generateTx` function. +In the future, we plan to support scriptable transactions (see +[\#1938](https://github.com/tendermint/tendermint/issues/1938)). ## Quick Start +### Docker + +``` +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init +docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore + +docker run -it --rm --link=tm tendermint/bench tm:26657 +``` + +### Using binaries + [Install Tendermint](https://github.com/tendermint/tendermint#install) -This currently is setup to work on tendermint's develop branch. Please ensure -you are on that. (If not, update `tendermint` and `tmlibs` in gopkg.toml to use - the master branch.) then run: - tendermint init - tendermint node --proxy_app=kvstore +``` +tendermint init +tendermint node --proxy_app=kvstore - tm-bench localhost:26657 +tm-bench localhost:26657 +``` -with the last command being in a seperate window. +with the last command being in a separate window. ## Usage - tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints] - - Examples: - tm-bench localhost:26657 - Flags: - -T int - Exit after the specified amount of time in seconds (default 10) - -c int - Connections to keep open per endpoint (default 1) - -r int - Txs per second to send in a connection (default 1000) - -s int - Size per tx in bytes - -v Verbose output +``` +Tendermint blockchain benchmarking tool. + +Usage: + tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints] [-output-format [-broadcast-tx-method ]] + +Examples: + tm-bench localhost:26657 +Flags: + -T int + Exit after the specified amount of time in seconds (default 10) + -broadcast-tx-method string + Broadcast method: async (no guarantees; fastest), sync (ensures tx is checked) or commit (ensures tx is checked and committed; slowest) (default "async") + -c int + Connections to keep open per endpoint (default 1) + -output-format string + Output format: plain or json (default "plain") + -r int + Txs per second to send in a connection (default 1000) + -s int + The size of a transaction in bytes, must be greater than or equal to 40. (default 250) + -v Verbose output +``` ## How stats are collected @@ -72,9 +95,11 @@ that tm-bench sends. Similarly the end of the duration will likely end mid-way through tendermint trying to build the next block. -Each of the connections is handled via two separate goroutines. +Each of the connections is handled via two separate goroutines. 
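An illustrative invocation combining the flags documented above (an editorial
example, not captured from a real run):

```
tm-bench -T 30 -r 5000 -c 2 -broadcast-tx-method sync -output-format json localhost:26657
```

This reports the same Txs/sec and Blocks/sec statistics, but as JSON rather
than the plain-text table, which is handier for scripting.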
## Development - make get_vendor_deps - make test +``` +make get_vendor_deps +make test +``` diff --git a/tools/tm-monitor/Dockerfile b/tools/tm-monitor/Dockerfile index 7edfaca66..930fb639e 100644 --- a/tools/tm-monitor/Dockerfile +++ b/tools/tm-monitor/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.6 +FROM alpine:3.8 WORKDIR /app COPY tm-monitor /app/tm-monitor diff --git a/tools/tm-monitor/Makefile b/tools/tm-monitor/Makefile index 077d60b94..901b0a14d 100644 --- a/tools/tm-monitor/Makefile +++ b/tools/tm-monitor/Makefile @@ -1,5 +1,5 @@ DIST_DIRS := find * -type d -exec -VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go) +VERSION := $(shell perl -ne '/^TMCoreSemVer = "([^"]+)"$$/ && print "v$$1\n"' ../../version/version.go) all: build test install @@ -36,7 +36,7 @@ dist: build-all build-docker: rm -f ./tm-monitor - docker run -it --rm -v "$(PWD):/go/src/github.com/tendermint/tools/tm-monitor" -w "/go/src/github.com/tendermint/tools/tm-monitor" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-monitor + docker run -it --rm -v "$(PWD)/../../:/go/src/github.com/tendermint/tendermint" -w "/go/src/github.com/tendermint/tendermint/tools/tm-monitor" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-monitor docker build -t "tendermint/monitor" . clean: diff --git a/tools/tm-monitor/README.md b/tools/tm-monitor/README.md index 4c49775e3..cf4216849 100644 --- a/tools/tm-monitor/README.md +++ b/tools/tm-monitor/README.md @@ -12,18 +12,22 @@ collecting and providing various statistics to the user: Assuming your application is running in another container with the name `app`: - docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init - docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm --link=app tendermint/tendermint node --proxy_app=tcp://app:26658 +``` +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init +docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm --link=app tendermint/tendermint node --proxy_app=tcp://app:26658 - docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +``` If you don't have an application yet, but still want to try monitor out, use `kvstore`: - docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init - docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore +``` +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init +docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore - docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +``` ### Using Binaries @@ -31,40 +35,49 @@ use `kvstore`: then run: - tendermint init - tendermint node --proxy_app=kvstore +``` +tendermint init +tendermint node --proxy_app=kvstore - tm-monitor localhost:26657 +tm-monitor localhost:26657 +``` -with the last command being in a seperate window. +with the last command being in a separate window. ## Usage - tm-monitor [-v] [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints] +``` +Tendermint monitor watches over one or more Tendermint core +applications, collecting and providing various statistics to the user. 
- Examples: - # monitor single instance - tm-monitor localhost:26657 +Usage: + tm-monitor [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints] - # monitor a few instances by providing comma-separated list of RPC endpoints - tm-monitor host1:26657,host2:26657 - Flags: - -listen-addr string - HTTP and Websocket server listen address (default "tcp://0.0.0.0:26670") - -no-ton - Do not show ton (table of nodes) - -v verbose logging +Examples: + # monitor single instance + tm-monitor localhost:26657 + + # monitor a few instances by providing comma-separated list of RPC endpoints + tm-monitor host1:26657,host2:26657 +Flags: + -listen-addr string + HTTP and Websocket server listen address (default "tcp://0.0.0.0:26670") + -no-ton + Do not show ton (table of nodes) +``` ### RPC UI Run `tm-monitor` and visit http://localhost:26670 You should see the list of the available RPC endpoints: - http://localhost:26670/status - http://localhost:26670/status/network - http://localhost:26670/monitor?endpoint=_ - http://localhost:26670/status/node?name=_ - http://localhost:26670/unmonitor?endpoint=_ +``` +http://localhost:26670/status +http://localhost:26670/status/network +http://localhost:26670/monitor?endpoint=_ +http://localhost:26670/status/node?name=_ +http://localhost:26670/unmonitor?endpoint=_ +``` The API is available as GET requests with URI encoded parameters, or as JSONRPC POST requests. The JSONRPC methods are also exposed over @@ -72,6 +85,8 @@ websocket. ## Development - make get_tools - make get_vendor_deps - make test +``` +make get_tools +make get_vendor_deps +make test +``` From d7341c4057608ece4da42e3706f7dfcaf11efc32 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Tue, 9 Oct 2018 10:28:15 -0700 Subject: [PATCH 054/113] distribution: Lock binary dependencies to specific commits (#2550) --- CHANGELOG_PENDING.md | 1 + Makefile | 16 +++++++------ scripts/get_tools.sh | 53 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 7 deletions(-) create mode 100755 scripts/get_tools.sh diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 0cd8bd2fe..faaf17562 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -38,6 +38,7 @@ IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [config] \#2232 added ValidateBasic method, which performs basic checks +- [tools] \#2238 Binary dependencies are now locked to a specific git commit - [crypto] \#2099 make crypto random use chacha, and have forward secrecy of generated randomness BUG FIXES: diff --git a/Makefile b/Makefile index 5711e4317..0b78574b9 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,17 @@ GOTOOLS = \ github.com/mitchellh/gox \ github.com/golang/dep/cmd/dep \ - gopkg.in/alecthomas/gometalinter.v2 \ + github.com/alecthomas/gometalinter \ github.com/gogo/protobuf/protoc-gen-gogo \ github.com/square/certstrap +GOBIN?=${GOPATH}/bin PACKAGES=$(shell go list ./...) INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf BUILD_TAGS?='tendermint' BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" -LINT_FLAGS = --exclude '.*\.pb\.go' --vendor --deadline=600s +LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s all: check build test install @@ -75,12 +76,13 @@ check_tools: get_tools: @echo "--> Installing tools" - go get -u -v $(GOTOOLS) - @gometalinter.v2 --install + ./scripts/get_tools.sh + @echo "--> Downloading linters (this may take awhile)" + $(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN) update_tools: @echo "--> Updating tools" - go get -u -v $(GOTOOLS) + ./scripts/get_tools.sh #Update dependencies get_vendor_deps: @@ -227,7 +229,7 @@ fmt: metalinter: @echo "--> Running linter" - @gometalinter.v2 $(LINT_FLAGS) --disable-all \ + @gometalinter $(LINT_FLAGS) --disable-all \ --enable=deadcode \ --enable=gosimple \ --enable=misspell \ @@ -256,7 +258,7 @@ metalinter: metalinter_all: @echo "--> Running linter (all)" - gometalinter.v2 $(LINT_FLAGS) --enable-all --disable=lll ./... + gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./... DESTINATION = ./index.html.md diff --git a/scripts/get_tools.sh b/scripts/get_tools.sh new file mode 100755 index 000000000..955ec943a --- /dev/null +++ b/scripts/get_tools.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -e + +# This file downloads all of the binary dependencies we have, and checks out a +# specific git hash. +# +# repos it installs: +# github.com/mitchellh/gox +# github.com/golang/dep/cmd/dep +# gopkg.in/alecthomas/gometalinter.v2 +# github.com/gogo/protobuf/protoc-gen-gogo +# github.com/square/certstrap + +## check if GOPATH is set +if [ -z ${GOPATH+x} ]; then + echo "please set GOPATH (https://github.com/golang/go/wiki/SettingGOPATH)" + exit 1 +fi + +mkdir -p "$GOPATH/src/github.com" +cd "$GOPATH/src/github.com" || exit 1 + +installFromGithub() { + repo=$1 + commit=$2 + # optional + subdir=$3 + echo "--> Installing $repo ($commit)..." + if [ ! -d "$repo" ]; then + mkdir -p "$repo" + git clone "https://github.com/$repo.git" "$repo" + fi + if [ ! -z ${subdir+x} ] && [ ! -d "$repo/$subdir" ]; then + echo "ERROR: no such directory $repo/$subdir" + exit 1 + fi + pushd "$repo" && \ + git fetch origin && \ + git checkout -q "$commit" && \ + if [ ! -z ${subdir+x} ]; then cd "$subdir" || exit 1; fi && \ + go install && \ + if [ ! 
-z ${subdir+x} ]; then cd - || exit 1; fi && \ + popd || exit 1 + echo "--> Done" + echo "" +} + +installFromGithub mitchellh/gox 51ed453898ca5579fea9ad1f08dff6b121d9f2e8 +installFromGithub golang/dep 22125cfaa6ddc71e145b1535d4b7ee9744fefff2 cmd/dep +## gometalinter v2.0.11 +installFromGithub alecthomas/gometalinter 17a7ffa42374937bfecabfb8d2efbd4db0c26741 +installFromGithub gogo/protobuf 61dbc136cf5d2f08d68a011382652244990a53a9 protoc-gen-gogo +installFromGithub square/certstrap e27060a3643e814151e65b9807b6b06d169580a7 From 05a119aab50f612027d48c7cc3d6f21b342e3f47 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 9 Oct 2018 21:31:06 +0400 Subject: [PATCH 055/113] libs: Test deadlock from listener removal inside callback (#2588) Closes #2575 --- libs/events/events_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 7530afa98..8d87986c7 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -22,6 +22,8 @@ func TestAddListenerForEventFireOnce(t *testing.T) { messages := make(chan EventData) evsw.AddListenerForEvent("listener", "event", func(data EventData) { + // test there's no deadlock if we remove the listener inside a callback + evsw.RemoveListener("listener") messages <- data }) go evsw.FireEvent("event", "data") From 6ec52a9233baf77a2e26e47671b4f9120b3991fd Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 9 Oct 2018 13:31:21 -0400 Subject: [PATCH 056/113] types: cap evidence in block validation (#2560) * cap evidence in block validation * state: use table-driven test for ValidateBlockHeader * state: test evidence cap * fixes from review --- CHANGELOG_PENDING.md | 4 +- state/validation.go | 12 ++-- state/validation_test.go | 149 +++++++++++++++++++++++++-------------- types/evidence.go | 19 ++++- 4 files changed, 126 insertions(+), 58 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index faaf17562..66f2fdc1b 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -49,6 +49,8 @@ timeoutPrecommit before starting next round - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function - [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar) -- [common] \#2534 make bit array's PickRandom choose uniformly from true bits +- [common] \#2534 Make bit array's PickRandom choose uniformly from true bits +- [consensus] \#1637 Limit the amount of evidence that can be included in a + block - [p2p] \#2555 fix p2p switch FlushThrottle value (@goolAdapter) - [libs/event] \#2518 fix event concurrency flaw (@goolAdapter) diff --git a/state/validation.go b/state/validation.go index ccfe1ef12..9d8ef97a2 100644 --- a/state/validation.go +++ b/state/validation.go @@ -125,13 +125,17 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } } + // Limit the amount of evidence + maxEvidenceBytes := types.MaxEvidenceBytesPerBlock(state.ConsensusParams.BlockSize.MaxBytes) + evidenceBytes := int64(len(block.Evidence.Evidence)) * types.MaxEvidenceBytes + if evidenceBytes > maxEvidenceBytes { + return types.NewErrEvidenceOverflow(maxEvidenceBytes, evidenceBytes) + } + // Validate all evidence. - // TODO: Each check requires loading an old validator set. - // We should cap the amount of evidence per block - // to prevent potential proposer DoS. 
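// Editor's note (added for clarity; not part of the original commit): the cap
// introduced above charges every evidence item its worst-case encoded size
// (types.MaxEvidenceBytes) rather than its actual size, presumably to keep the
// check cheap and independent of encoding details. A caller can distinguish
// this failure from other validation errors with a type assertion, mirroring
// the test added later in this patch:
//
//	if err := blockExec.ValidateBlock(state, block); err != nil {
//		if _, ok := err.(*types.ErrEvidenceOverflow); ok {
//			// too much evidence for the block's byte budget
//		}
//	}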
for _, ev := range block.Evidence.Evidence { if err := VerifyEvidence(stateDB, state, ev); err != nil { - return types.NewEvidenceInvalidErr(ev, err) + return types.NewErrEvidenceInvalid(ev, err) } } diff --git a/state/validation_test.go b/state/validation_test.go index ba76a72bc..e5f45166c 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -5,74 +5,119 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" - dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" ) -func TestValidateBlock(t *testing.T) { - state, _ := state(1, 1) +// TODO(#2589): +// - generalize this past the first height +// - add txs and build up full State properly +// - test block.Time (see #2587 - there are no conditions on time for the first height) +func TestValidateBlockHeader(t *testing.T) { + var height int64 = 1 // TODO(#2589): generalize + state, stateDB := state(1, int(height)) - blockExec := NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nil, nil, nil) + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil) - // proper block must pass - block := makeBlock(state, 1) + // A good block passes. + block := makeBlock(state, height) err := blockExec.ValidateBlock(state, block) require.NoError(t, err) - // wrong chain fails - block = makeBlock(state, 1) - block.ChainID = "not-the-real-one" - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + wrongHash := tmhash.Sum([]byte("this hash is wrong")) - // wrong height fails - block = makeBlock(state, 1) - block.Height += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + // Manipulation of any header field causes failure. 
+ testCases := []struct { + name string + malleateBlock func(block *types.Block) + }{ + {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, // wrong chain id + {"Height wrong", func(block *types.Block) { block.Height += 10 }}, // wrong height + // TODO(#2589) (#2587) : {"Time", func(block *types.Block) { block.Time.Add(-time.Second * 3600 * 24) }}, // wrong time + {"NumTxs wrong", func(block *types.Block) { block.NumTxs += 10 }}, // wrong num txs + {"TotalTxs wrong", func(block *types.Block) { block.TotalTxs += 10 }}, // wrong total txs - // wrong total tx fails - block = makeBlock(state, 1) - block.TotalTxs += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartsHeader.Total += 10 }}, + {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, + {"DataHash wrong", func(block *types.Block) { block.DataHash = wrongHash }}, - // wrong blockid fails - block = makeBlock(state, 1) - block.LastBlockID.PartsHeader.Total += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + {"ValidatorsHash wrong", func(block *types.Block) { block.ValidatorsHash = wrongHash }}, + {"NextValidatorsHash wrong", func(block *types.Block) { block.NextValidatorsHash = wrongHash }}, + {"ConsensusHash wrong", func(block *types.Block) { block.ConsensusHash = wrongHash }}, + {"AppHash wrong", func(block *types.Block) { block.AppHash = wrongHash }}, + {"LastResultsHash wrong", func(block *types.Block) { block.LastResultsHash = wrongHash }}, - // wrong app hash fails - block = makeBlock(state, 1) - block.AppHash = []byte("wrong app hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + {"EvidenceHash wrong", func(block *types.Block) { block.EvidenceHash = wrongHash }}, + {"Proposer wrong", func(block *types.Block) { block.ProposerAddress = ed25519.GenPrivKey().PubKey().Address() }}, + {"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }}, + } - // wrong consensus hash fails - block = makeBlock(state, 1) - block.ConsensusHash = []byte("wrong consensus hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + for _, tc := range testCases { + block := makeBlock(state, height) + tc.malleateBlock(block) + err := blockExec.ValidateBlock(state, block) + require.Error(t, err, tc.name) + } +} - // wrong results hash fails - block = makeBlock(state, 1) - block.LastResultsHash = []byte("wrong results hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) +/* + TODO(#2589): + - test Block.Data.Hash() == Block.DataHash + - test len(Block.Data.Txs) == Block.NumTxs +*/ +func TestValidateBlockData(t *testing.T) { +} - // wrong validators hash fails - block = makeBlock(state, 1) - block.ValidatorsHash = []byte("wrong validators hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) +/* + TODO(#2589): + - test len(block.LastCommit.Precommits) == state.LastValidators.Size() + - test state.LastValidators.VerifyCommit +*/ +func TestValidateBlockCommit(t *testing.T) { +} - // wrong proposer address - block = makeBlock(state, 1) - block.ProposerAddress = ed25519.GenPrivKey().PubKey().Address() - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - block.ProposerAddress = []byte("wrong size") +/* + TODO(#2589): + - test good/bad evidence in block +*/ +func TestValidateBlockEvidence(t *testing.T) { + var height int64 = 1 // 
TODO(#2589): generalize + state, stateDB := state(1, int(height)) + + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil) + + // make some evidence + addr, _ := state.Validators.GetByIndex(0) + goodEvidence := types.NewMockGoodEvidence(height, 0, addr) + + // A block with a couple pieces of evidence passes. + block := makeBlock(state, height) + block.Evidence.Evidence = []types.Evidence{goodEvidence, goodEvidence} + block.EvidenceHash = block.Evidence.Hash() + err := blockExec.ValidateBlock(state, block) + require.NoError(t, err) + + // A block with too much evidence fails. + maxBlockSize := state.ConsensusParams.BlockSize.MaxBytes + maxEvidenceBytes := types.MaxEvidenceBytesPerBlock(maxBlockSize) + maxEvidence := maxEvidenceBytes / types.MaxEvidenceBytes + require.True(t, maxEvidence > 2) + for i := int64(0); i < maxEvidence; i++ { + block.Evidence.Evidence = append(block.Evidence.Evidence, goodEvidence) + } + block.EvidenceHash = block.Evidence.Hash() err = blockExec.ValidateBlock(state, block) require.Error(t, err) + _, ok := err.(*types.ErrEvidenceOverflow) + require.True(t, ok) +} + +/* + TODO(#2589): + - test unmarshalling BlockParts that are too big into a Block that + (note this logic happens in the consensus, not in the validation here). + - test making blocks from the types.MaxXXX functions works/fails as expected +*/ +func TestValidateBlockSize(t *testing.T) { } diff --git a/types/evidence.go b/types/evidence.go index 241e09391..916dd094f 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -21,7 +21,8 @@ type ErrEvidenceInvalid struct { ErrorValue error } -func NewEvidenceInvalidErr(ev Evidence, err error) *ErrEvidenceInvalid { +// NewErrEvidenceInvalid returns a new EvidenceInvalid with the given err. +func NewErrEvidenceInvalid(ev Evidence, err error) *ErrEvidenceInvalid { return &ErrEvidenceInvalid{ev, err} } @@ -30,6 +31,22 @@ func (err *ErrEvidenceInvalid) Error() string { return fmt.Sprintf("Invalid evidence: %v. Evidence: %v", err.ErrorValue, err.Evidence) } +// ErrEvidenceOverflow is for when there is too much evidence in a block. +type ErrEvidenceOverflow struct { + MaxBytes int64 + GotBytes int64 +} + +// NewErrEvidenceOverflow returns a new ErrEvidenceOverflow where got > max. +func NewErrEvidenceOverflow(max, got int64) *ErrEvidenceOverflow { + return &ErrEvidenceOverflow{max, got} +} + +// Error returns a string representation of the error. +func (err *ErrEvidenceOverflow) Error() string { + return fmt.Sprintf("Too much evidence: Max %d bytes, got %d bytes", err.MaxBytes, err.GotBytes) +} + //------------------------------------------- // Evidence represents any provable malicious activity by a validator From ee7b3d260e35c656290caace6255bee3a639a2bc Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Wed, 10 Oct 2018 01:13:42 -0700 Subject: [PATCH 057/113] crypto/amino: Address anton's comment on PubkeyAminoRoute (#2592) --- crypto/encoding/amino/amino.go | 16 ++++++++-------- crypto/encoding/amino/encode_test.go | 27 ++++++++++++--------------- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go index d0ae8ebf0..d66ecd9b1 100644 --- a/crypto/encoding/amino/amino.go +++ b/crypto/encoding/amino/amino.go @@ -1,8 +1,6 @@ package cryptoAmino import ( - "errors" - "reflect" amino "github.com/tendermint/go-amino" @@ -13,6 +11,11 @@ import ( ) var cdc = amino.NewCodec() + +// routeTable is used to map public key concrete types back +// to their amino routes. 
This should eventually be handled +// by amino. Example usage: +// routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute var routeTable = make(map[reflect.Type]string, 3) func init() { @@ -34,12 +37,9 @@ func init() { // PubkeyAminoRoute returns the amino route of a pubkey // cdc is currently passed in, as eventually this will not be using // a package level codec. -func PubkeyAminoRoute(cdc *amino.Codec, key crypto.PubKey) (string, error) { - route, ok := routeTable[reflect.TypeOf(key)] - if !ok { - return "", errors.New("Pubkey type not known") - } - return route, nil +func PubkeyAminoRoute(cdc *amino.Codec, key crypto.PubKey) (string, bool) { + route, found := routeTable[reflect.TypeOf(key)] + return route, found } // RegisterAmino registers all crypto related types in the given (amino) codec. diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go index 80ed71459..056dbec44 100644 --- a/crypto/encoding/amino/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -120,32 +120,29 @@ func TestNilEncodings(t *testing.T) { var e, f crypto.PrivKey checkAminoJSON(t, &e, &f, true) assert.EqualValues(t, e, f) - } func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) { pk, err := PubKeyFromBytes([]byte("foo")) - require.NotNil(t, err, "expecting a non-nil error") - require.Nil(t, pk, "expecting an empty public key on error") + require.NotNil(t, err) + require.Nil(t, pk) } func TestPubkeyAminoRoute(t *testing.T) { tests := []struct { - key crypto.PubKey - want string - wantErr bool + key crypto.PubKey + want string + found bool }{ - {ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoRoute, false}, - {secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoRoute, false}, - {&multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, false}, + {ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoRoute, true}, + {secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoRoute, true}, + {&multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, true}, } for i, tc := range tests { - got, err := PubkeyAminoRoute(cdc, tc.key) - if tc.wantErr { - require.Error(t, err, "tc %d", i) - } else { - require.NoError(t, err, "tc %d", i) - require.Equal(t, tc.want, got, "tc %d", i) + got, found := PubkeyAminoRoute(cdc, tc.key) + require.Equal(t, tc.found, found, "not equal on tc %d", i) + if tc.found { + require.Equal(t, tc.want, got, "not equal on tc %d", i) } } } From 92343ef484374b6010a87e8c3c8de9fb77e0ef7a Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 10 Oct 2018 09:27:43 -0700 Subject: [PATCH 058/113] Add additional metrics (#2500) * Add additional metrics Continues addressing https://github.com/cosmos/cosmos-sdk/issues/2169. 
* Add nop metrics to fix NPE * Tweak buckets, code review * Update buckets * Update docs with new metrics * Code review updates --- consensus/metrics.go | 10 +++++++ consensus/reactor.go | 4 +-- docs/tendermint-core/metrics.md | 50 ++++++++++++++++++------------- mempool/mempool.go | 4 ++- mempool/metrics.go | 36 ++++++++++++++++++++-- node/node.go | 26 ++++++++++------ p2p/metrics.go | 12 +++++++- state/execution.go | 53 +++++++++++++++++++++++++++------ state/metrics.go | 33 ++++++++++++++++++++ 9 files changed, 183 insertions(+), 45 deletions(-) create mode 100644 state/metrics.go diff --git a/consensus/metrics.go b/consensus/metrics.go index 39bfd24bd..7b4a3fbc9 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -44,6 +44,9 @@ type Metrics struct { CommittedHeight metrics.Gauge // Whether or not a node is fast syncing. 1 if yes, 0 if no. FastSyncing metrics.Gauge + + // Number of blockparts transmitted by peer. + BlockParts metrics.Counter } // PrometheusMetrics returns Metrics build using Prometheus client library. @@ -136,6 +139,12 @@ func PrometheusMetrics(namespace string) *Metrics { Name: "fast_syncing", Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.", }, []string{}), + BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_parts", + Help: "Number of blockparts transmitted by peer.", + }, []string{"peer_id"}), } } @@ -160,5 +169,6 @@ func NopMetrics() *Metrics { TotalTxs: discard.NewGauge(), CommittedHeight: discard.NewGauge(), FastSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), } } diff --git a/consensus/reactor.go b/consensus/reactor.go index 376b8eda9..ca63e8992 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -8,7 +8,7 @@ import ( "github.com/pkg/errors" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" cstypes "github.com/tendermint/tendermint/consensus/types" cmn "github.com/tendermint/tendermint/libs/common" @@ -274,7 +274,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) ps.ApplyProposalPOLMessage(msg) case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index) - + conR.metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md index b469c6890..ad6d4c765 100644 --- a/docs/tendermint-core/metrics.md +++ b/docs/tendermint-core/metrics.md @@ -8,35 +8,45 @@ This functionality is disabled by default. To enable the Prometheus metrics, set `instrumentation.prometheus=true` if your config file. Metrics will be served under `/metrics` on 26660 port by default. Listen address can be changed in the config file (see -`instrumentation.prometheus_listen_addr`). +`instrumentation.prometheus\_listen\_addr`). 
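Once `instrumentation.prometheus=true` is set, the metrics listed below can be read straight off that endpoint. A minimal sketch, assuming the default `26660` port described above (adjust if `prometheus_listen_addr` is changed):

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Default listen address; override if prometheus_listen_addr is set.
	resp, err := http.Get("http://localhost:26660/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Prometheus text exposition format, one line per metric sample.
	fmt.Println(string(body))
}
```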
## List of available metrics The following metrics are available: -``` -| Name | Type | Since | Description | -| --------------------------------------- | ------- | --------- | ----------------------------------------------------------------------------- | -| consensus_height | Gauge | 0.21.0 | Height of the chain | -| consensus_validators | Gauge | 0.21.0 | Number of validators | -| consensus_validators_power | Gauge | 0.21.0 | Total voting power of all validators | -| consensus_missing_validators | Gauge | 0.21.0 | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | 0.21.0 | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | 0.21.0 | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | 0.21.0 | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | 0.21.0 | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | 0.21.0 | Number of rounds | -| consensus_num_txs | Gauge | 0.21.0 | Number of transactions | -| mempool_size | Gauge | 0.21.0 | Number of uncommitted transactions | -| consensus_total_txs | Gauge | 0.21.0 | Total number of transactions committed | -| consensus_block_size_bytes | Gauge | 0.21.0 | Block size in bytes | -| p2p_peers | Gauge | 0.21.0 | Number of peers node's connected to | -``` +| **Name** | **Type** | **Since** | **Tags** | **Description** | +|-----------------------------------------|-----------|-----------|----------|-----------------------------------------------------------------| +| consensus\_height | Gauge | 0.21.0 | | Height of the chain | +| consensus\_validators | Gauge | 0.21.0 | | Number of validators | +| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators | +| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign | +| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators | +| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign | +| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators | +| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds | +| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds | +| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions | +| consensus\_block\_parts | counter | on dev | peer\_id | number of blockparts transmitted by peer | +| consensus\_latest\_block\_height | gauge | on dev | | /status sync\_info number | +| consensus\_fast\_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) | +| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed | +| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in bytes | +| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to | +| p2p\_peer\_receive\_bytes\_total | counter | on dev | peer\_id | number of bytes received from a given peer | +| p2p\_peer\_send\_bytes\_total | counter | on dev | peer\_id | number of bytes sent to a given peer | +| p2p\_peer\_pending\_send\_bytes | gauge | on dev | peer\_id | number of pending bytes to be sent to a given peer | +| p2p\_num\_txs | gauge | on dev | peer\_id | number of transactions submitted by each peer\_id | +| p2p\_pending\_send\_bytes | gauge | on dev | peer\_id | 
amount of data pending to be sent to peer | +| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions | +| mempool\_tx\_size\_bytes | histogram | on dev | | transaction sizes in bytes | +| mempool\_failed\_txs | counter | on dev | | number of failed transactions | +| mempool\_recheck\_times | counter | on dev | | number of transactions rechecked in the mempool | +| state\_block\_processing\_time | histogram | on dev | | time between BeginBlock and EndBlock in ms | ## Useful queries Percentage of missing + byzantine validators: ``` -((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100 +((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100 ``` diff --git a/mempool/mempool.go b/mempool/mempool.go index db5f6160c..65cd55354 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -326,6 +326,7 @@ func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { if mem.recheckCursor == nil { mem.resCbNormal(req, res) } else { + mem.metrics.RecheckTimes.Add(1) mem.resCbRecheck(req, res) } mem.metrics.Size.Set(float64(mem.Size())) @@ -346,11 +347,12 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { } mem.txs.PushBack(memTx) mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "total", mem.Size()) + mem.metrics.TxSizeBytes.Observe(float64(len(tx))) mem.notifyTxsAvailable() } else { // ignore bad transaction mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r) - + mem.metrics.FailedTxs.Add(1) // remove from cache (it might be good later) mem.cache.Remove(tx) } diff --git a/mempool/metrics.go b/mempool/metrics.go index fc4bb4fbe..3418f1efe 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -3,15 +3,23 @@ package mempool import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) +const MetricsSubsytem = "mempool" + // Metrics contains metrics exposed by this package. // see MetricsProvider for descriptions. type Metrics struct { // Size of the mempool. Size metrics.Gauge + // Histogram of transaction sizes, in bytes. + TxSizeBytes metrics.Histogram + // Number of failed transactions. + FailedTxs metrics.Counter + // Number of times transactions are rechecked in the mempool. + RecheckTimes metrics.Counter } // PrometheusMetrics returns Metrics build using Prometheus client library. 
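The new metrics above all follow the same go-kit/Prometheus pattern: label names are fixed when the metric is constructed, and `With("peer_id", ...)` selects the per-peer time series before `Add`/`Observe`. A self-contained sketch of that pattern (the namespace, peer id, and observed value are illustrative, not taken from the patch):

```
package main

import (
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A counter with a peer_id label, analogous to consensus_block_parts above.
	blockParts := prometheus.NewCounterFrom(stdprometheus.CounterOpts{
		Namespace: "tendermint",
		Subsystem: "consensus",
		Name:      "block_parts",
		Help:      "Number of blockparts transmitted by peer.",
	}, []string{"peer_id"})

	// Each distinct label value becomes its own time series.
	blockParts.With("peer_id", "0a1b2c").Add(1)

	// An unlabeled histogram, analogous to mempool_tx_size_bytes above.
	txSizeBytes := prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
		Namespace: "tendermint",
		Subsystem: "mempool",
		Name:      "tx_size_bytes",
		Help:      "Transaction sizes in bytes.",
		Buckets:   stdprometheus.ExponentialBuckets(1, 3, 17), // 1, 3, 9, ..., 3^16
	}, []string{})

	txSizeBytes.Observe(250)
}
```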
@@ -19,16 +27,38 @@ func PrometheusMetrics(namespace string) *Metrics { return &Metrics{ Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, - Subsystem: "mempool", + Subsystem: MetricsSubsytem, Name: "size", Help: "Size of the mempool (number of uncommitted transactions).", }, []string{}), + TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsytem, + Name: "tx_size_bytes", + Help: "Transaction sizes in bytes.", + Buckets: stdprometheus.ExponentialBuckets(1, 3, 17), + }, []string{}), + FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsytem, + Name: "failed_txs", + Help: "Number of failed transactions.", + }, []string{}), + RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsytem, + Name: "recheck_times", + Help: "Number of times transactions are rechecked in the mempool.", + }, []string{}), } } // NopMetrics returns no-op Metrics. func NopMetrics() *Metrics { return &Metrics{ - Size: discard.NewGauge(), + Size: discard.NewGauge(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), } } diff --git a/node/node.go b/node/node.go index 9f9e3636f..d1ab0f86a 100644 --- a/node/node.go +++ b/node/node.go @@ -13,7 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" abci "github.com/tendermint/tendermint/abci/types" bc "github.com/tendermint/tendermint/blockchain" @@ -32,8 +32,8 @@ import ( rpccore "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" grpccore "github.com/tendermint/tendermint/rpc/grpc" - rpc "github.com/tendermint/tendermint/rpc/lib" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + "github.com/tendermint/tendermint/rpc/lib" + "github.com/tendermint/tendermint/rpc/lib/server" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/kv" @@ -98,16 +98,17 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { } // MetricsProvider returns a consensus, p2p and mempool Metrics. -type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) +type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) // DefaultMetricsProvider returns Metrics build using Prometheus client library // if Prometheus is enabled. Otherwise, it returns no-op Metrics. 
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { - return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) { + return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { if config.Prometheus { - return cs.PrometheusMetrics(config.Namespace), p2p.PrometheusMetrics(config.Namespace), mempl.PrometheusMetrics(config.Namespace) + return cs.PrometheusMetrics(config.Namespace), p2p.PrometheusMetrics(config.Namespace), + mempl.PrometheusMetrics(config.Namespace), sm.PrometheusMetrics(config.Namespace) } - return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics() + return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() } } @@ -245,7 +246,7 @@ func NewNode(config *cfg.Config, consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey()) } - csMetrics, p2pMetrics, memplMetrics := metricsProvider() + csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider() // Make MempoolReactor mempool := mempl.NewMempool( @@ -289,7 +290,14 @@ func NewNode(config *cfg.Config, blockExecLogger := logger.With("module", "state") // make block executor for consensus and blockchain reactors to execute blocks - blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool) + blockExec := sm.NewBlockExecutor( + stateDB, + blockExecLogger, + proxyApp.Consensus(), + mempool, + evidencePool, + sm.BlockExecutorWithMetrics(smMetrics), + ) // Make BlockchainReactor bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) diff --git a/p2p/metrics.go b/p2p/metrics.go index 94794dfd9..86a205056 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -3,7 +3,7 @@ package p2p import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -19,6 +19,8 @@ type Metrics struct { PeerSendBytesTotal metrics.Counter // Pending bytes to be sent to a given peer. PeerPendingSendBytes metrics.Gauge + // Number of transactions submitted by each peer. + NumTxs metrics.Gauge } // PrometheusMetrics returns Metrics build using Prometheus client library. 
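The `BlockExecutor` wiring above passes `sm.BlockExecutorWithMetrics(smMetrics)` as a variadic functional option, so existing call sites keep compiling and metrics default to no-ops. A generic, self-contained sketch of that pattern (all names below are placeholders, not from the patch):

```
package main

import "fmt"

// Executor stands in for a type like BlockExecutor; metrics defaults to a no-op.
type Executor struct {
	metrics string
}

// Option mirrors the BlockExecutorOption shape: a function that mutates the struct.
type Option func(*Executor)

// WithMetrics mirrors BlockExecutorWithMetrics.
func WithMetrics(m string) Option {
	return func(e *Executor) { e.metrics = m }
}

// NewExecutor sets defaults first, then applies options, like NewBlockExecutor above.
func NewExecutor(options ...Option) *Executor {
	e := &Executor{metrics: "nop"}
	for _, opt := range options {
		opt(e)
	}
	return e
}

func main() {
	fmt.Println(NewExecutor().metrics)                    // "nop"
	fmt.Println(NewExecutor(WithMetrics("prom")).metrics) // "prom"
}
```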
@@ -48,6 +50,13 @@ func PrometheusMetrics(namespace string) *Metrics { Name: "peer_pending_send_bytes", Help: "Number of pending bytes to be sent to a given peer.", }, []string{"peer_id"}), + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_txs", + Help: "Number of transactions submitted by each peer.", + }, []string{"peer_id"}), + } } @@ -58,5 +67,6 @@ func NopMetrics() *Metrics { PeerReceiveBytesTotal: discard.NewCounter(), PeerSendBytesTotal: discard.NewCounter(), PeerPendingSendBytes: discard.NewGauge(), + NumTxs: discard.NewGauge(), } } diff --git a/state/execution.go b/state/execution.go index c6d5ce0a1..d5a1a1617 100644 --- a/state/execution.go +++ b/state/execution.go @@ -2,8 +2,9 @@ package state import ( "fmt" + "time" - fail "github.com/ebuchman/fail-test" + "github.com/ebuchman/fail-test" abci "github.com/tendermint/tendermint/abci/types" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" @@ -33,20 +34,37 @@ type BlockExecutor struct { evpool EvidencePool logger log.Logger + + metrics *Metrics +} + +type BlockExecutorOption func(executor *BlockExecutor) + +func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { + return func(blockExec *BlockExecutor) { + blockExec.metrics = metrics + } } // NewBlockExecutor returns a new BlockExecutor with a NopEventBus. // Call SetEventBus to provide one. func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, - mempool Mempool, evpool EvidencePool) *BlockExecutor { - return &BlockExecutor{ + mempool Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor { + res := &BlockExecutor{ db: db, proxyApp: proxyApp, eventBus: types.NopEventBus{}, mempool: mempool, evpool: evpool, logger: logger, + metrics: NopMetrics(), } + + for _, option := range options { + option(res) + } + + return res } // SetEventBus - sets the event bus for publishing block related events. @@ -74,7 +92,10 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b return state, ErrInvalidBlock(err) } + startTime := time.Now().UnixNano() abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db) + endTime := time.Now().UnixNano() + blockExec.metrics.BlockProcessingTime.Observe(float64(endTime - startTime) / 1000000) if err != nil { return state, ErrProxyAppConn(err) } @@ -176,8 +197,13 @@ func (blockExec *BlockExecutor) Commit( // Executes block's transactions on proxyAppConn. // Returns a list of transaction results and updates to the validator set -func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, - block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (*ABCIResponses, error) { +func execBlockOnProxyApp( + logger log.Logger, + proxyAppConn proxy.AppConnConsensus, + block *types.Block, + lastValSet *types.ValidatorSet, + stateDB dbm.DB, +) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 txIndex := 0 @@ -333,8 +359,12 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat } // updateState returns a new State updated according to the header and responses. 
-func updateState(state State, blockID types.BlockID, header *types.Header, - abciResponses *ABCIResponses) (State, error) { +func updateState( + state State, + blockID types.BlockID, + header *types.Header, + abciResponses *ABCIResponses, +) (State, error) { // Copy the valset so we can apply changes from EndBlock // and update s.LastValidators and s.Validators. @@ -417,8 +447,13 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit). -func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, - logger log.Logger, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]byte, error) { +func ExecCommitBlock( + appConnConsensus proxy.AppConnConsensus, + block *types.Block, + logger log.Logger, + lastValSet *types.ValidatorSet, + stateDB dbm.DB, +) ([]byte, error) { _, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) diff --git a/state/metrics.go b/state/metrics.go new file mode 100644 index 000000000..7acbafa30 --- /dev/null +++ b/state/metrics.go @@ -0,0 +1,33 @@ +package state + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/go-kit/kit/metrics/discard" +) + +const MetricsSubsystem = "state" + +type Metrics struct { + // Time between BeginBlock and EndBlock. + BlockProcessingTime metrics.Histogram +} + +func PrometheusMetrics(namespace string) *Metrics { + return &Metrics{ + BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_processing_time", + Help: "Time between BeginBlock and EndBlock in ms.", + Buckets: stdprometheus.LinearBuckets(1, 10, 10), + }, []string{}), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + BlockProcessingTime: discard.NewHistogram(), + } +} From 12fa9d1cab1813cb1657061fcf2f6b80b944b616 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Wed, 10 Oct 2018 09:46:09 -0700 Subject: [PATCH 059/113] crypto/merkle: Remove byter in favor of plain byte slices (#2595) * crypto/merkle: Remove byter in favor of plain byte slices This PR is fully backwards compatible in terms of function output! (The Go API differs though) The only test case changes was to refactor it to be table driven. 
* Update godocs per review comments --- CHANGELOG_PENDING.md | 1 + crypto/merkle/simple_map.go | 28 ++++++++----- crypto/merkle/simple_map_test.go | 59 +++++++++----------------- crypto/merkle/simple_proof.go | 25 +++++------ crypto/merkle/simple_tree.go | 9 ++-- crypto/merkle/simple_tree_test.go | 8 ++-- crypto/merkle/types.go | 5 --- types/block.go | 69 +++++++++---------------------- types/encoding_helper.go | 14 +++++++ types/evidence.go | 8 ++-- types/params.go | 8 ++-- types/part_set.go | 6 +-- types/results.go | 19 +++++---- types/tx.go | 6 +-- types/validator.go | 14 ++++++- types/validator_set.go | 6 +-- 16 files changed, 134 insertions(+), 151 deletions(-) create mode 100644 types/encoding_helper.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 66f2fdc1b..7e6a30968 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -23,6 +23,7 @@ BREAKING CHANGES: * [rpc/client] \#2298 `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`. * [crypto/merkle & lite] \#2298 Various changes to accomodate General Merkle trees + * [crypto/merkle] \#2595 Remove all Hasher objects in favor of byte slices * Blockchain Protocol * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go index ba4b9309a..bde442203 100644 --- a/crypto/merkle/simple_map.go +++ b/crypto/merkle/simple_map.go @@ -1,6 +1,9 @@ package merkle import ( + "bytes" + + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -20,14 +23,15 @@ func newSimpleMap() *simpleMap { } } -// Set hashes the key and value and appends it to the kv pairs. -func (sm *simpleMap) Set(key string, value Hasher) { +// Set creates a kv pair of the key and the hash of the value, +// and then appends it to simpleMap's kv pairs. +func (sm *simpleMap) Set(key string, value []byte) { sm.sorted = false // The value is hashed, so you can // check for equality with a cached value (say) // and make a determination to fetch or not. - vhash := value.Hash() + vhash := tmhash.Sum(value) sm.kvs = append(sm.kvs, cmn.KVPair{ Key: []byte(key), @@ -66,23 +70,25 @@ func (sm *simpleMap) KVPairs() cmn.KVPairs { // then hashed. type KVPair cmn.KVPair -func (kv KVPair) Hash() []byte { - hasher := tmhash.New() - err := encodeByteSlice(hasher, kv.Key) +// Bytes returns key || value, with both the +// key and value length prefixed. 
+func (kv KVPair) Bytes() []byte { + var b bytes.Buffer + err := amino.EncodeByteSlice(&b, kv.Key) if err != nil { panic(err) } - err = encodeByteSlice(hasher, kv.Value) + err = amino.EncodeByteSlice(&b, kv.Value) if err != nil { panic(err) } - return hasher.Sum(nil) + return b.Bytes() } func hashKVPairs(kvs cmn.KVPairs) []byte { - kvsH := make([]Hasher, len(kvs)) + kvsH := make([][]byte, len(kvs)) for i, kvp := range kvs { - kvsH[i] = KVPair(kvp) + kvsH[i] = KVPair(kvp).Bytes() } - return SimpleHashFromHashers(kvsH) + return SimpleHashFromByteSlices(kvsH) } diff --git a/crypto/merkle/simple_map_test.go b/crypto/merkle/simple_map_test.go index 34febcf16..bc095c003 100644 --- a/crypto/merkle/simple_map_test.go +++ b/crypto/merkle/simple_map_test.go @@ -5,50 +5,29 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/crypto/tmhash" ) -type strHasher string - -func (str strHasher) Hash() []byte { - return tmhash.Sum([]byte(str)) -} - func TestSimpleMap(t *testing.T) { - { - db := newSimpleMap() - db.Set("key1", strHasher("value1")) - assert.Equal(t, "fa9bc106ffd932d919bee935ceb6cf2b3dd72d8f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key1", strHasher("value2")) - assert.Equal(t, "e00e7dcfe54e9fafef5111e813a587f01ba9c3e8", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key1", strHasher("value1")) - db.Set("key2", strHasher("value2")) - assert.Equal(t, "eff12d1c703a1022ab509287c0f196130123d786", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key2", strHasher("value2")) // NOTE: out of order - db.Set("key1", strHasher("value1")) - assert.Equal(t, "eff12d1c703a1022ab509287c0f196130123d786", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key1", strHasher("value1")) - db.Set("key2", strHasher("value2")) - db.Set("key3", strHasher("value3")) - assert.Equal(t, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + tests := []struct { + keys []string + values []string // each string gets converted to []byte in test + want string + }{ + {[]string{"key1"}, []string{"value1"}, "fa9bc106ffd932d919bee935ceb6cf2b3dd72d8f"}, + {[]string{"key1"}, []string{"value2"}, "e00e7dcfe54e9fafef5111e813a587f01ba9c3e8"}, + // swap order with 2 keys + {[]string{"key1", "key2"}, []string{"value1", "value2"}, "eff12d1c703a1022ab509287c0f196130123d786"}, + {[]string{"key2", "key1"}, []string{"value2", "value1"}, "eff12d1c703a1022ab509287c0f196130123d786"}, + // swap order with 3 keys + {[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26"}, + {[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26"}, } - { + for i, tc := range tests { db := newSimpleMap() - db.Set("key2", strHasher("value2")) // NOTE: out of order - db.Set("key1", strHasher("value1")) - db.Set("key3", strHasher("value3")) - assert.Equal(t, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + for i := 0; i < len(tc.keys); i++ { + db.Set(tc.keys[i], []byte(tc.values[i])) + } + got := db.Hash() + assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i) } } diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index 306505fc2..d2cbb126f 100644 --- a/crypto/merkle/simple_proof.go +++ 
b/crypto/merkle/simple_proof.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -22,10 +23,10 @@ type SimpleProof struct { Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. } -// SimpleProofsFromHashers computes inclusion proof for given items. +// SimpleProofsFromByteSlices computes inclusion proof for given items. // proofs[0] is the proof for items[0]. -func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromHashers(items) +func SimpleProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromByteSlices(items) rootHash = rootSPN.Hash proofs = make([]*SimpleProof, len(items)) for i, trail := range trails { @@ -42,19 +43,19 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP // SimpleProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values // in the underlying key-value pairs. // The keys are sorted before the proofs are computed. -func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) { +func SimpleProofsFromMap(m map[string][]byte) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) { sm := newSimpleMap() for k, v := range m { sm.Set(k, v) } sm.Sort() kvs := sm.kvs - kvsH := make([]Hasher, 0, len(kvs)) - for _, kvp := range kvs { - kvsH = append(kvsH, KVPair(kvp)) + kvsBytes := make([][]byte, len(kvs)) + for i, kvp := range kvs { + kvsBytes[i] = KVPair(kvp).Bytes() } - rootHash, proofList := SimpleProofsFromHashers(kvsH) + rootHash, proofList := SimpleProofsFromByteSlices(kvsBytes) proofs = make(map[string]*SimpleProof) keys = make([]string, len(proofList)) for i, kvp := range kvs { @@ -175,17 +176,17 @@ func (spn *SimpleProofNode) FlattenAunts() [][]byte { // trails[0].Hash is the leaf hash for items[0]. // trails[i].Parent.Parent....Parent == root for all i. -func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) { +func trailsFromByteSlices(items [][]byte) (trails []*SimpleProofNode, root *SimpleProofNode) { // Recursive impl. switch len(items) { case 0: return nil, nil case 1: - trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} + trail := &SimpleProofNode{tmhash.Sum(items[0]), nil, nil, nil} return []*SimpleProofNode{trail}, trail default: - lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:]) + lefts, leftRoot := trailsFromByteSlices(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromByteSlices(items[(len(items)+1)/2:]) rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) root := &SimpleProofNode{rootHash, nil, nil, nil} leftRoot.Parent = root diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go index 46a075909..9677aef4e 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/simple_tree.go @@ -18,11 +18,12 @@ func SimpleHashFromTwoHashes(left, right []byte) []byte { return hasher.Sum(nil) } -// SimpleHashFromHashers computes a Merkle tree from items that can be hashed. -func SimpleHashFromHashers(items []Hasher) []byte { +// SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice, +// in the provided order. 
+func SimpleHashFromByteSlices(items [][]byte) []byte { hashes := make([][]byte, len(items)) for i, item := range items { - hash := item.Hash() + hash := tmhash.Sum(item) hashes[i] = hash } return simpleHashFromHashes(hashes) @@ -32,7 +33,7 @@ func SimpleHashFromHashers(items []Hasher) []byte { // Like calling SimpleHashFromHashers with // `item = []byte(Hash(key) | Hash(value))`, // sorted by `item`. -func SimpleHashFromMap(m map[string]Hasher) []byte { +func SimpleHashFromMap(m map[string][]byte) []byte { sm := newSimpleMap() for k, v := range m { sm.Set(k, v) diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index b299aba78..32edc652e 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -21,20 +21,20 @@ func TestSimpleProof(t *testing.T) { total := 100 - items := make([]Hasher, total) + items := make([][]byte, total) for i := 0; i < total; i++ { items[i] = testItem(cmn.RandBytes(tmhash.Size)) } - rootHash := SimpleHashFromHashers(items) + rootHash := SimpleHashFromByteSlices(items) - rootHash2, proofs := SimpleProofsFromHashers(items) + rootHash2, proofs := SimpleProofsFromByteSlices(items) require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2) // For each item, check the trail. for i, item := range items { - itemHash := item.Hash() + itemHash := tmhash.Sum(item) proof := proofs[i] // Check total/index diff --git a/crypto/merkle/types.go b/crypto/merkle/types.go index 2fcb3f39d..97a47879b 100644 --- a/crypto/merkle/types.go +++ b/crypto/merkle/types.go @@ -25,11 +25,6 @@ type Tree interface { IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) } -// Hasher represents a hashable piece of data which can be hashed in the Tree. 
-type Hasher interface { - Hash() []byte -} - //----------------------------------------------------------------------- // Uvarint length prefixed byteslice diff --git a/types/block.go b/types/block.go index 5610cc799..07a71ca82 100644 --- a/types/block.go +++ b/types/block.go @@ -9,7 +9,6 @@ import ( "time" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -290,22 +289,22 @@ func (h *Header) Hash() cmn.HexBytes { if h == nil || len(h.ValidatorsHash) == 0 { return nil } - return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ - "ChainID": aminoHasher(h.ChainID), - "Height": aminoHasher(h.Height), - "Time": aminoHasher(h.Time), - "NumTxs": aminoHasher(h.NumTxs), - "TotalTxs": aminoHasher(h.TotalTxs), - "LastBlockID": aminoHasher(h.LastBlockID), - "LastCommit": aminoHasher(h.LastCommitHash), - "Data": aminoHasher(h.DataHash), - "Validators": aminoHasher(h.ValidatorsHash), - "NextValidators": aminoHasher(h.NextValidatorsHash), - "App": aminoHasher(h.AppHash), - "Consensus": aminoHasher(h.ConsensusHash), - "Results": aminoHasher(h.LastResultsHash), - "Evidence": aminoHasher(h.EvidenceHash), - "Proposer": aminoHasher(h.ProposerAddress), + return merkle.SimpleHashFromMap(map[string][]byte{ + "ChainID": cdcEncode(h.ChainID), + "Height": cdcEncode(h.Height), + "Time": cdcEncode(h.Time), + "NumTxs": cdcEncode(h.NumTxs), + "TotalTxs": cdcEncode(h.TotalTxs), + "LastBlockID": cdcEncode(h.LastBlockID), + "LastCommit": cdcEncode(h.LastCommitHash), + "Data": cdcEncode(h.DataHash), + "Validators": cdcEncode(h.ValidatorsHash), + "NextValidators": cdcEncode(h.NextValidatorsHash), + "App": cdcEncode(h.AppHash), + "Consensus": cdcEncode(h.ConsensusHash), + "Results": cdcEncode(h.LastResultsHash), + "Evidence": cdcEncode(h.EvidenceHash), + "Proposer": cdcEncode(h.ProposerAddress), }) } @@ -480,11 +479,11 @@ func (commit *Commit) Hash() cmn.HexBytes { return nil } if commit.hash == nil { - bs := make([]merkle.Hasher, len(commit.Precommits)) + bs := make([][]byte, len(commit.Precommits)) for i, precommit := range commit.Precommits { - bs[i] = aminoHasher(precommit) + bs[i] = cdcEncode(precommit) } - commit.hash = merkle.SimpleHashFromHashers(bs) + commit.hash = merkle.SimpleHashFromByteSlices(bs) } return commit.hash } @@ -689,33 +688,3 @@ func (blockID BlockID) Key() string { func (blockID BlockID) String() string { return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader) } - -//------------------------------------------------------- - -type hasher struct { - item interface{} -} - -func (h hasher) Hash() []byte { - hasher := tmhash.New() - if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) { - bz, err := cdc.MarshalBinaryBare(h.item) - if err != nil { - panic(err) - } - _, err = hasher.Write(bz) - if err != nil { - panic(err) - } - } - return hasher.Sum(nil) -} - -func aminoHash(item interface{}) []byte { - h := hasher{item} - return h.Hash() -} - -func aminoHasher(item interface{}) merkle.Hasher { - return hasher{item} -} diff --git a/types/encoding_helper.go b/types/encoding_helper.go new file mode 100644 index 000000000..f825de8a6 --- /dev/null +++ b/types/encoding_helper.go @@ -0,0 +1,14 @@ +package types + +import ( + cmn "github.com/tendermint/tendermint/libs/common" +) + +// cdcEncode returns nil if the input is nil, otherwise returns +// cdc.MustMarshalBinaryBare(item) +func cdcEncode(item interface{}) []byte { + if item != nil && !cmn.IsTypedNil(item) && !cmn.IsEmpty(item) 
{ + return cdc.MustMarshalBinaryBare(item) + } + return nil +} diff --git a/types/evidence.go b/types/evidence.go index 916dd094f..00c46c593 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" + "github.com/tendermint/tendermint/crypto/tmhash" + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" @@ -104,7 +106,7 @@ func (dve *DuplicateVoteEvidence) Address() []byte { // Hash returns the hash of the evidence. func (dve *DuplicateVoteEvidence) Hash() []byte { - return aminoHasher(dve).Hash() + return tmhash.Sum(cdcEncode(dve)) } // Verify returns an error if the two votes aren't conflicting. @@ -157,8 +159,8 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool { } // just check their hashes - dveHash := aminoHasher(dve).Hash() - evHash := aminoHasher(ev).Hash() + dveHash := tmhash.Sum(cdcEncode(dve)) + evHash := tmhash.Sum(cdcEncode(ev)) return bytes.Equal(dveHash, evHash) } diff --git a/types/params.go b/types/params.go index 014694ccb..129d47627 100644 --- a/types/params.go +++ b/types/params.go @@ -82,10 +82,10 @@ func (params *ConsensusParams) Validate() error { // Hash returns a merkle hash of the parameters to store in the block header func (params *ConsensusParams) Hash() []byte { - return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ - "block_size_max_bytes": aminoHasher(params.BlockSize.MaxBytes), - "block_size_max_gas": aminoHasher(params.BlockSize.MaxGas), - "evidence_params_max_age": aminoHasher(params.EvidenceParams.MaxAge), + return merkle.SimpleHashFromMap(map[string][]byte{ + "block_size_max_bytes": cdcEncode(params.BlockSize.MaxBytes), + "block_size_max_gas": cdcEncode(params.BlockSize.MaxGas), + "evidence_params_max_age": cdcEncode(params.EvidenceParams.MaxAge), }) } diff --git a/types/part_set.go b/types/part_set.go index 8c8151ba8..812b1c2fd 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -88,7 +88,7 @@ func NewPartSetFromData(data []byte, partSize int) *PartSet { // divide data into 4kb parts. 
total := (len(data) + partSize - 1) / partSize parts := make([]*Part, total) - parts_ := make([]merkle.Hasher, total) + partsBytes := make([][]byte, total) partsBitArray := cmn.NewBitArray(total) for i := 0; i < total; i++ { part := &Part{ @@ -96,11 +96,11 @@ func NewPartSetFromData(data []byte, partSize int) *PartSet { Bytes: data[i*partSize : cmn.MinInt(len(data), (i+1)*partSize)], } parts[i] = part - parts_[i] = part + partsBytes[i] = part.Bytes partsBitArray.SetIndex(i, true) } // Compute merkle proofs - root, proofs := merkle.SimpleProofsFromHashers(parts_) + root, proofs := merkle.SimpleProofsFromByteSlices(partsBytes) for i := 0; i < total; i++ { parts[i].Proof = *proofs[i] } diff --git a/types/results.go b/types/results.go index 17d5891c3..6b5b82d27 100644 --- a/types/results.go +++ b/types/results.go @@ -3,6 +3,7 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -17,10 +18,14 @@ type ABCIResult struct { // Hash returns the canonical hash of the ABCIResult func (a ABCIResult) Hash() []byte { - bz := aminoHash(a) + bz := tmhash.Sum(cdcEncode(a)) return bz } +func (a ABCIResult) Bytes() []byte { + return cdcEncode(a) +} + // ABCIResults wraps the deliver tx results to return a proof type ABCIResults []ABCIResult @@ -54,20 +59,20 @@ func (a ABCIResults) Bytes() []byte { func (a ABCIResults) Hash() []byte { // NOTE: we copy the impl of the merkle tree for txs - // we should be consistent and either do it for both or not. - return merkle.SimpleHashFromHashers(a.toHashers()) + return merkle.SimpleHashFromByteSlices(a.toByteSlices()) } // ProveResult returns a merkle proof of one result from the set func (a ABCIResults) ProveResult(i int) merkle.SimpleProof { - _, proofs := merkle.SimpleProofsFromHashers(a.toHashers()) + _, proofs := merkle.SimpleProofsFromByteSlices(a.toByteSlices()) return *proofs[i] } -func (a ABCIResults) toHashers() []merkle.Hasher { +func (a ABCIResults) toByteSlices() [][]byte { l := len(a) - hashers := make([]merkle.Hasher, l) + bzs := make([][]byte, l) for i := 0; i < l; i++ { - hashers[i] = a[i] + bzs[i] = a[i].Bytes() } - return hashers + return bzs } diff --git a/types/tx.go b/types/tx.go index 41fc310f1..ec42f3f13 100644 --- a/types/tx.go +++ b/types/tx.go @@ -70,11 +70,11 @@ func (txs Txs) IndexByHash(hash []byte) int { // TODO: optimize this! func (txs Txs) Proof(i int) TxProof { l := len(txs) - hashers := make([]merkle.Hasher, l) + bzs := make([][]byte, l) for i := 0; i < l; i++ { - hashers[i] = txs[i] + bzs[i] = txs[i] } - root, proofs := merkle.SimpleProofsFromHashers(hashers) + root, proofs := merkle.SimpleProofsFromByteSlices(bzs) return TxProof{ RootHash: root, diff --git a/types/validator.go b/types/validator.go index 46d1a7a9f..af3471848 100644 --- a/types/validator.go +++ b/types/validator.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -71,13 +73,21 @@ func (v *Validator) String() string { // Hash computes the unique ID of a validator with a given voting power. // It excludes the Accum value, which changes with every round. func (v *Validator) Hash() []byte { - return aminoHash(struct { + return tmhash.Sum(v.Bytes()) +} + +// Bytes computes the unique encoding of a validator with a given voting power. 
+// These are the bytes that gets hashed in consensus. It excludes pubkey +// as its redundant with the address. This also excludes accum which changes +// every round. +func (v *Validator) Bytes() []byte { + return cdcEncode((struct { Address Address VotingPower int64 }{ v.Address, v.VotingPower, - }) + })) } //---------------------------------------- diff --git a/types/validator_set.go b/types/validator_set.go index 4dab4d840..72ab68c08 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -176,11 +176,11 @@ func (vals *ValidatorSet) Hash() []byte { if len(vals.Validators) == 0 { return nil } - hashers := make([]merkle.Hasher, len(vals.Validators)) + bzs := make([][]byte, len(vals.Validators)) for i, val := range vals.Validators { - hashers[i] = val + bzs[i] = val.Bytes() } - return merkle.SimpleHashFromHashers(hashers) + return merkle.SimpleHashFromByteSlices(bzs) } // Add adds val to the validator set and returns true. It returns false if val From 9a6cdaddf275cb9e7a717c4fc833facb8139bfcc Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 10 Oct 2018 21:29:13 +0400 Subject: [PATCH 060/113] fix contributor's name in CHANGELOG_PENDING (#2599) Refs https://github.com/tendermint/tendermint/pull/2506#issuecomment-428458974 --- CHANGELOG_PENDING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 7e6a30968..707d37834 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -49,7 +49,7 @@ BUG FIXES: timeoutPrecommit before starting next round - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function -- [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar) +- [common/bit_array] Fixed a bug in the `Sub` function (@james-ray) - [common] \#2534 Make bit array's PickRandom choose uniformly from true bits - [consensus] \#1637 Limit the amount of evidence that can be included in a block From feb08fa4f8229c6a8cbe6ae030bdcc8c21eed4ff Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 11 Oct 2018 10:01:53 -0400 Subject: [PATCH 061/113] ed25519: use golang/x/crypto fork (#2558) * ed25519: use golang/x/crypto fork * changelog * gix GenerateFromPassword * fixes from review --- CHANGELOG_PENDING.md | 2 + Gopkg.lock | 23 ++---- Gopkg.toml | 5 ++ crypto/armor/armor.go | 2 +- crypto/ed25519/ed25519.go | 82 +++++++--------------- crypto/hash.go | 2 +- crypto/secp256k1/secp256k1.go | 3 +- crypto/xchacha20poly1305/xchachapoly.go | 2 +- crypto/xsalsa20symmetric/symmetric.go | 3 +- crypto/xsalsa20symmetric/symmetric_test.go | 7 +- p2p/conn/secret_connection.go | 1 + 11 files changed, 51 insertions(+), 81 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 707d37834..bdc2a7314 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -39,6 +39,8 @@ IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [config] \#2232 added ValidateBasic method, which performs basic checks +- [crypto/ed25519] \#2558 Switch to use latest `golang.org/x/crypto` through our fork at + github.com/tendermint/crypto - [tools] \#2238 Binary dependencies are now locked to a specific git commit - [crypto] \#2099 make crypto random use chacha, and have forward secrecy of generated randomness diff --git a/Gopkg.lock b/Gopkg.lock index 8deb06378..0f70bb2f7 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -364,18 +364,6 
@@ pruneopts = "UT" revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df" -[[projects]] - branch = "master" - digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722" - name = "github.com/tendermint/ed25519" - packages = [ - ".", - "edwards25519", - "extra25519", - ] - pruneopts = "UT" - revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" - [[projects]] digest = "1:e0a2a4be1e20c305badc2b0a7a9ab7fef6da500763bec23ab81df3b5f9eec9ee" name = "github.com/tendermint/go-amino" @@ -385,14 +373,15 @@ version = "v0.12.0-rc0" [[projects]] - branch = "master" - digest = "1:c31a37cafc12315b8bd745c8ad6a006ac25350472488162a821e557b3e739d67" + digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf" name = "golang.org/x/crypto" packages = [ "bcrypt", "blowfish", "chacha20poly1305", "curve25519", + "ed25519", + "ed25519/internal/edwards25519", "hkdf", "internal/chacha20", "internal/subtle", @@ -405,7 +394,8 @@ "salsa20/salsa", ] pruneopts = "UT" - revision = "56440b844dfe139a8ac053f4ecac0b20b79058f4" + revision = "3764759f34a542a3aef74d6b02e35be7ab893bba" + source = "github.com/tendermint/crypto" [[projects]] digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" @@ -543,12 +533,11 @@ "github.com/syndtr/goleveldb/leveldb/iterator", "github.com/syndtr/goleveldb/leveldb/opt", "github.com/tendermint/btcd/btcec", - "github.com/tendermint/ed25519", - "github.com/tendermint/ed25519/extra25519", "github.com/tendermint/go-amino", "golang.org/x/crypto/bcrypt", "golang.org/x/crypto/chacha20poly1305", "golang.org/x/crypto/curve25519", + "golang.org/x/crypto/ed25519", "golang.org/x/crypto/hkdf", "golang.org/x/crypto/nacl/box", "golang.org/x/crypto/nacl/secretbox", diff --git a/Gopkg.toml b/Gopkg.toml index d3bca19e8..07ff3c534 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -72,6 +72,11 @@ ## Some repos dont have releases. ## Pin to revision +[[constraint]] + name = "golang.org/x/crypto" + source = "github.com/tendermint/crypto" + revision = "3764759f34a542a3aef74d6b02e35be7ab893bba" + [[override]] name = "github.com/jmhodges/levigo" revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" diff --git a/crypto/armor/armor.go b/crypto/armor/armor.go index c15d070e6..e3b29a971 100644 --- a/crypto/armor/armor.go +++ b/crypto/armor/armor.go @@ -5,7 +5,7 @@ import ( "fmt" "io/ioutil" - "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/armor" // forked to github.com/tendermint/crypto ) func EncodeArmor(blockType string, headers map[string]string, data []byte) string { diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index c55b3588f..c2bed6ab1 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -6,9 +6,9 @@ import ( "fmt" "io" - "github.com/tendermint/ed25519" - "github.com/tendermint/ed25519/extra25519" amino "github.com/tendermint/go-amino" + "golang.org/x/crypto/ed25519" // forked to github.com/tendermint/crypto + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -47,8 +47,7 @@ func (privKey PrivKeyEd25519) Bytes() []byte { // Sign produces a signature on the provided message. 
func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) { - privKeyBytes := [64]byte(privKey) - signatureBytes := ed25519.Sign(&privKeyBytes, msg) + signatureBytes := ed25519.Sign(privKey[:], msg) return signatureBytes[:], nil } @@ -65,14 +64,14 @@ func (privKey PrivKeyEd25519) PubKey() crypto.PubKey { break } } - if initialized { - var pubkeyBytes [PubKeyEd25519Size]byte - copy(pubkeyBytes[:], privKeyBytes[32:]) - return PubKeyEd25519(pubkeyBytes) + + if !initialized { + panic("Expected PrivKeyEd25519 to include concatenated pubkey bytes") } - pubBytes := *ed25519.MakePublicKey(&privKeyBytes) - return PubKeyEd25519(pubBytes) + var pubkeyBytes [PubKeyEd25519Size]byte + copy(pubkeyBytes[:], privKeyBytes[32:]) + return PubKeyEd25519(pubkeyBytes) } // Equals - you probably don't need to use this. @@ -85,17 +84,6 @@ func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool { } } -// ToCurve25519 takes a private key and returns its representation on -// Curve25519. Curve25519 is birationally equivalent to Edwards25519, -// which Ed25519 uses internally. This method is intended for use in -// an X25519 Diffie Hellman key exchange. -func (privKey PrivKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { - keyCurve25519 := new([32]byte) - privKeyBytes := [64]byte(privKey) - extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes) - return keyCurve25519 -} - // GenPrivKey generates a new ed25519 private key. // It uses OS randomness in conjunction with the current global random seed // in tendermint/libs/common to generate the private key. @@ -105,16 +93,16 @@ func GenPrivKey() PrivKeyEd25519 { // genPrivKey generates a new ed25519 private key using the provided reader. func genPrivKey(rand io.Reader) PrivKeyEd25519 { - privKey := new([64]byte) - _, err := io.ReadFull(rand, privKey[:32]) + seed := make([]byte, 32) + _, err := io.ReadFull(rand, seed[:]) if err != nil { panic(err) } - // ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey. - // It places the pubkey in the last 32 bytes of privKey, and returns the - // public key. - ed25519.MakePublicKey(privKey) - return PrivKeyEd25519(*privKey) + + privKey := ed25519.NewKeyFromSeed(seed) + var privKeyEd PrivKeyEd25519 + copy(privKeyEd[:], privKey) + return privKeyEd } // GenPrivKeyFromSecret hashes the secret with SHA2, and uses @@ -122,14 +110,12 @@ func genPrivKey(rand io.Reader) PrivKeyEd25519 { // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 { - privKey32 := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. - privKey := new([64]byte) - copy(privKey[:32], privKey32) - // ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey. - // It places the pubkey in the last 32 bytes of privKey, and returns the - // public key. - ed25519.MakePublicKey(privKey) - return PrivKeyEd25519(*privKey) + seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. 
+ + privKey := ed25519.NewKeyFromSeed(seed) + var privKeyEd PrivKeyEd25519 + copy(privKeyEd[:], privKey) + return privKeyEd } //------------------------------------- @@ -156,30 +142,12 @@ func (pubKey PubKeyEd25519) Bytes() []byte { return bz } -func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ []byte) bool { +func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig []byte) bool { // make sure we use the same algorithm to sign - if len(sig_) != SignatureSize { + if len(sig) != SignatureSize { return false } - sig := new([SignatureSize]byte) - copy(sig[:], sig_) - pubKeyBytes := [PubKeyEd25519Size]byte(pubKey) - return ed25519.Verify(&pubKeyBytes, msg, sig) -} - -// ToCurve25519 takes a public key and returns its representation on -// Curve25519. Curve25519 is birationally equivalent to Edwards25519, -// which Ed25519 uses internally. This method is intended for use in -// an X25519 Diffie Hellman key exchange. -// -// If there is an error, then this function returns nil. -func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { - keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey) - ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes) - if !ok { - return nil - } - return keyCurve25519 + return ed25519.Verify(pubKey[:], msg, sig) } func (pubKey PubKeyEd25519) String() string { diff --git a/crypto/hash.go b/crypto/hash.go index c1fb41f7a..a384bbb55 100644 --- a/crypto/hash.go +++ b/crypto/hash.go @@ -3,7 +3,7 @@ package crypto import ( "crypto/sha256" - "golang.org/x/crypto/ripemd160" + "golang.org/x/crypto/ripemd160" // forked to github.com/tendermint/crypto ) func Sha256(bytes []byte) []byte { diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 2c64d1e9d..784409f3c 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -9,8 +9,9 @@ import ( secp256k1 "github.com/tendermint/btcd/btcec" amino "github.com/tendermint/go-amino" + "golang.org/x/crypto/ripemd160" // forked to github.com/tendermint/crypto + "github.com/tendermint/tendermint/crypto" - "golang.org/x/crypto/ripemd160" ) //------------------------------------- diff --git a/crypto/xchacha20poly1305/xchachapoly.go b/crypto/xchacha20poly1305/xchachapoly.go index c7a175b5f..115c9190f 100644 --- a/crypto/xchacha20poly1305/xchachapoly.go +++ b/crypto/xchacha20poly1305/xchachapoly.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" - "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/chacha20poly1305" // forked to github.com/tendermint/crypto ) // Implements crypto.AEAD diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go index aa33ee14a..c51e24590 100644 --- a/crypto/xsalsa20symmetric/symmetric.go +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -4,9 +4,10 @@ import ( "errors" "fmt" + "golang.org/x/crypto/nacl/secretbox" // forked to github.com/tendermint/crypto + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" - "golang.org/x/crypto/nacl/secretbox" ) // TODO, make this into a struct that implements crypto.Symmetric. 
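The ed25519 refactor above assumes the x/crypto layout in which a private key is 64 bytes of seed followed by pubkey, which is consistent with `PubKey()` simply copying the last 32 bytes. A small sketch of that layout and of the Sign/Verify round trip (the all-zero seed is for illustration only, never for real keys):

```
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/ed25519" // forked to github.com/tendermint/crypto
)

func main() {
	seed := make([]byte, ed25519.SeedSize) // 32 zero bytes -- illustration only
	priv := ed25519.NewKeyFromSeed(seed)   // 64 bytes: seed || pubkey
	pub := priv.Public().(ed25519.PublicKey)

	fmt.Println(len(priv), len(pub))         // 64 32
	fmt.Println(bytes.Equal(priv[32:], pub)) // true: pubkey is the second half

	sig := ed25519.Sign(priv, []byte("msg"))
	fmt.Println(ed25519.Verify(pub, []byte("msg"), sig)) // true
}
```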
diff --git a/crypto/xsalsa20symmetric/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go index d955307ea..e9adf728e 100644 --- a/crypto/xsalsa20symmetric/symmetric_test.go +++ b/crypto/xsalsa20symmetric/symmetric_test.go @@ -6,8 +6,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/crypto/bcrypt" // forked to github.com/tendermint/crypto + "github.com/tendermint/tendermint/crypto" - "golang.org/x/crypto/bcrypt" ) func TestSimple(t *testing.T) { @@ -29,7 +30,9 @@ func TestSimpleWithKDF(t *testing.T) { plaintext := []byte("sometext") secretPass := []byte("somesecret") - secret, err := bcrypt.GenerateFromPassword(secretPass, 12) + salt := []byte("somesaltsomesalt") // len 16 + // NOTE: we use a fork of x/crypto so we can inject our own randomness for salt + secret, err := bcrypt.GenerateFromPassword(salt, secretPass, 12) if err != nil { t.Error(err) } diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index 3628eb4a3..acdd96de4 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -10,6 +10,7 @@ import ( "net" "time" + // forked to github.com/tendermint/crypto "golang.org/x/crypto/chacha20poly1305" "golang.org/x/crypto/curve25519" "golang.org/x/crypto/nacl/box" From cc0bea522ce9968d9e132f376cc041975a149c8e Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Thu, 11 Oct 2018 10:03:34 -0400 Subject: [PATCH 062/113] Minor doc fix regarding testnet on non-linux OS (#2601) --- docs/networks/deploy-testnets.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/networks/deploy-testnets.md b/docs/networks/deploy-testnets.md index 4df6916bb..21c346103 100644 --- a/docs/networks/deploy-testnets.md +++ b/docs/networks/deploy-testnets.md @@ -66,7 +66,17 @@ make localnet-start ``` from the root of the tendermint repository. This will spin up a 4-node -local testnet. Review the target in the Makefile to debug any problems. +local testnet. Note that this command expects a linux binary in the build directory. +If you built the binary using a non-linux OS, you may see +the error `Binary needs to be OS linux, ARCH amd64`, in which case you can +run: + +``` +make build-linux +make localnet-start +``` + +Review the target in the Makefile to debug any problems. ### Cloud From 69ecda18f9d8c42edceaa77061ad1081a323ccbd Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Thu, 11 Oct 2018 07:16:25 -0700 Subject: [PATCH 063/113] refactor nop_event_bus.go into event_bus.go (#2605) This is just to match the style of the rest of the codebase, and to reduce the number of files in types. 
--- types/event_bus.go | 71 ++++++++++++++++++++++++++++++++++++++ types/nop_event_bus.go | 77 ------------------------------------------ 2 files changed, 71 insertions(+), 77 deletions(-) delete mode 100644 types/nop_event_bus.go diff --git a/types/event_bus.go b/types/event_bus.go index 466ae7b44..269d5ab1f 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -165,3 +165,74 @@ func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value) } } + +//----------------------------------------------------------------------------- +type NopEventBus struct{} + +func (NopEventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { + return nil +} + +func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { + return nil +} + +func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return nil +} + +func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error { + return nil +} + +func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { + return nil +} + +func (NopEventBus) PublishEventVote(data EventDataVote) error { + return nil +} + +func (NopEventBus) PublishEventTx(data EventDataTx) error { + return nil +} + +func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventLock(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { + return nil +} diff --git a/types/nop_event_bus.go b/types/nop_event_bus.go deleted file mode 100644 index 93694da47..000000000 --- a/types/nop_event_bus.go +++ /dev/null @@ -1,77 +0,0 @@ -package types - -import ( - "context" - - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" -) - -type NopEventBus struct{} - -func (NopEventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { - return nil -} - -func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - return nil -} - -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return nil -} - -func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error { - return nil -} - -func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { - return nil -} - -func (NopEventBus) PublishEventVote(data EventDataVote) error { - return nil -} - -func (NopEventBus) PublishEventTx(data EventDataTx) error { - return nil -} - -func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { 
- return nil -} - -func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventLock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { - return nil -} From 7b48ea1788878d02c84b714c06640d595aa0b90e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 11 Oct 2018 13:55:36 -0400 Subject: [PATCH 064/113] privval: set deadline in readMsg (#2548) * privval: set deadline in readMsg * fixes from review --- privval/socket.go | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/privval/socket.go b/privval/socket.go index da95f8fb4..64d4c46d0 100644 --- a/privval/socket.go +++ b/privval/socket.go @@ -18,11 +18,11 @@ import ( ) const ( - defaultAcceptDeadlineSeconds = 3 - defaultConnDeadlineSeconds = 3 - defaultConnHeartBeatSeconds = 30 - defaultConnWaitSeconds = 60 - defaultDialRetries = 10 + defaultAcceptDeadlineSeconds = 30 // tendermint waits this long for remote val to connect + defaultConnDeadlineSeconds = 3 // must be set before each read + defaultConnHeartBeatSeconds = 30 // tcp keep-alive period + defaultConnWaitSeconds = 60 // XXX: is this redundant with the accept deadline? + defaultDialRetries = 10 // try to connect to tendermint this many times ) // Socket errors. @@ -33,12 +33,6 @@ var ( ErrUnexpectedResponse = errors.New("received unexpected response") ) -var ( - acceptDeadline = time.Second * defaultAcceptDeadlineSeconds - connDeadline = time.Second * defaultConnDeadlineSeconds - connHeartbeat = time.Second * defaultConnHeartBeatSeconds -) - // SocketPVOption sets an optional parameter on the SocketPV. 
type SocketPVOption func(*SocketPV) @@ -93,9 +87,9 @@ func NewSocketPV( ) *SocketPV { sc := &SocketPV{ addr: socketAddr, - acceptDeadline: acceptDeadline, - connDeadline: connDeadline, - connHeartbeat: connHeartbeat, + acceptDeadline: time.Second * defaultAcceptDeadlineSeconds, + connDeadline: time.Second * defaultConnDeadlineSeconds, + connHeartbeat: time.Second * defaultConnHeartBeatSeconds, connWaitTimeout: time.Second * defaultConnWaitSeconds, privKey: privKey, } @@ -441,7 +435,7 @@ func (rs *RemoteSigner) connect() (net.Conn, error) { continue } - if err := conn.SetDeadline(time.Now().Add(connDeadline)); err != nil { + if err := conn.SetDeadline(time.Now().Add(time.Second * defaultConnDeadlineSeconds)); err != nil { err = cmn.ErrorWrap(err, "setting connection timeout failed") rs.Logger.Error( "connect", @@ -587,6 +581,14 @@ type RemoteSignerError struct { func readMsg(r io.Reader) (msg SocketPVMsg, err error) { const maxSocketPVMsgSize = 1024 * 10 + + // set deadline before trying to read + conn := r.(net.Conn) + if err := conn.SetDeadline(time.Now().Add(time.Second * defaultConnDeadlineSeconds)); err != nil { + err = cmn.ErrorWrap(err, "setting connection timeout failed in readMsg") + return msg, err + } + _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize) if _, ok := err.(timeoutError); ok { err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) From 3744e8271d664a89cc453a546d7ab827fd43d9a4 Mon Sep 17 00:00:00 2001 From: Alessio Treglia Date: Thu, 11 Oct 2018 20:37:21 -0700 Subject: [PATCH 065/113] [R4R] Pass nil to NewValidatorSet() when genesis file's Validators field is nil (#2617) * Pass nil to NewValidatorSet() when genesis file's Validators field is nil Closes: #2616 * Update CHANGELOG_PENDING.md --- CHANGELOG_PENDING.md | 1 + state/state.go | 32 ++++++++++++++++++++------------ state/state_test.go | 13 +++++++++++++ 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index bdc2a7314..dace33f19 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -57,3 +57,4 @@ timeoutPrecommit before starting next round block - [p2p] \#2555 fix p2p switch FlushThrottle value (@goolAdapter) - [libs/event] \#2518 fix event concurrency flaw (@goolAdapter) +- [state] \#2616 Pass nil to NewValidatorSet() when genesis file's Validators field is nil diff --git a/state/state.go b/state/state.go index 26510816b..1f60fd653 100644 --- a/state/state.go +++ b/state/state.go @@ -198,17 +198,25 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { } // Make validators slice - validators := make([]*types.Validator, len(genDoc.Validators)) - for i, val := range genDoc.Validators { - pubKey := val.PubKey - address := pubKey.Address() - - // Make validator - validators[i] = &types.Validator{ - Address: address, - PubKey: pubKey, - VotingPower: val.Power, + var validatorSet, nextValidatorSet *types.ValidatorSet + if genDoc.Validators == nil { + validatorSet = types.NewValidatorSet(nil) + nextValidatorSet = types.NewValidatorSet(nil) + } else { + validators := make([]*types.Validator, len(genDoc.Validators)) + for i, val := range genDoc.Validators { + pubKey := val.PubKey + address := pubKey.Address() + + // Make validator + validators[i] = &types.Validator{ + Address: address, + PubKey: pubKey, + VotingPower: val.Power, + } } + validatorSet = types.NewValidatorSet(validators) + nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementAccum(1) } return State{ @@ -219,8 +227,8 @@ func MakeGenesisState(genDoc 
*types.GenesisDoc) (State, error) { LastBlockID: types.BlockID{}, LastBlockTime: genDoc.GenesisTime, - NextValidators: types.NewValidatorSet(validators).CopyIncrementAccum(1), - Validators: types.NewValidatorSet(validators), + NextValidators: nextValidatorSet, + Validators: validatorSet, LastValidators: types.NewValidatorSet(nil), LastHeightValidatorsChanged: 1, diff --git a/state/state_test.go b/state/state_test.go index 1ab470b02..2c777307a 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -48,6 +48,19 @@ func TestStateCopy(t *testing.T) { %v`, state)) } +//TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. +func TestMakeGenesisStateNilValidators(t *testing.T) { + doc := types.GenesisDoc{ + ChainID: "dummy", + Validators: nil, + } + require.Nil(t, doc.ValidateAndComplete()) + state, err := MakeGenesisState(&doc) + require.Nil(t, err) + require.Equal(t, 0, len(state.Validators.Validators)) + require.Equal(t, 0, len(state.NextValidators.Validators)) +} + // TestStateSaveLoad tests saving and loading State from a db. func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) From e1538bf67ea5122b0add392e93bd2a3ea73dee69 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 12 Oct 2018 09:03:58 +0400 Subject: [PATCH 066/113] state: require block.Time of the fist block to be genesis time (#2594) * require block.Time of the fist block to be genesis time Refs #2587: ``` We only start validating block.Time when Height > 1, because there is no commit to compute the median timestamp from for the first block. This means a faulty proposer could make the first block with whatever time they want. Instead, we should require the timestamp of block 1 to match the genesis time. I discovered this while refactoring the ValidateBlock tests to be table-driven while working on tests for #2560. ``` * do not accept blocks with negative height * update changelog and spec * nanos precision for test genesis time * Fix failing test (#2607) --- CHANGELOG_PENDING.md | 1 + cmd/tendermint/commands/lite.go | 14 +++++++------- config/toml.go | 2 +- consensus/state_test.go | 7 ++++--- docs/spec/blockchain/blockchain.md | 9 +++++++++ p2p/netaddress.go | 1 + state/state.go | 5 +---- state/validation.go | 9 +++++++++ state/validation_test.go | 11 ++++++----- types/block.go | 7 +++++++ types/block_test.go | 3 ++- 11 files changed, 48 insertions(+), 21 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index dace33f19..f12684020 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -28,6 +28,7 @@ BREAKING CHANGES: * Blockchain Protocol * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. 
* [types] \#2512 Remove the pubkey field from the validator hash + * [state] \#2587 require block.Time of the fist block to be genesis time * P2P Protocol diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index bc51d7de2..eb2817b60 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -26,12 +26,12 @@ just with added trust and running locally.`, } var ( - listenAddr string - nodeAddr string - chainID string - home string - maxOpenConnections int - cacheSize int + listenAddr string + nodeAddr string + chainID string + home string + maxOpenConnections int + cacheSize int ) func init() { @@ -39,7 +39,7 @@ func init() { LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address") LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID") LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") - LiteCmd.Flags().IntVar(&maxOpenConnections,"max-open-connections",900,"Maximum number of simultaneous connections (including WebSocket).") + LiteCmd.Flags().IntVar(&maxOpenConnections, "max-open-connections", 900, "Maximum number of simultaneous connections (including WebSocket).") LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size") } diff --git a/config/toml.go b/config/toml.go index ddfe5f055..62e5fa978 100644 --- a/config/toml.go +++ b/config/toml.go @@ -342,7 +342,7 @@ func ResetTestRoot(testName string) *Config { } var testGenesis = `{ - "genesis_time": "0001-01-01T00:00:00.000Z", + "genesis_time": "2017-10-10T08:20:13.695936996Z", "chain_id": "tendermint_test", "validators": [ { diff --git a/consensus/state_test.go b/consensus/state_test.go index 831f77f4a..e7d4b4fab 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -455,8 +455,9 @@ func TestStateLockNoPOL(t *testing.T) { ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + cs2, _ := randConsensusState(2) // needed so generated block is different than locked block // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) + prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) if prop == nil || propBlock == nil { t.Fatal("Failed to create proposal block with vs2") } @@ -479,7 +480,7 @@ func TestStateLockNoPOL(t *testing.T) { ensureNewVote(voteCh) // prevote // prevote for locked block (not proposal) - validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) + validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensureNewVote(voteCh) @@ -487,7 +488,7 @@ func TestStateLockNoPOL(t *testing.T) { ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds()) ensureNewVote(voteCh) - validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + validatePrecommit(t, cs1, 3, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height ensureNewVote(voteCh) diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index bd0af70ab..4a433b5d8 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -230,6 
+230,15 @@ It must equal the weighted median of the timestamps of the valid votes in the bl Note: the timestamp of a vote must be greater by at least one millisecond than that of the block being voted on. +The timestamp of the first block must be equal to the genesis time (since +there's no votes to compute the median). + +``` +if block.Header.Height == 1 { + block.Header.Timestamp == genesisTime +} +``` + See the section on [BFT time](../consensus/bft-time.md) for more details. ### NumTxs diff --git a/p2p/netaddress.go b/p2p/netaddress.go index f848b7a5a..ec9a0ea7c 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -14,6 +14,7 @@ import ( "time" "errors" + cmn "github.com/tendermint/tendermint/libs/common" ) diff --git a/state/state.go b/state/state.go index 1f60fd653..23c0d632c 100644 --- a/state/state.go +++ b/state/state.go @@ -118,10 +118,7 @@ func (state State) MakeBlock( // Set time if height == 1 { - block.Time = tmtime.Now() - if block.Time.Before(state.LastBlockTime) { - block.Time = state.LastBlockTime // state.LastBlockTime for height == 1 is genesis time - } + block.Time = state.LastBlockTime // genesis time } else { block.Time = MedianTime(commit, state.LastValidators) } diff --git a/state/validation.go b/state/validation.go index 9d8ef97a2..a308870e5 100644 --- a/state/validation.go +++ b/state/validation.go @@ -123,6 +123,15 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { block.Time, ) } + } else if block.Height == 1 { + genesisTime := state.LastBlockTime + if !block.Time.Equal(genesisTime) { + return fmt.Errorf( + "Block time %v is not equal to genesis time %v", + block.Time, + genesisTime, + ) + } } // Limit the amount of evidence diff --git a/state/validation_test.go b/state/validation_test.go index e5f45166c..3c58c7130 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -2,6 +2,7 @@ package state import ( "testing" + "time" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" @@ -32,11 +33,11 @@ func TestValidateBlockHeader(t *testing.T) { name string malleateBlock func(block *types.Block) }{ - {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, // wrong chain id - {"Height wrong", func(block *types.Block) { block.Height += 10 }}, // wrong height - // TODO(#2589) (#2587) : {"Time", func(block *types.Block) { block.Time.Add(-time.Second * 3600 * 24) }}, // wrong time - {"NumTxs wrong", func(block *types.Block) { block.NumTxs += 10 }}, // wrong num txs - {"TotalTxs wrong", func(block *types.Block) { block.TotalTxs += 10 }}, // wrong total txs + {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, + {"Height wrong", func(block *types.Block) { block.Height += 10 }}, + {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 3600 * 24) }}, + {"NumTxs wrong", func(block *types.Block) { block.NumTxs += 10 }}, + {"TotalTxs wrong", func(block *types.Block) { block.TotalTxs += 10 }}, {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartsHeader.Total += 10 }}, {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, diff --git a/types/block.go b/types/block.go index 07a71ca82..bd9092f4e 100644 --- a/types/block.go +++ b/types/block.go @@ -64,6 +64,13 @@ func (b *Block) ValidateBasic() error { b.mtx.Lock() defer b.mtx.Unlock() + if b.Height < 0 { + return fmt.Errorf( + "Negative Block.Header.Height: %v", + b.Height, + ) + } + newTxs := 
int64(len(b.Data.Txs)) if b.NumTxs != newTxs { return fmt.Errorf( diff --git a/types/block_test.go b/types/block_test.go index 887f35a11..962aa0026 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -60,6 +60,7 @@ func TestBlockValidateBasic(t *testing.T) { }{ {"Make Block", func(blk *Block) {}, false}, {"Make Block w/ proposer Addr", func(blk *Block) { blk.ProposerAddress = valSet.GetProposer().Address }, false}, + {"Negative Height", func(blk *Block) { blk.Height = -1 }, true}, {"Increase NumTxs", func(blk *Block) { blk.NumTxs++ }, true}, {"Remove 1/2 the commits", func(blk *Block) { blk.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] @@ -81,7 +82,7 @@ func TestBlockValidateBasic(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { block := MakeBlock(h, txs, commit, evList) tc.malleateBlock(block) - assert.Equal(t, tc.expErr, block.ValidateBasic() != nil, "Validate Basic had an unexpected result") + assert.Equal(t, tc.expErr, block.ValidateBasic() != nil, "ValidateBasic had an unexpected result") }) } } From 2363d8897949919fafec4992047ef803bc5b8ee4 Mon Sep 17 00:00:00 2001 From: Zarko Milosevic Date: Fri, 12 Oct 2018 22:13:01 +0200 Subject: [PATCH 067/113] consensus: Wait for proposal or timeout before prevote (#2540) * Fix termination issues and improve tests * Improve formatting and tests based on reviewer feedback --- CHANGELOG_PENDING.md | 2 + config/config.go | 2 +- consensus/common_test.go | 155 +++++++++-- consensus/mempool_test.go | 36 ++- consensus/state.go | 52 ++-- consensus/state_test.go | 567 ++++++++++++++++++++------------------ node/node.go | 2 +- p2p/metrics.go | 1 - state/execution.go | 32 +-- state/metrics.go | 2 +- 10 files changed, 500 insertions(+), 351 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index f12684020..0f919bdaa 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -50,6 +50,8 @@ BUG FIXES: - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time - [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) wait for timeoutPrecommit before starting next round +- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) wait for +Proposal or timeoutProposal before entering prevote - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function - [common/bit_array] Fixed a bug in the `Sub` function (@james-ray) diff --git a/config/config.go b/config/config.go index f2bac5c6f..ede57207c 100644 --- a/config/config.go +++ b/config/config.go @@ -565,7 +565,7 @@ func DefaultConsensusConfig() *ConsensusConfig { // TestConsensusConfig returns a configuration for testing the consensus service func TestConsensusConfig() *ConsensusConfig { cfg := DefaultConsensusConfig() - cfg.TimeoutPropose = 100 * time.Millisecond + cfg.TimeoutPropose = 40 * time.Millisecond cfg.TimeoutProposeDelta = 1 * time.Millisecond cfg.TimeoutPrevote = 10 * time.Millisecond cfg.TimeoutPrevoteDelta = 1 * time.Millisecond diff --git a/consensus/common_test.go b/consensus/common_test.go index 2a5cc8e79..cf76e924d 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -39,8 +39,8 @@ const ( ) // genesis, chain_id, priv_val -var config *cfg.Config // NOTE: must be reset for each _test.go file -var ensureTimeout = time.Second * 1 // must be in seconds because CreateEmptyBlocksInterval is +var config *cfg.Config // NOTE: must be reset for each _test.go file +var ensureTimeout = time.Millisecond * 100 func ensureDir(dir 
string, mode os.FileMode) { if err := cmn.EnsureDir(dir, mode); err != nil { @@ -317,67 +317,156 @@ func ensureNoNewEvent(ch <-chan interface{}, timeout time.Duration, } } -func ensureNoNewStep(stepCh <-chan interface{}) { - ensureNoNewEvent(stepCh, ensureTimeout, "We should be stuck waiting, "+ - "not moving to the next step") +func ensureNoNewEventOnChannel(ch <-chan interface{}) { + ensureNoNewEvent( + ch, + ensureTimeout, + "We should be stuck waiting, not receiving new event on the channel") +} + +func ensureNoNewRoundStep(stepCh <-chan interface{}) { + ensureNoNewEvent( + stepCh, + ensureTimeout, + "We should be stuck waiting, not receiving NewRoundStep event") +} + +func ensureNoNewUnlock(unlockCh <-chan interface{}) { + ensureNoNewEvent( + unlockCh, + ensureTimeout, + "We should be stuck waiting, not receiving Unlock event") } func ensureNoNewTimeout(stepCh <-chan interface{}, timeout int64) { timeoutDuration := time.Duration(timeout*5) * time.Nanosecond - ensureNoNewEvent(stepCh, timeoutDuration, "We should be stuck waiting, "+ - "not moving to the next step") + ensureNoNewEvent( + stepCh, + timeoutDuration, + "We should be stuck waiting, not receiving NewTimeout event") } -func ensureNewEvent(ch <-chan interface{}, timeout time.Duration, errorMessage string) { +func ensureNewEvent( + ch <-chan interface{}, + height int64, + round int, + timeout time.Duration, + errorMessage string) { + select { case <-time.After(timeout): panic(errorMessage) - case <-ch: - break + case ev := <-ch: + rs, ok := ev.(types.EventDataRoundState) + if !ok { + panic( + fmt.Sprintf( + "expected a EventDataRoundState, got %v.Wrong subscription channel?", + reflect.TypeOf(rs))) + } + if rs.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height)) + } + if rs.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round)) + } + // TODO: We could check also for a step at this point! } } -func ensureNewStep(stepCh <-chan interface{}) { - ensureNewEvent(stepCh, ensureTimeout, +func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) { + ensureNewEvent( + stepCh, + height, + round, + ensureTimeout, "Timeout expired while waiting for NewStep event") } -func ensureNewRound(roundCh <-chan interface{}) { - ensureNewEvent(roundCh, ensureTimeout, +func ensureNewVote(voteCh <-chan interface{}, height int64, round int) { + select { + case <-time.After(ensureTimeout): + break + case v := <-voteCh: + edv, ok := v.(types.EventDataVote) + if !ok { + panic(fmt.Sprintf("expected a *types.Vote, "+ + "got %v. 
wrong subscription channel?", + reflect.TypeOf(v))) + } + vote := edv.Vote + if vote.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) + } + if vote.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) + } + } +} + +func ensureNewRound(roundCh <-chan interface{}, height int64, round int) { + ensureNewEvent(roundCh, height, round, ensureTimeout, "Timeout expired while waiting for NewRound event") } -func ensureNewTimeout(timeoutCh <-chan interface{}, timeout int64) { - timeoutDuration := time.Duration(timeout*5) * time.Nanosecond - ensureNewEvent(timeoutCh, timeoutDuration, +func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) { + timeoutDuration := time.Duration(timeout*3) * time.Nanosecond + ensureNewEvent(timeoutCh, height, round, timeoutDuration, "Timeout expired while waiting for NewTimeout event") } -func ensureNewProposal(proposalCh <-chan interface{}) { - ensureNewEvent(proposalCh, ensureTimeout, +func ensureNewProposal(proposalCh <-chan interface{}, height int64, round int) { + ensureNewEvent(proposalCh, height, round, ensureTimeout, "Timeout expired while waiting for NewProposal event") } -func ensureNewBlock(blockCh <-chan interface{}) { - ensureNewEvent(blockCh, ensureTimeout, - "Timeout expired while waiting for NewBlock event") +func ensureNewBlock(blockCh <-chan interface{}, height int64) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for NewBlock event") + case ev := <-blockCh: + block, ok := ev.(types.EventDataNewBlock) + if !ok { + panic(fmt.Sprintf("expected a *types.EventDataNewBlock, "+ + "got %v. wrong subscription channel?", + reflect.TypeOf(block))) + } + if block.Block.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, block.Block.Height)) + } + } } -func ensureNewVote(voteCh <-chan interface{}) { - ensureNewEvent(voteCh, ensureTimeout, - "Timeout expired while waiting for NewVote event") +func ensureNewBlockHeader(blockCh <-chan interface{}, height int64, blockHash cmn.HexBytes) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for NewBlockHeader event") + case ev := <-blockCh: + blockHeader, ok := ev.(types.EventDataNewBlockHeader) + if !ok { + panic(fmt.Sprintf("expected a *types.EventDataNewBlockHeader, "+ + "got %v. 
wrong subscription channel?", + reflect.TypeOf(blockHeader))) + } + if blockHeader.Header.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, blockHeader.Header.Height)) + } + if !bytes.Equal(blockHeader.Header.Hash(), blockHash) { + panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeader.Header.Hash())) + } + } } -func ensureNewUnlock(unlockCh <-chan interface{}) { - ensureNewEvent(unlockCh, ensureTimeout, +func ensureNewUnlock(unlockCh <-chan interface{}, height int64, round int) { + ensureNewEvent(unlockCh, height, round, ensureTimeout, "Timeout expired while waiting for NewUnlock event") } -func ensureVote(voteCh chan interface{}, height int64, round int, +func ensureVote(voteCh <-chan interface{}, height int64, round int, voteType byte) { select { case <-time.After(ensureTimeout): - break + panic("Timeout expired while waiting for NewVote event") case v := <-voteCh: edv, ok := v.(types.EventDataVote) if !ok { @@ -398,6 +487,14 @@ func ensureVote(voteCh chan interface{}, height int64, round int, } } +func ensureNewEventOnChannel(ch <-chan interface{}) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for new activity on the channel") + case <-ch: + } +} + //------------------------------------------------------------------------------- // consensus nets diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 179766fd0..ed97ae681 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -28,12 +28,12 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, height, round) - ensureNewStep(newBlockCh) // first block gets committed - ensureNoNewStep(newBlockCh) + ensureNewEventOnChannel(newBlockCh) // first block gets committed + ensureNoNewEventOnChannel(newBlockCh) deliverTxsRange(cs, 0, 1) - ensureNewStep(newBlockCh) // commit txs - ensureNewStep(newBlockCh) // commit updated app hash - ensureNoNewStep(newBlockCh) + ensureNewEventOnChannel(newBlockCh) // commit txs + ensureNewEventOnChannel(newBlockCh) // commit updated app hash + ensureNoNewEventOnChannel(newBlockCh) } func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { @@ -46,9 +46,9 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, height, round) - ensureNewStep(newBlockCh) // first block gets committed - ensureNoNewStep(newBlockCh) // then we dont make a block ... - ensureNewStep(newBlockCh) // until the CreateEmptyBlocksInterval has passed + ensureNewEventOnChannel(newBlockCh) // first block gets committed + ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ... 
+ ensureNewEventOnChannel(newBlockCh) // until the CreateEmptyBlocksInterval has passed } func TestMempoolProgressInHigherRound(t *testing.T) { @@ -72,13 +72,19 @@ func TestMempoolProgressInHigherRound(t *testing.T) { } startTestRound(cs, height, round) - ensureNewStep(newRoundCh) // first round at first height - ensureNewStep(newBlockCh) // first block gets committed - ensureNewStep(newRoundCh) // first round at next height - deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round - <-timeoutCh - ensureNewStep(newRoundCh) // wait for the next round - ensureNewStep(newBlockCh) // now we can commit the block + ensureNewRoundStep(newRoundCh, height, round) // first round at first height + ensureNewEventOnChannel(newBlockCh) // first block gets committed + + height = height + 1 // moving to the next height + round = 0 + + ensureNewRoundStep(newRoundCh, height, round) // first round at next height + deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round + ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + + round = round + 1 // moving to the next round + ensureNewRoundStep(newRoundCh, height, round) // wait for the next round + ensureNewEventOnChannel(newBlockCh) // now we can commit the block } func deliverTxsRange(cs *ConsensusState, start, end int) { diff --git a/consensus/state.go b/consensus/state.go index 0100a1504..022023ae3 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -83,7 +83,8 @@ type ConsensusState struct { // internal state mtx sync.RWMutex cstypes.RoundState - state sm.State // State until height-1. + triggeredTimeoutPrecommit bool + state sm.State // State until height-1. // state changes may be triggered by: msgs from peers, // msgs from ourself, or by timeouts @@ -711,6 +712,7 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { cs.enterPrecommit(ti.Height, ti.Round) case cstypes.RoundStepPrecommitWait: cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) + cs.enterPrecommit(ti.Height, ti.Round) cs.enterNewRound(ti.Height, ti.Round+1) default: panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step)) @@ -772,6 +774,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { cs.ProposalBlockParts = nil } cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping + cs.triggeredTimeoutPrecommit = false cs.eventBus.PublishEventNewRound(cs.RoundStateEvent()) cs.metrics.Rounds.Set(float64(round)) @@ -782,7 +785,8 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height) if waitForTxs { if cs.config.CreateEmptyBlocksInterval > 0 { - cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, cstypes.RoundStepNewRound) + cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, + cstypes.RoundStepNewRound) } go cs.proposalHeartbeat(height, round) } else { @@ -1013,6 +1017,7 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) { func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) + // If a block is locked, prevote that. 
if cs.LockedBlock != nil { logger.Info("enterPrevote: Block was locked") @@ -1171,8 +1176,12 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) - if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) { - logger.Debug(fmt.Sprintf("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + if cs.Height != height || round < cs.Round || (cs.Round == round && cs.triggeredTimeoutPrecommit) { + logger.Debug( + fmt.Sprintf( + "enterPrecommitWait(%v/%v): Invalid args. "+ + "Current state is Height/Round: %v/%v/, triggeredTimeoutPrecommit:%v", + height, round, cs.Height, cs.Round, cs.triggeredTimeoutPrecommit)) return } if !cs.Votes.Precommits(round).HasTwoThirdsAny() { @@ -1182,7 +1191,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { defer func() { // Done enterPrecommitWait: - cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait) + cs.triggeredTimeoutPrecommit = true cs.newStep() }() @@ -1495,6 +1504,9 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { // Move onto the next step cs.enterPrevote(height, cs.Round) + if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added + cs.enterPrecommit(height, cs.Round) + } } else if cs.Step == cstypes.RoundStepCommit { // If we're waiting on the proposal block... cs.tryFinalizeCommit(height) @@ -1609,7 +1621,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, // Update Valid* if we can. // NOTE: our proposal block may be nil or not what received a polka.. // TODO: we may want to still update the ValidBlock and obtain it via gossipping - if !blockID.IsZero() && + if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round <= cs.Round) && cs.ProposalBlock.HashesTo(blockID.Hash) { @@ -1621,14 +1633,14 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } } - // If +2/3 prevotes for *anything* for this or future round: - if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() { - // Round-skip over to PrevoteWait or goto Precommit. 
- cs.enterNewRound(height, vote.Round) // if the vote is ahead of us + // If +2/3 prevotes for *anything* for future round: + if cs.Round < vote.Round && prevotes.HasTwoThirdsAny() { + // Round-skip if there is any 2/3+ of votes ahead of us + cs.enterNewRound(height, vote.Round) + } else if cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step { // current round if prevotes.HasTwoThirdsMajority() { cs.enterPrecommit(height, vote.Round) - } else { - cs.enterPrevote(height, vote.Round) // if the vote is ahead of us + } else if prevotes.HasTwoThirdsAny() { cs.enterPrevoteWait(height, vote.Round) } } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { @@ -1641,21 +1653,25 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, case types.VoteTypePrecommit: precommits := cs.Votes.Precommits(vote.Round) cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) + blockID, ok := precommits.TwoThirdsMajority() - if ok && len(blockID.Hash) != 0 { + if ok { // Executed as TwoThirdsMajority could be from a higher round cs.enterNewRound(height, vote.Round) cs.enterPrecommit(height, vote.Round) - cs.enterCommit(height, vote.Round) - - if cs.config.SkipTimeoutCommit && precommits.HasAll() { - cs.enterNewRound(cs.Height, 0) + if len(blockID.Hash) != 0 { + cs.enterCommit(height, vote.Round) + if cs.config.SkipTimeoutCommit && precommits.HasAll() { + cs.enterNewRound(cs.Height, 0) + } + } else { + cs.enterPrecommitWait(height, vote.Round) } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) cs.enterPrecommitWait(height, vote.Round) } + default: panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. } diff --git a/consensus/state_test.go b/consensus/state_test.go index e7d4b4fab..d80b0c8ae 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" cstypes "github.com/tendermint/tendermint/consensus/types" @@ -68,7 +69,7 @@ func TestStateProposerSelection0(t *testing.T) { startTestRound(cs1, height, round) // Wait for new round so proposer is set. - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height, round) // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() @@ -77,13 +78,13 @@ func TestStateProposerSelection0(t *testing.T) { } // Wait for complete proposal. - ensureNewProposal(proposalCh) + ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) // Wait for new round so next validator is set. - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height+1, 0) prop = cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, vss[1].GetAddress()) { @@ -94,27 +95,29 @@ func TestStateProposerSelection0(t *testing.T) { // Now let's do it all again, but starting from round 2 instead of 0 func TestStateProposerSelection2(t *testing.T) { cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators - + height := cs1.Height newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) incrementRound(vss[1:]...) 
- startTestRound(cs1, cs1.Height, 2) - ensureNewRound(newRoundCh) // wait for the new round + round := 2 + startTestRound(cs1, height, round) + + ensureNewRound(newRoundCh, height, round) // wait for the new round // everyone just votes nil. we get a new proposer each round for i := 0; i < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - correctProposer := vss[(i+2)%len(vss)].GetAddress() + correctProposer := vss[(i+round)%len(vss)].GetAddress() if !bytes.Equal(prop.Address, correctProposer) { panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address)) } rs := cs1.GetRoundState() signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...) - ensureNewRound(newRoundCh) // wait for the new round event each round + ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } @@ -132,7 +135,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { startTestRound(cs, height, round) // if we're not a validator, EnterPropose should timeout - ensureNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) if cs.GetRoundState().Proposal != nil { t.Error("Expected to make no proposal, since no privValidator") @@ -152,7 +155,7 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { cs.enterNewRound(height, round) cs.startRoutines(3) - ensureNewProposal(proposalCh) + ensureNewProposal(proposalCh, height, round) // Check that Proposal, ProposalBlock, ProposalBlockParts are set. rs := cs.GetRoundState() @@ -208,22 +211,19 @@ func TestStateBadProposal(t *testing.T) { startTestRound(cs1, height, round) // wait for proposal - ensureNewProposal(proposalCh) + ensureNewProposal(proposalCh, height, round) // wait for prevote - ensureNewVote(voteCh) - + ensureVote(voteCh, height, round, types.VoteTypePrevote) validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrevote) // wait for precommit - ensureNewVote(voteCh) - + ensureVote(voteCh, height, round, types.VoteTypePrecommit) validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } //---------------------------------------------------------------------------------------------------- @@ -246,21 +246,21 @@ func TestStateFullRound1(t *testing.T) { propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + // Maybe it would be better to call explicitly startRoutines(4) startTestRound(cs, height, round) - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height, round) - // grab proposal - re := <-propCh - propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() + ensureNewProposal(propCh, height, round) + propBlockHash := cs.GetRoundState().ProposalBlock.Hash() - ensureNewVote(voteCh) // wait for prevote + ensureVote(voteCh, height, round, types.VoteTypePrevote) // wait for prevote validatePrevote(t, cs, round, vss[0], propBlockHash) - ensureNewVote(voteCh) // wait for precommit + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // wait for 
precommit // we're going to roll right into new height - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height+1, 0) validateLastPrecommit(t, cs, vss[0], propBlockHash) } @@ -275,8 +275,8 @@ func TestStateFullRoundNil(t *testing.T) { cs.enterPrevote(height, round) cs.startRoutines(4) - ensureNewVote(voteCh) // prevote - ensureNewVote(voteCh) // precommit + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit // should prevote and precommit nil validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) @@ -295,7 +295,7 @@ func TestStateFullRound2(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, height, round) - ensureNewVote(voteCh) // prevote + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() @@ -303,10 +303,9 @@ func TestStateFullRound2(t *testing.T) { // prevote arrives from vs2: signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2) - ensureNewVote(voteCh) - - ensureNewVote(voteCh) //precommit + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.VoteTypePrecommit) //precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) @@ -314,10 +313,10 @@ func TestStateFullRound2(t *testing.T) { // precommit arrives from vs2: signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2) - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // wait to finish commit, propose in next height - ensureNewBlock(newBlockCh) + ensureNewBlock(newBlockCh, height) } //------------------------------------------------------------------------------------------ @@ -328,7 +327,7 @@ func TestStateFullRound2(t *testing.T) { func TestStateLockNoPOL(t *testing.T) { cs1, vss := randConsensusState(2) vs2 := vss[1] - height := cs1.Height + height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes @@ -343,41 +342,43 @@ func TestStateLockNoPOL(t *testing.T) { */ // start round and wait for prevote - cs1.enterNewRound(height, 0) + cs1.enterNewRound(height, round) cs1.startRoutines(0) - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - theBlockHash := rs.ProposalBlock.Hash() + ensureNewRound(newRoundCh, height, round) + + ensureNewProposal(proposalCh, height, round) + roundState := cs1.GetRoundState() + theBlockHash := roundState.ProposalBlock.Hash() + thePartSetHeader := roundState.ProposalBlockParts.Header() - ensureNewVote(voteCh) // prevote + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2) - ensureNewVote(voteCh) // prevote - - ensureNewVote(voteCh) // precommit + signAddVotes(cs1, types.VoteTypePrevote, theBlockHash, thePartSetHeader, vs2) + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, 
theBlockHash) // we should now be stuck in limbo forever, waiting for more precommits // lets add one for a different block - // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round hash := make([]byte, len(theBlockHash)) copy(hash, theBlockHash) hash[0] = byte((hash[0] + 1) % 255) - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - ensureNewVote(voteCh) // precommit + signAddVotes(cs1, types.VoteTypePrecommit, hash, thePartSetHeader, vs2) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) /// - ensureNewRound(newRoundCh) + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 1") /* Round2 (cs1, B) // B B2 @@ -386,43 +387,42 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) // now we're on a new round and not the proposer, so wait for timeout - re = <-timeoutProposeCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + rs := cs1.GetRoundState() if rs.ProposalBlock != nil { panic("Expected proposal block to be nil") } // wait to finish prevote - ensureNewVote(voteCh) - + ensureVote(voteCh, height, round, types.VoteTypePrevote) // we should have prevoted our locked block - validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash()) + validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // add a conflicting prevote from the other validator signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrevote) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. 
then wait for precommit - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds()) - - ensureNewVote(voteCh) // precommit + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit // the proposed block should still be locked and our precommit added // we should precommit nil and be locked on the proposal - validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // add conflicting precommit from vs2 - // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) - ensureNewRound(newRoundCh) + round = round + 1 // entering new round + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 2") /* Round3 (vs2, _) // B, B2 @@ -430,30 +430,29 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) - re = <-proposalCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewProposal(proposalCh, height, round) + rs = cs1.GetRoundState() // now we're on a new round and are the proposer if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { panic(fmt.Sprintf("Expected proposal block to be locked block. 
Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) } - ensureNewVote(voteCh) // prevote - - validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash()) + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrevote) - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureNewVote(voteCh) // precommit + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit - validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal + validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) cs2, _ := randConsensusState(2) // needed so generated block is different than locked block // before we time out into new round, set next proposal block @@ -464,7 +463,8 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) - ensureNewRound(newRoundCh) + round = round + 1 // entering new round + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 3") /* Round4 (vs2, C) // B C // B C @@ -476,35 +476,34 @@ func TestStateLockNoPOL(t *testing.T) { t.Fatal(err) } - ensureNewProposal(proposalCh) - ensureNewVote(voteCh) // prevote - + ensureNewProposal(proposalCh, height, round) + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote // prevote for locked block (not proposal) validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) + // prevote for proposed block signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensureNewVote(voteCh) - - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrevote) - validatePrecommit(t, cs1, 3, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) + validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - ensureNewVote(voteCh) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka func TestStateLockPOLRelock(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, 
types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) @@ -517,28 +516,25 @@ func TestStateLockPOLRelock(t *testing.T) { */ // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) + startTestRound(cs1, height, round) - ensureNewRound(newRoundCh) - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewRound(newRoundCh, height, round) + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() theBlockHash := rs.ProposalBlock.Hash() + theBlockParts := rs.ProposalBlockParts.Header() - ensureNewVote(voteCh) // prevote + ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote - signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4) - // prevotes - discardFromChan(voteCh, 3) + signAddVotes(cs1, types.VoteTypePrevote, theBlockHash, theBlockParts, vs2, vs3, vs4) - ensureNewVote(voteCh) // our precommit + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // our precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits from the rest signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3) - // precommites - discardFromChan(voteCh, 3) + signAddVotes(cs1, types.VoteTypePrecommit, theBlockHash, theBlockParts, vs3) // before we timeout to the new round set the new proposal prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -548,14 +544,15 @@ func TestStateLockPOLRelock(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + round = round + 1 // moving to the next round //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 1") /* @@ -566,60 +563,34 @@ func TestStateLockPOLRelock(t *testing.T) { // now we're on a new round and not the proposer // but we should receive the proposal - select { - case <-proposalCh: - case <-timeoutProposeCh: - <-proposalCh - } + ensureNewProposal(proposalCh, height, round) // go to prevote, prevote for locked block (not proposal), move on - ensureNewVote(voteCh) - validatePrevote(t, cs1, 0, vss[0], theBlockHash) + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], theBlockHash) // now lets add prevotes from everyone else for the new block signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - // prevotes - discardFromChan(voteCh, 3) - - // now either we go to PrevoteWait or Precommit - select { - case <-timeoutWaitCh: // we're in PrevoteWait, go to Precommit - // XXX: there's no guarantee we see the polka, this might be a precommit for nil, - // in which case the test fails! - <-voteCh - case <-voteCh: // we went straight to Precommit - } + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // we should have unlocked and locked on the new block - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3) - discardFromChan(voteCh, 2) - - be := <-newBlockCh - b := be.(types.EventDataNewBlockHeader) - re = <-newRoundCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - if rs.Height != 2 { - panic("Expected height to increment") - } + ensureNewBlockHeader(newBlockCh, height, propBlockHash) - if !bytes.Equal(b.Header.Hash(), propBlockHash) { - panic("Expected new block to be proposal block") - } + ensureNewRound(newRoundCh, height+1, 0) } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka func TestStateLockPOLUnlock(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - h := cs1.GetRoundState().Height - r := cs1.GetRoundState().Round + height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) @@ -634,75 +605,72 @@ func TestStateLockPOLUnlock(t *testing.T) { */ // start round and wait for propose and prevote - startTestRound(cs1, h, r) - ensureNewRound(newRoundCh) - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - theBlockHash := rs.ProposalBlock.Hash() + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, h, r, types.VoteTypePrevote) + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + theBlockHash := rs.ProposalBlock.Hash() + theBlockParts := rs.ProposalBlockParts.Header() - signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4) + ensureVote(voteCh, height, round, 
types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], theBlockHash) - ensureVote(voteCh, h, r, types.VoteTypePrecommit) + signAddVotes(cs1, types.VoteTypePrevote, theBlockHash, theBlockParts, vs2, vs3, vs4) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, r, 0, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) rs = cs1.GetRoundState() // add precommits from the rest signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3) + signAddVotes(cs1, types.VoteTypePrecommit, theBlockHash, theBlockParts, vs3) // before we time out into new round, set next proposal block prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockParts := propBlock.MakePartSet(partSize) - incrementRound(vs2, vs3, vs4) - // timeout to new round - re = <-timeoutWaitCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + rs = cs1.GetRoundState() lockedBlockHash := rs.LockedBlock.Hash() - //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 1") /* Round2 (vs2, C) // B nil nil nil // nil nil nil _ cs1 unlocks! */ - - // now we're on a new round and not the proposer, - // but we should receive the proposal - select { - case <-proposalCh: - case <-timeoutProposeCh: - <-proposalCh + //XXX: this isnt guaranteed to get there before the timeoutPropose ... + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) } + ensureNewProposal(proposalCh, height, round) + // go to prevote, prevote for locked block (not proposal) - ensureVote(voteCh, h, r+1, types.VoteTypePrevote) - validatePrevote(t, cs1, 0, vss[0], lockedBlockHash) + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], lockedBlockHash) + // now lets add prevotes from everyone else for nil (a polka!) 
signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) // the polka makes us unlock and precommit nil - ensureNewUnlock(unlockCh) - ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) + ensureNewUnlock(unlockCh, height, round) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // we should have unlocked and committed nil // NOTE: since we don't relock on nil, the lock round is 0 - validatePrecommit(t, cs1, r+1, 0, vss[0], nil, nil) + validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height, round+1) } // 4 vals @@ -712,8 +680,7 @@ func TestStateLockPOLUnlock(t *testing.T) { func TestStateLockPOLSafety1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - h := cs1.GetRoundState().Height - r := cs1.GetRoundState().Round + height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes @@ -724,38 +691,28 @@ func TestStateLockPOLSafety1(t *testing.T) { voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - ensureNewRound(newRoundCh) - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - propBlock := rs.ProposalBlock + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, h, r, types.VoteTypePrevote) + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + propBlock := rs.ProposalBlock - validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) // the others sign a polka but we don't see it prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) - // before we time out into new round, set next proposer - // and next proposal block - - //TODO: Should we remove this? - /* - _, v1 := cs1.Validators.GetByAddress(vss[0].Address) - v1.VotingPower = 1 - if updated := cs1.Validators.Update(v1); !updated { - panic("failed to update validator") - }*/ - t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) // we do see them precommit nil signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureVote(voteCh, h, r, types.VoteTypePrecommit) + // cs1 precommit nil + ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) - ensureNewRound(newRoundCh) t.Log("### ONTO ROUND 1") prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -764,6 +721,9 @@ func TestStateLockPOLSafety1(t *testing.T) { incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) + //XXX: this isnt guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) @@ -773,39 +733,34 @@ func TestStateLockPOLSafety1(t *testing.T) { // a polka happened but we didn't see it! 
*/ - // now we're on a new round and not the proposer, - // but we should receive the proposal - select { - case re = <-proposalCh: - case <-timeoutProposeCh: - re = <-proposalCh - } + ensureNewProposal(proposalCh, height, round) - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + rs = cs1.GetRoundState() if rs.LockedBlock != nil { panic("we should not be locked!") } t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) + // go to prevote, prevote for proposal block - ensureVote(voteCh, h, r+1, types.VoteTypePrevote) - validatePrevote(t, cs1, 1, vss[0], propBlockHash) + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) - + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // we should have precommitted - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) + signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round - ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 2") /*Round3 @@ -813,22 +768,22 @@ func TestStateLockPOLSafety1(t *testing.T) { */ // timeout of propose - ensureNewTimeout(timeoutProposeCh, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) // finish prevote - ensureVote(voteCh, h, r+2, types.VoteTypePrevote) - + ensureVote(voteCh, height, round, types.VoteTypePrevote) // we should prevote what we're locked on - validatePrevote(t, cs1, 2, vss[0], propBlockHash) + validatePrevote(t, cs1, round, vss[0], propBlockHash) newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + // before prevotes from the previous round are added // add prevotes from the earlier round addVotes(cs1, prevotes...) t.Log("Done adding prevotes!") - ensureNoNewStep(newStepCh) + ensureNoNewRoundStep(newStepCh) } // 4 vals. 
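
The hunks above are part of a broader refactor of these tests: bare channel reads (and the old discardFromChan helper) are replaced by ensureNewRound/ensureNewProposal/ensureNewTimeout/ensureVote assertions that carry the expected height and round, plus validatePrevote/validatePrecommit checks. In the validatePrecommit(t, cs, thisRound, lockRound, vs, votedHash, lockedHash) calls, thisRound is the round the precommit was cast in, lockRound and lockedHash describe the expected lock state afterwards, and a nil votedHash means a nil precommit. As a rough illustration of the helper shape (it lives in the consensus test package alongside ensureTimeout; only the signature and the timeout panic message appear in this series, the event-payload handling below is an assumption, and a later patch in the series changes voteType to types.SignedMsgType):

```go
// Minimal sketch of the ensureVote helper used throughout these tests.
func ensureVote(voteCh <-chan interface{}, height int64, round int, voteType byte) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for NewVote event")
	case ev := <-voteCh:
		// Assumption: the per-voter subscription delivers types.EventDataVote.
		vote := ev.(types.EventDataVote).Vote
		if vote.Height != height || vote.Round != round || vote.Type != voteType {
			panic(fmt.Sprintf("expected vote %v at %d/%d, got %v at %d/%d",
				voteType, height, round, vote.Type, vote.Height, vote.Round))
		}
	}
}
```
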
@@ -841,13 +796,11 @@ func TestStateLockPOLSafety1(t *testing.T) { func TestStateLockPOLSafety2(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - h := cs1.GetRoundState().Height - r := cs1.GetRoundState().Round + height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) @@ -855,7 +808,7 @@ func TestStateLockPOLSafety2(t *testing.T) { // the block for R0: gets polkad but we miss it // (even though we signed it, shhh) - _, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round) + _, propBlock0 := decideProposal(cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() propBlockParts0 := propBlock0.MakePartSet(partSize) @@ -870,25 +823,25 @@ func TestStateLockPOLSafety2(t *testing.T) { incrementRound(vs2, vs3, vs4) - cs1.updateRoundStep(0, cstypes.RoundStepPrecommitWait) - + round = round + 1 // moving to the next round t.Log("### ONTO Round 1") // jump in at round 1 - startTestRound(cs1, h, r+1) - ensureNewRound(newRoundCh) + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh) + ensureNewProposal(proposalCh, height, round) - ensureVote(voteCh, h, r+1, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], propBlockHash1) signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) - ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1) + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) // add precommits from the rest signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) @@ -897,10 +850,11 @@ func TestStateLockPOLSafety2(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout of precommit wait to new round - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + round = round + 1 // moving to the next round // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(h, 2, propBlockParts0.Header(), 0, propBlockID1) + newProp := types.NewProposal(height, round, propBlockParts0.Header(), 0, propBlockID1) if err := vs3.SignProposal(config.ChainID(), newProp); err != nil { t.Fatal(err) } @@ -911,25 +865,16 @@ func TestStateLockPOLSafety2(t *testing.T) { // Add the pol votes addVotes(cs1, prevotes...) 
- ensureNewRound(newRoundCh) + ensureNewRound(newRoundCh, height, round) t.Log("### ONTO Round 2") /*Round2 // now we see the polka from round 1, but we shouldnt unlock */ + ensureNewProposal(proposalCh, height, round) - select { - case <-timeoutProposeCh: - <-proposalCh - case <-proposalCh: - } - - select { - case <-unlockCh: - panic("validator unlocked using an old polka") - case <-voteCh: - // prevote our locked block - } - validatePrevote(t, cs1, 2, vss[0], propBlockHash1) + ensureNoNewUnlock(unlockCh) + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], propBlockHash1) } @@ -939,18 +884,110 @@ func TestStateLockPOLSafety2(t *testing.T) { func TestWaitingTimeoutOnNilPolka(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) // start round - startTestRound(cs1, cs1.Height, 0) - ensureNewRound(newRoundCh) + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) - ensureNewRound(newRoundCh) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewRound(newRoundCh, height, round+1) +} + +// 4 vals, 3 Prevotes for nil from the higher round. +// What we want: +// P0 waits for timeoutPropose in the next round before entering prevote +func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + ensureVote(voteCh, height, round, types.VoteTypePrevote) + + incrementRound(vss[1:]...) + signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) + + rs := cs1.GetRoundState() + assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], nil) +} + +// 4 vals, 3 Precommits for nil from the higher round. +// What we want: +// P0 jump to higher round, precommit and start precommit wait +func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + ensureVote(voteCh, height, round, types.VoteTypePrevote) + + incrementRound(vss[1:]...) 
+ signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) + + ensureVote(voteCh, height, round, types.VoteTypePrecommit) + validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) +} + +// 4 vals, 3 Prevotes for nil in the current round. +// What we want: +// P0 wait for timeoutPropose to expire before sending prevote. +func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, 1 + + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round in which PO is not proposer + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + incrementRound(vss[1:]...) + signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], nil) } //------------------------------------------------------------------------------------------ @@ -1041,8 +1078,7 @@ func TestStateSlashingPrecommits(t *testing.T) { func TestStateHalt1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - h := cs1.GetRoundState().Height - r := cs1.GetRoundState().Round + height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -1052,20 +1088,21 @@ func TestStateHalt1(t *testing.T) { voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - ensureNewRound(newRoundCh) - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() propBlock := rs.ProposalBlock propBlockParts := propBlock.MakePartSet(partSize) - ensureVote(voteCh, h, r, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.VoteTypePrevote) - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4) - ensureVote(voteCh, h, r, types.VoteTypePrecommit) + signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) + ensureVote(voteCh, height, round, types.VoteTypePrecommit) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash()) + validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal @@ -1076,9 +1113,12 @@ func TestStateHalt1(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds()) - re = <-newRoundCh - rs = 
re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + round = round + 1 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + rs = cs1.GetRoundState() t.Log("### ONTO ROUND 1") /*Round2 @@ -1087,20 +1127,16 @@ func TestStateHalt1(t *testing.T) { */ // go to prevote, prevote for locked block - ensureVote(voteCh, h, r+1, types.VoteTypePrevote) - validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash()) + ensureVote(voteCh, height, round, types.VoteTypePrevote) + validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // now we receive the precommit from the previous round addVotes(cs1, precommit4) // receiving that precommit should take us straight to commit - ensureNewBlock(newBlockCh) - re = <-newRoundCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewBlock(newBlockCh, height) - if rs.Height != 2 { - panic("expected height to increment") - } + ensureNewRound(newRoundCh, height+1, 0) } func TestStateOutputsBlockPartsStats(t *testing.T) { @@ -1186,10 +1222,3 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} { } return out } - -// discardFromChan reads n values from the channel. -func discardFromChan(ch <-chan interface{}, n int) { - for i := 0; i < n; i++ { - <-ch - } -} diff --git a/node/node.go b/node/node.go index d1ab0f86a..9c409787d 100644 --- a/node/node.go +++ b/node/node.go @@ -106,7 +106,7 @@ func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { if config.Prometheus { return cs.PrometheusMetrics(config.Namespace), p2p.PrometheusMetrics(config.Namespace), - mempl.PrometheusMetrics(config.Namespace), sm.PrometheusMetrics(config.Namespace) + mempl.PrometheusMetrics(config.Namespace), sm.PrometheusMetrics(config.Namespace) } return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() } diff --git a/p2p/metrics.go b/p2p/metrics.go index 86a205056..b066fb317 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -56,7 +56,6 @@ func PrometheusMetrics(namespace string) *Metrics { Name: "num_txs", Help: "Number of transactions submitted by each peer.", }, []string{"peer_id"}), - } } diff --git a/state/execution.go b/state/execution.go index d5a1a1617..611efa516 100644 --- a/state/execution.go +++ b/state/execution.go @@ -49,7 +49,7 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { // NewBlockExecutor returns a new BlockExecutor with a NopEventBus. // Call SetEventBus to provide one. 
func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, - mempool Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor { + mempool Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor { res := &BlockExecutor{ db: db, proxyApp: proxyApp, @@ -95,7 +95,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b startTime := time.Now().UnixNano() abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db) endTime := time.Now().UnixNano() - blockExec.metrics.BlockProcessingTime.Observe(float64(endTime - startTime) / 1000000) + blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { return state, ErrProxyAppConn(err) } @@ -198,11 +198,11 @@ func (blockExec *BlockExecutor) Commit( // Executes block's transactions on proxyAppConn. // Returns a list of transaction results and updates to the validator set func execBlockOnProxyApp( - logger log.Logger, - proxyAppConn proxy.AppConnConsensus, - block *types.Block, - lastValSet *types.ValidatorSet, - stateDB dbm.DB, + logger log.Logger, + proxyAppConn proxy.AppConnConsensus, + block *types.Block, + lastValSet *types.ValidatorSet, + stateDB dbm.DB, ) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 @@ -360,10 +360,10 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat // updateState returns a new State updated according to the header and responses. func updateState( - state State, - blockID types.BlockID, - header *types.Header, - abciResponses *ABCIResponses, + state State, + blockID types.BlockID, + header *types.Header, + abciResponses *ABCIResponses, ) (State, error) { // Copy the valset so we can apply changes from EndBlock @@ -448,11 +448,11 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit). 
func ExecCommitBlock( - appConnConsensus proxy.AppConnConsensus, - block *types.Block, - logger log.Logger, - lastValSet *types.ValidatorSet, - stateDB dbm.DB, + appConnConsensus proxy.AppConnConsensus, + block *types.Block, + logger log.Logger, + lastValSet *types.ValidatorSet, + stateDB dbm.DB, ) ([]byte, error) { _, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB) if err != nil { diff --git a/state/metrics.go b/state/metrics.go index 7acbafa30..4e99753f0 100644 --- a/state/metrics.go +++ b/state/metrics.go @@ -2,9 +2,9 @@ package state import ( "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" - "github.com/go-kit/kit/metrics/discard" ) const MetricsSubsystem = "state" From 1b51cf3f4692f6c9157fcc5f5a71f203002a0a07 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Fri, 12 Oct 2018 14:48:00 -0700 Subject: [PATCH 068/113] Remove unnecessary layer of indirection / unnecessary allocation of hashes (#2620) --- crypto/merkle/simple_tree.go | 31 +++++++++---------------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go index 9677aef4e..45e0c5c56 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/simple_tree.go @@ -21,12 +21,16 @@ func SimpleHashFromTwoHashes(left, right []byte) []byte { // SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice, // in the provided order. func SimpleHashFromByteSlices(items [][]byte) []byte { - hashes := make([][]byte, len(items)) - for i, item := range items { - hash := tmhash.Sum(item) - hashes[i] = hash + switch len(items) { + case 0: + return nil + case 1: + return tmhash.Sum(items[0]) + default: + left := SimpleHashFromByteSlices(items[:(len(items)+1)/2]) + right := SimpleHashFromByteSlices(items[(len(items)+1)/2:]) + return SimpleHashFromTwoHashes(left, right) } - return simpleHashFromHashes(hashes) } // SimpleHashFromMap computes a Merkle tree from sorted map. @@ -40,20 +44,3 @@ func SimpleHashFromMap(m map[string][]byte) []byte { } return sm.Hash() } - -//---------------------------------------------------------------- - -// Expects hashes! -func simpleHashFromHashes(hashes [][]byte) []byte { - // Recursive impl. 
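
For context on the crypto/merkle patch above: SimpleHashFromByteSlices now recurses directly over the byte slices, hashing a leaf only at the single-item base case and splitting at (len(items)+1)/2, instead of pre-hashing every item and delegating to the removed simpleHashFromHashes. A hedged usage sketch under those rules; the import path matches the diff header, and the printed value is only illustrative:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	items := [][]byte{[]byte("a"), []byte("b"), []byte("c")}
	// For len(items) == 3 the split point is (3+1)/2 == 2:
	//   left  = SimpleHashFromByteSlices(items[:2])  // inner node over two leaf hashes
	//   right = SimpleHashFromByteSlices(items[2:])  // == tmhash.Sum(items[2]), the base case
	//   root  = SimpleHashFromTwoHashes(left, right)
	root := merkle.SimpleHashFromByteSlices(items)
	fmt.Printf("root: %X\n", root)
}
```
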
- switch len(hashes) { - case 0: - return nil - case 1: - return hashes[0] - default: - left := simpleHashFromHashes(hashes[:(len(hashes)+1)/2]) - right := simpleHashFromHashes(hashes[(len(hashes)+1)/2:]) - return SimpleHashFromTwoHashes(left, right) - } -} From 8888595b94d9b15df548b6ab7a08840fc43265d8 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Sat, 13 Oct 2018 01:21:46 +0200 Subject: [PATCH 069/113] [R4R] Fixed sized and reordered fields for Vote/Proposal/Heartbeat SignBytes (#2598) * WIP: switching to fixed offsets for SignBytes * add version field to sign bytes and update order * more comments on test-cases and add a tc with a chainID * remove amino:"write_empty" tag - it doesn't affect if default fixed size fields ((u)int64) are written or not - add comment about int->int64 casting * update CHANGELOG_PENDING * update documentation * add back link to issue #1622 in documentation * remove JSON tags and add (failing test-case) * fix failing test * update test-vectors due to added `Type` field * change Type field from string to byte and add new type alias - SignedMsgType replaces VoteTypePrevote, VoteTypePrecommit and adds new ProposalType to separate votes from proposal when signed - update test-vectors * fix remains from rebasing * use SignMessageType instead of byte everywhere * fixes from review --- CHANGELOG_PENDING.md | 20 ++- consensus/byzantine_test.go | 4 +- consensus/common_test.go | 10 +- consensus/reactor.go | 44 +++--- consensus/replay_test.go | 2 +- consensus/state.go | 30 ++-- consensus/state_test.go | 196 ++++++++++++------------ consensus/types/height_vote_set.go | 18 +-- consensus/types/height_vote_set_test.go | 2 +- docs/spec/blockchain/blockchain.md | 5 +- docs/spec/blockchain/encoding.md | 21 +-- lite/helpers.go | 2 +- privval/priv_validator.go | 4 +- privval/priv_validator_test.go | 6 +- privval/socket_test.go | 4 +- state/execution_test.go | 4 +- types/block.go | 6 +- types/block_test.go | 16 +- types/canonical.go | 80 +++++----- types/evidence_test.go | 2 +- types/signed_msg_type.go | 27 ++++ types/test_util.go | 2 +- types/validator_set.go | 4 +- types/validator_set_test.go | 2 +- types/vote.go | 38 ++--- types/vote_set.go | 12 +- types/vote_set_test.go | 30 ++-- types/vote_test.go | 108 ++++++++++++- 28 files changed, 407 insertions(+), 292 deletions(-) create mode 100644 types/signed_msg_type.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 0f919bdaa..f82ddbc2b 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -24,11 +24,16 @@ BREAKING CHANGES: * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`. * [crypto/merkle & lite] \#2298 Various changes to accomodate General Merkle trees * [crypto/merkle] \#2595 Remove all Hasher objects in favor of byte slices + * [types] \#2598 `VoteTypeXxx` are now * Blockchain Protocol - * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. + * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: + * \#2459 Use amino encoding instead of JSON in `SignBytes`. + * \#2598 Reorder fields and use fixed sized encoding. + * \#2598 Change `Type` field fromt `string` to `byte` and use new + `SignedMsgType` to enumerate. 
* [types] \#2512 Remove the pubkey field from the validator hash - * [state] \#2587 require block.Time of the fist block to be genesis time + * [state] \#2587 Require block.Time of the fist block to be genesis time * P2P Protocol @@ -37,9 +42,10 @@ FEATURES: - [abci] \#2557 Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` IMPROVEMENTS: -- [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics -- [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics -- [config] \#2232 added ValidateBasic method, which performs basic checks +- Additional Metrics + - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) + - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) +- [config] \#2232 Added ValidateBasic method, which performs basic checks - [crypto/ed25519] \#2558 Switch to use latest `golang.org/x/crypto` through our fork at github.com/tendermint/crypto - [tools] \#2238 Binary dependencies are now locked to a specific git commit @@ -50,8 +56,8 @@ BUG FIXES: - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time - [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) wait for timeoutPrecommit before starting next round -- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) wait for -Proposal or timeoutProposal before entering prevote +- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) wait for +Proposal or timeoutProposal before entering prevote - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function - [common/bit_array] Fixed a bug in the `Sub` function (@james-ray) diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 3903e6b9e..60c2b0dbd 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -226,8 +226,8 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p. 
// votes cs.mtx.Lock() - prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header()) - precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header()) + prevote, _ := cs.signVote(types.PrevoteType, blockHash, parts.Header()) + precommit, _ := cs.signVote(types.PrecommitType, blockHash, parts.Header()) cs.mtx.Unlock() peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote})) diff --git a/consensus/common_test.go b/consensus/common_test.go index cf76e924d..26f8e3e57 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -71,7 +71,7 @@ func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validato } } -func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { +func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { vote := &types.Vote{ ValidatorIndex: vs.Index, ValidatorAddress: vs.PrivValidator.GetAddress(), @@ -86,7 +86,7 @@ func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartS } // Sign vote for type/hash/header -func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote { +func signVote(vs *validatorStub, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { v, err := vs.signVote(voteType, hash, header) if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) @@ -94,7 +94,7 @@ func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSe return v } -func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { +func signVotes(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { votes[i] = signVote(vs, voteType, hash, header) @@ -144,7 +144,7 @@ func addVotes(to *ConsensusState, votes ...*types.Vote) { } } -func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) { +func signAddVotes(to *ConsensusState, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) { votes := signVotes(voteType, hash, header, vss...) addVotes(to, votes...) 
} @@ -463,7 +463,7 @@ func ensureNewUnlock(unlockCh <-chan interface{}, height int64, round int) { } func ensureVote(voteCh <-chan interface{}, height int64, round int, - voteType byte) { + voteType types.SignedMsgType) { select { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewVote event") diff --git a/consensus/reactor.go b/consensus/reactor.go index ca63e8992..6643273cb 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -237,9 +237,9 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // (and consequently shows which we don't have) var ourVotes *cmn.BitArray switch msg.Type { - case types.VoteTypePrevote: + case types.PrevoteType: ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case types.VoteTypePrecommit: + case types.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: conR.Logger.Error("Bad VoteSetBitsMessage field Type") @@ -317,9 +317,9 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) if height == msg.Height { var ourVotes *cmn.BitArray switch msg.Type { - case types.VoteTypePrevote: + case types.PrevoteType: ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case types.VoteTypePrecommit: + case types.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: conR.Logger.Error("Bad VoteSetBitsMessage field Type") @@ -739,7 +739,7 @@ OUTER_LOOP: peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ Height: prs.Height, Round: prs.Round, - Type: types.VoteTypePrevote, + Type: types.PrevoteType, BlockID: maj23, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -756,7 +756,7 @@ OUTER_LOOP: peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ Height: prs.Height, Round: prs.Round, - Type: types.VoteTypePrecommit, + Type: types.PrecommitType, BlockID: maj23, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -773,7 +773,7 @@ OUTER_LOOP: peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ Height: prs.Height, Round: prs.ProposalPOLRound, - Type: types.VoteTypePrevote, + Type: types.PrevoteType, BlockID: maj23, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -792,7 +792,7 @@ OUTER_LOOP: peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ Height: prs.Height, Round: commit.Round(), - Type: types.VoteTypePrecommit, + Type: types.PrecommitType, BlockID: commit.BlockID, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -1022,7 +1022,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote return nil, false } - height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size() + height, round, type_, size := votes.Height(), votes.Round(), types.SignedMsgType(votes.Type()), votes.Size() // Lazily set data using 'votes'. 
if votes.IsCommit() { @@ -1041,7 +1041,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote return nil, false } -func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.BitArray { +func (ps *PeerState) getVoteBitArray(height int64, round int, type_ types.SignedMsgType) *cmn.BitArray { if !types.IsVoteTypeValid(type_) { return nil } @@ -1049,25 +1049,25 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B if ps.PRS.Height == height { if ps.PRS.Round == round { switch type_ { - case types.VoteTypePrevote: + case types.PrevoteType: return ps.PRS.Prevotes - case types.VoteTypePrecommit: + case types.PrecommitType: return ps.PRS.Precommits } } if ps.PRS.CatchupCommitRound == round { switch type_ { - case types.VoteTypePrevote: + case types.PrevoteType: return nil - case types.VoteTypePrecommit: + case types.PrecommitType: return ps.PRS.CatchupCommit } } if ps.PRS.ProposalPOLRound == round { switch type_ { - case types.VoteTypePrevote: + case types.PrevoteType: return ps.PRS.ProposalPOL - case types.VoteTypePrecommit: + case types.PrecommitType: return nil } } @@ -1076,9 +1076,9 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B if ps.PRS.Height == height+1 { if ps.PRS.LastCommitRound == round { switch type_ { - case types.VoteTypePrevote: + case types.PrevoteType: return nil - case types.VoteTypePrecommit: + case types.PrecommitType: return ps.PRS.LastCommit } } @@ -1187,7 +1187,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) { ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) } -func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) { +func (ps *PeerState) setHasVote(height int64, round int, type_ types.SignedMsgType, index int) { logger := ps.logger.With("peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", fmt.Sprintf("%d/%d", height, round)) logger.Debug("setHasVote", "type", type_, "index", index) @@ -1453,7 +1453,7 @@ func (m *VoteMessage) String() string { type HasVoteMessage struct { Height int64 Round int - Type byte + Type types.SignedMsgType Index int } @@ -1468,7 +1468,7 @@ func (m *HasVoteMessage) String() string { type VoteSetMaj23Message struct { Height int64 Round int - Type byte + Type types.SignedMsgType BlockID types.BlockID } @@ -1483,7 +1483,7 @@ func (m *VoteSetMaj23Message) String() string { type VoteSetBitsMessage struct { Height int64 Round int - Type byte + Type types.SignedMsgType BlockID types.BlockID Votes *cmn.BitArray } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 7a828da64..160e777c3 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -542,7 +542,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { return nil, nil, err } case *types.Vote: - if p.Type == types.VoteTypePrecommit { + if p.Type == types.PrecommitType { thisBlockCommit = &types.Commit{ BlockID: p.BlockID, Precommits: []*types.Vote{p}, diff --git a/consensus/state.go b/consensus/state.go index 022023ae3..37047aa30 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -460,7 +460,7 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators) + lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, 
seenCommit.Round(), types.PrecommitType, state.LastValidators) for _, precommit := range seenCommit.Precommits { if precommit == nil { continue @@ -1021,14 +1021,14 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { // If a block is locked, prevote that. if cs.LockedBlock != nil { logger.Info("enterPrevote: Block was locked") - cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + cs.signAddVote(types.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) return } // If ProposalBlock is nil, prevote nil. if cs.ProposalBlock == nil { logger.Info("enterPrevote: ProposalBlock is nil") - cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1037,7 +1037,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) - cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1045,7 +1045,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { // NOTE: the proposal signature is validated when it is received, // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) logger.Info("enterPrevote: ProposalBlock is valid") - cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) } // Enter: any +2/3 prevotes at next round. @@ -1103,7 +1103,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { } else { logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.") } - cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) return } @@ -1127,7 +1127,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { cs.LockedBlockParts = nil cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) } - cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) return } @@ -1138,7 +1138,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking") cs.LockedRound = round cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) + cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader) return } @@ -1153,7 +1153,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { cs.LockedBlock = cs.ProposalBlock cs.LockedBlockParts = cs.ProposalBlockParts cs.eventBus.PublishEventLock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) + cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader) return } @@ -1169,7 +1169,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) } cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) } // Enter: any +2/3 precommits for next round. 
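
The types.PrevoteType, types.PrecommitType and types.IsVoteTypeValid identifiers used in the consensus and reactor hunks above come from the new types/signed_msg_type.go file this patch creates. A plausible sketch of that file; the names follow the commit message ("SignedMsgType replaces VoteTypePrevote, VoteTypePrecommit and adds new ProposalType"), but the concrete byte values chosen below are assumptions:

```go
package types

// SignedMsgType is a type of signed message in the consensus.
type SignedMsgType byte

const (
	// Votes
	PrevoteType   SignedMsgType = 0x01
	PrecommitType SignedMsgType = 0x02

	// Proposals
	ProposalType SignedMsgType = 0x20
)

// IsVoteTypeValid reports whether t is a type a Vote may carry
// (proposals are signed messages but not votes).
func IsVoteTypeValid(t SignedMsgType) bool {
	switch t {
	case PrevoteType, PrecommitType:
		return true
	default:
		return false
	}
}
```
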
@@ -1550,7 +1550,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, // A precommit for the previous height? // These come in while we wait timeoutCommit if vote.Height+1 == cs.Height { - if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.VoteTypePrecommit) { + if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.PrecommitType) { // TODO: give the reason .. // fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.") return added, ErrVoteHeightMismatch @@ -1593,7 +1593,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, cs.evsw.FireEvent(types.EventVote, vote) switch vote.Type { - case types.VoteTypePrevote: + case types.PrevoteType: prevotes := cs.Votes.Prevotes(vote.Round) cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort()) @@ -1650,7 +1650,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } } - case types.VoteTypePrecommit: + case types.PrecommitType: precommits := cs.Votes.Precommits(vote.Round) cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) @@ -1679,7 +1679,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, return } -func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { +func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { addr := cs.privValidator.GetAddress() valIndex, _ := cs.Validators.GetByAddress(addr) @@ -1714,7 +1714,7 @@ func (cs *ConsensusState) voteTime() time.Time { } // sign the vote and publish on internalMsgQueue -func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote { +func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { // if we don't have a key or we're not in the validator set, do nothing if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { return nil diff --git a/consensus/state_test.go b/consensus/state_test.go index d80b0c8ae..229d7e7bb 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -81,7 +81,7 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) + signAddVotes(cs1, types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) // Wait for new round so next validator is set. ensureNewRound(newRoundCh, height+1, 0) @@ -116,7 +116,7 @@ func TestStateProposerSelection2(t *testing.T) { } rs := cs1.GetRoundState() - signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...) + signAddVotes(cs1, types.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...) ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) 
} @@ -214,16 +214,17 @@ func TestStateBadProposal(t *testing.T) { ensureNewProposal(proposalCh, height, round) // wait for prevote - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensureVote(voteCh, height, round, types.PrevoteType) // wait for precommit - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) + signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } //---------------------------------------------------------------------------------------------------- @@ -254,10 +255,10 @@ func TestStateFullRound1(t *testing.T) { ensureNewProposal(propCh, height, round) propBlockHash := cs.GetRoundState().ProposalBlock.Hash() - ensureVote(voteCh, height, round, types.VoteTypePrevote) // wait for prevote + ensureVote(voteCh, height, round, types.PrevoteType) // wait for prevote validatePrevote(t, cs, round, vss[0], propBlockHash) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) // wait for precommit + ensureVote(voteCh, height, round, types.PrecommitType) // wait for precommit // we're going to roll right into new height ensureNewRound(newRoundCh, height+1, 0) @@ -275,8 +276,8 @@ func TestStateFullRoundNil(t *testing.T) { cs.enterPrevote(height, round) cs.startRoutines(4) - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote - ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit + ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensureVote(voteCh, height, round, types.PrecommitType) // precommit // should prevote and precommit nil validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) @@ -295,25 +296,25 @@ func TestStateFullRound2(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, height, round) - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.PrevoteType) // prevote // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() // prevote arrives from vs2: - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2) - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + signAddVotes(cs1, types.PrevoteType, propBlockHash, propPartsHeader, vs2) + ensureVote(voteCh, height, round, types.PrevoteType) // prevote - ensureVote(voteCh, height, round, types.VoteTypePrecommit) //precommit + ensureVote(voteCh, height, round, types.PrecommitType) //precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + signAddVotes(cs1, types.PrecommitType, propBlockHash, propPartsHeader, vs2) + 
ensureVote(voteCh, height, round, types.PrecommitType) // wait to finish commit, propose in next height ensureNewBlock(newBlockCh, height) @@ -352,14 +353,14 @@ func TestStateLockNoPOL(t *testing.T) { theBlockHash := roundState.ProposalBlock.Hash() thePartSetHeader := roundState.ProposalBlockParts.Header() - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.PrevoteType) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(cs1, types.VoteTypePrevote, theBlockHash, thePartSetHeader, vs2) - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + signAddVotes(cs1, types.PrevoteType, theBlockHash, thePartSetHeader, vs2) + ensureVote(voteCh, height, round, types.PrevoteType) // prevote - ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit + ensureVote(voteCh, height, round, types.PrecommitType) // precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) @@ -368,8 +369,8 @@ func TestStateLockNoPOL(t *testing.T) { hash := make([]byte, len(theBlockHash)) copy(hash, theBlockHash) hash[0] = byte((hash[0] + 1) % 255) - signAddVotes(cs1, types.VoteTypePrecommit, hash, thePartSetHeader, vs2) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit + signAddVotes(cs1, types.PrecommitType, hash, thePartSetHeader, vs2) + ensureVote(voteCh, height, round, types.PrecommitType) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round @@ -396,26 +397,26 @@ func TestStateLockNoPOL(t *testing.T) { } // wait to finish prevote - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) // we should have prevoted our locked block validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // add a conflicting prevote from the other validator - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + signAddVotes(cs1, types.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + ensureVote(voteCh, height, round, types.PrevoteType) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. 
then wait for precommit ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit + ensureVote(voteCh, height, round, types.PrecommitType) // precommit // the proposed block should still be locked and our precommit added // we should precommit nil and be locked on the proposal validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // add conflicting precommit from vs2 - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + signAddVotes(cs1, types.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + ensureVote(voteCh, height, round, types.PrecommitType) // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound @@ -438,19 +439,19 @@ func TestStateLockNoPOL(t *testing.T) { panic(fmt.Sprintf("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) } - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.PrevoteType) // prevote validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) + ensureVote(voteCh, height, round, types.PrevoteType) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) // precommit + ensureVote(voteCh, height, round, types.PrecommitType) // precommit validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height + ensureVote(voteCh, height, round, types.PrecommitType) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) @@ -477,20 +478,20 @@ func TestStateLockNoPOL(t *testing.T) { } ensureNewProposal(proposalCh, height, round) - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.PrevoteType) // prevote // prevote for locked block (not proposal) validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) // prevote for proposed block - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensureVote(voteCh, height, round, types.PrevoteType) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal - signAddVotes(cs1, types.VoteTypePrecommit, 
propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height + ensureVote(voteCh, height, round, types.PrecommitType) } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka @@ -524,17 +525,17 @@ func TestStateLockPOLRelock(t *testing.T) { theBlockHash := rs.ProposalBlock.Hash() theBlockParts := rs.ProposalBlockParts.Header() - ensureVote(voteCh, height, round, types.VoteTypePrevote) // prevote + ensureVote(voteCh, height, round, types.PrevoteType) // prevote - signAddVotes(cs1, types.VoteTypePrevote, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) // our precommit + ensureVote(voteCh, height, round, types.PrecommitType) // our precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, theBlockHash, theBlockParts, vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) // before we timeout to the new round set the new proposal prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -566,17 +567,17 @@ func TestStateLockPOLRelock(t *testing.T) { ensureNewProposal(proposalCh, height, round) // go to prevote, prevote for locked block (not proposal), move on - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], theBlockHash) // now lets add prevotes from everyone else for the new block - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) // we should have unlocked and locked on the new block validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3) + signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3) ensureNewBlockHeader(newBlockCh, height, propBlockHash) ensureNewRound(newRoundCh, height+1, 0) @@ -613,20 +614,20 @@ func TestStateLockPOLUnlock(t *testing.T) { theBlockHash := rs.ProposalBlock.Hash() theBlockParts := rs.ProposalBlockParts.Header() - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], theBlockHash) - signAddVotes(cs1, types.VoteTypePrevote, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, 
round, vss[0], theBlockHash, theBlockHash) rs = cs1.GetRoundState() // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, theBlockHash, theBlockParts, vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) // before we time out into new round, set next proposal block prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -655,21 +656,20 @@ func TestStateLockPOLUnlock(t *testing.T) { ensureNewProposal(proposalCh, height, round) // go to prevote, prevote for locked block (not proposal) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], lockedBlockHash) - // now lets add prevotes from everyone else for nil (a polka!) - signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // the polka makes us unlock and precommit nil ensureNewUnlock(unlockCh, height, round) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) // we should have unlocked and committed nil // NOTE: since we don't relock on nil, the lock round is 0 validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) ensureNewRound(newRoundCh, height, round+1) } @@ -698,19 +698,19 @@ func TestStateLockPOLSafety1(t *testing.T) { rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) // the others sign a polka but we don't see it - prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + prevotes := signVotes(types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) // we do see them precommit nil - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // cs1 precommit nil - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) t.Log("### ONTO ROUND 1") @@ -743,17 +743,17 @@ func TestStateLockPOLSafety1(t *testing.T) { t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) // go to prevote, prevote for proposal block - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) // we should have precommitted 
validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) @@ -771,7 +771,7 @@ func TestStateLockPOLSafety1(t *testing.T) { ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) // finish prevote - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) // we should prevote what we're locked on validatePrevote(t, cs1, round, vss[0], propBlockHash) @@ -813,7 +813,7 @@ func TestStateLockPOLSafety2(t *testing.T) { propBlockParts0 := propBlock0.MakePartSet(partSize) // the others sign a polka but we don't see it - prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) + prevotes := signVotes(types.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) // the block for round 1 prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -834,18 +834,18 @@ func TestStateLockPOLSafety2(t *testing.T) { } ensureNewProposal(proposalCh, height, round) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], propBlockHash1) - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, types.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3) incrementRound(vs2, vs3, vs4) @@ -873,7 +873,7 @@ func TestStateLockPOLSafety2(t *testing.T) { ensureNewProposal(proposalCh, height, round) ensureNoNewUnlock(unlockCh) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], propBlockHash1) } @@ -893,7 +893,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) ensureNewRound(newRoundCh, height, round+1) @@ -915,10 +915,10 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) incrementRound(vss[1:]...) 
- signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) round = round + 1 // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -928,7 +928,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], nil) } @@ -948,15 +948,15 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) incrementRound(vss[1:]...) - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) round = round + 1 // moving to the next round ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) @@ -982,11 +982,11 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { ensureNewRound(newRoundCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], nil) } @@ -1017,7 +1017,7 @@ func TestStateSlashingPrevotes(t *testing.T) { // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlockParts.Header(), vs2) <-timeoutWaitCh @@ -1025,7 +1025,7 @@ func TestStateSlashingPrevotes(t *testing.T) { // away and ignore more prevotes (and thus fail to slash!) 
// add the conflicting vote - signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) // XXX: Check for existence of Dupeout info } @@ -1047,7 +1047,7 @@ func TestStateSlashingPrecommits(t *testing.T) { <-voteCh // prevote // add prevote from vs2 - signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) <-voteCh // precommit @@ -1055,13 +1055,13 @@ func TestStateSlashingPrecommits(t *testing.T) { // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlockParts.Header(), vs2) // NOTE: we have to send the vote for different block first so we don't just go into precommit round right // away and ignore more prevotes (and thus fail to slash!) // add precommit from vs2 - signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) // XXX: Check for existence of Dupeout info } @@ -1096,19 +1096,19 @@ func TestStateHalt1(t *testing.T) { propBlock := rs.ProposalBlock propBlockParts := propBlock.MakePartSet(partSize) - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.VoteTypePrecommit) + ensureVote(voteCh, height, round, types.PrecommitType) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal + signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit! 
- precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header()) + precommit4 := signVote(vs4, types.PrecommitType, propBlock.Hash(), propBlockParts.Header()) incrementRound(vs2, vs3, vs4) @@ -1127,7 +1127,7 @@ func TestStateHalt1(t *testing.T) { */ // go to prevote, prevote for locked block - ensureVote(voteCh, height, round, types.VoteTypePrevote) + ensureVote(voteCh, height, round, types.PrevoteType) validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // now we receive the precommit from the previous round @@ -1187,7 +1187,7 @@ func TestStateOutputVoteStats(t *testing.T) { // create dummy peer peer := p2pdummy.NewPeer() - vote := signVote(vss[1], types.VoteTypePrecommit, []byte("test"), types.PartSetHeader{}) + vote := signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{}) voteMessage := &VoteMessage{vote} cs.handleMsg(msgInfo{voteMessage, peer.ID()}) @@ -1201,7 +1201,7 @@ func TestStateOutputVoteStats(t *testing.T) { // sending the vote for the bigger height incrementHeight(vss[1]) - vote = signVote(vss[1], types.VoteTypePrecommit, []byte("test"), types.PartSetHeader{}) + vote = signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{}) cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 1c8ac67cb..eee013eea 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -99,8 +99,8 @@ func (hvs *HeightVoteSet) addRound(round int) { cmn.PanicSanity("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrevote, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrecommit, hvs.valSet) + prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) + precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet) hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, Precommits: precommits, @@ -134,13 +134,13 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, func (hvs *HeightVoteSet) Prevotes(round int) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.VoteTypePrevote) + return hvs.getVoteSet(round, types.PrevoteType) } func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.VoteTypePrecommit) + return hvs.getVoteSet(round, types.PrecommitType) } // Last round and blockID that has +2/3 prevotes for a particular block or nil. 
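For illustration, a minimal sketch of building the two per-round vote sets with the typed constants, mirroring the `addRound` change above. It assumes the `NewVoteSet` signature shown in this series and uses `NewValidator`/`NewValidatorSet` only to get a usable validator set; all values are placeholders.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// A one-validator set keeps the sketch small; any ValidatorSet works.
	val := types.NewValidator(ed25519.GenPrivKey().PubKey(), 10)
	valSet := types.NewValidatorSet([]*types.Validator{val})

	// Per-round vote sets are now constructed with the typed constants,
	// as HeightVoteSet.addRound does above.
	prevotes := types.NewVoteSet("test_chain_id", 1, 0, types.PrevoteType, valSet)
	precommits := types.NewVoteSet("test_chain_id", 1, 0, types.PrecommitType, valSet)

	// VoteSet.Type() still returns a plain byte to callers.
	fmt.Println(prevotes.Type() == byte(types.PrevoteType))     // true
	fmt.Println(precommits.Type() == byte(types.PrecommitType)) // true
}
```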
@@ -149,7 +149,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { hvs.mtx.Lock() defer hvs.mtx.Unlock() for r := hvs.round; r >= 0; r-- { - rvs := hvs.getVoteSet(r, types.VoteTypePrevote) + rvs := hvs.getVoteSet(r, types.PrevoteType) polBlockID, ok := rvs.TwoThirdsMajority() if ok { return r, polBlockID @@ -158,15 +158,15 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { return -1, types.BlockID{} } -func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet { +func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *types.VoteSet { rvs, ok := hvs.roundVoteSets[round] if !ok { return nil } switch type_ { - case types.VoteTypePrevote: + case types.PrevoteType: return rvs.Prevotes - case types.VoteTypePrecommit: + case types.PrecommitType: return rvs.Precommits default: cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_)) @@ -178,7 +178,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet { // NOTE: if there are too many peers, or too much peer churn, // this can cause memory issues. // TODO: implement ability to remove peers too -func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error { +func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ types.SignedMsgType, peerID p2p.ID, blockID types.BlockID) error { hvs.mtx.Lock() defer hvs.mtx.Unlock() if !types.IsVoteTypeValid(type_) { diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 5f469221d..e2298cef9 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -56,7 +56,7 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali Height: height, Round: round, Timestamp: tmtime.Now(), - Type: types.VoteTypePrecommit, + Type: types.PrecommitType, BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}}, } chainID := config.ChainID() diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index 4a433b5d8..89ab1b4f7 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -410,8 +410,9 @@ must be greater than 2/3 of the total voting power of the complete validator set A vote is a signed message broadcast in the consensus for a particular block at a particular height and round. When stored in the blockchain or propagated over the network, votes are encoded in Amino. -For signing, votes are represented via `CanonicalVote` and also encoded using amino (protobuf compatible) via -`Vote.SignBytes` which includes the `ChainID`. +For signing, votes are represented via `CanonicalVote` and also encoded using amino (protobuf compatible) via +`Vote.SignBytes` which includes the `ChainID`, and uses a different ordering of +the fields. We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the `SignBytes` using the given ChainID: diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 2ff024ce0..ed92739d0 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -300,20 +300,23 @@ Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the Signed messages (eg. votes, proposals) in the consensus are encoded using Amino. -When signing, the elements of a message are sorted alphabetically by key and prepended with -a `chain_id` and `type` field. 
+When signing, the elements of a message are re-ordered so the fixed-length fields +are first, making it easy to quickly check the version, height, round, and type. +The `ChainID` is also appended to the end. We call this encoding the SignBytes. For instance, SignBytes for a vote is the Amino encoding of the following struct: ```go type CanonicalVote struct { - ChainID string - Type string - BlockID CanonicalBlockID - Height int64 - Round int - Timestamp time.Time + Version uint64 `binary:"fixed64"` + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` VoteType byte + Timestamp time.Time + BlockID CanonicalBlockID + ChainID string } ``` -NOTE: see [#1622](https://github.com/tendermint/tendermint/issues/1622) for how field ordering will change +The field ordering and the fixed sized encoding for the first three fields is optimized to ease parsing of SignBytes +in HSMs. It creates fixed offsets for relevant fields that need to be read in this context. +See [#1622](https://github.com/tendermint/tendermint/issues/1622) for more details. diff --git a/lite/helpers.go b/lite/helpers.go index 16d22e708..5177ee50b 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -97,7 +97,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivK Height: header.Height, Round: 1, Timestamp: tmtime.Now(), - Type: types.VoteTypePrecommit, + Type: types.PrecommitType, BlockID: types.BlockID{Hash: header.Hash()}, } // Sign it diff --git a/privval/priv_validator.go b/privval/priv_validator.go index e606b826a..c5fba509f 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -25,9 +25,9 @@ const ( func voteToStep(vote *types.Vote) int8 { switch vote.Type { - case types.VoteTypePrevote: + case types.PrevoteType: return stepPrevote - case types.VoteTypePrecommit: + case types.PrecommitType: return stepPrecommit default: cmn.PanicSanity("Unknown vote type") diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index 404ff770b..90796ddfc 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -101,7 +101,7 @@ func TestSignVote(t *testing.T) { block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{}} height, round := int64(10), 1 - voteType := types.VoteTypePrevote + voteType := byte(types.PrevoteType) // sign a vote for first time vote := newVote(privVal.Address, 0, height, round, voteType, block1) @@ -206,7 +206,7 @@ func TestDifferByTimestamp(t *testing.T) { // test vote { - voteType := types.VoteTypePrevote + voteType := byte(types.PrevoteType) blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} vote := newVote(privVal.Address, 0, height, round, voteType, blockID) err := privVal.SignVote("mychainid", vote) @@ -235,7 +235,7 @@ func newVote(addr types.Address, idx int, height int64, round int, typ byte, blo ValidatorIndex: idx, Height: height, Round: round, - Type: typ, + Type: types.SignedMsgType(typ), Timestamp: tmtime.Now(), BlockID: blockID, } diff --git a/privval/socket_test.go b/privval/socket_test.go index 84e721be7..aa2e15fa0 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -79,7 +79,7 @@ func TestSocketPVVote(t *testing.T) { sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ts = time.Now() - vType = types.VoteTypePrecommit + vType = types.PrecommitType want = &types.Vote{Timestamp: ts, Type: vType} have = &types.Vote{Timestamp: ts, Type: vType} ) @@ -237,7 +237,7 @@ func 
TestRemoteSignVoteErrors(t *testing.T) { sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) ts = time.Now() - vType = types.VoteTypePrecommit + vType = types.PrecommitType vote = &types.Vote{Timestamp: ts, Type: vType} ) defer sc.Stop() diff --git a/state/execution_test.go b/state/execution_test.go index e93c9bfd1..273e9ebea 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -64,7 +64,7 @@ func TestBeginBlockValidators(t *testing.T) { prevBlockID := types.BlockID{prevHash, prevParts} now := tmtime.Now() - vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} + vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.PrecommitType} vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} testCases := []struct { @@ -135,7 +135,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { types.TM2PB.Evidence(ev2, valSet, now)}}, } - vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} + vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.PrecommitType} vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} votes := []*types.Vote{vote0, vote1} lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: votes} diff --git a/types/block.go b/types/block.go index bd9092f4e..fe3b17250 100644 --- a/types/block.go +++ b/types/block.go @@ -388,7 +388,7 @@ func (commit *Commit) FirstPrecommit() *Vote { } } return &Vote{ - Type: VoteTypePrecommit, + Type: PrecommitType, } } @@ -410,7 +410,7 @@ func (commit *Commit) Round() int { // Type returns the vote type of the commit, which is always VoteTypePrecommit func (commit *Commit) Type() byte { - return VoteTypePrecommit + return byte(PrecommitType) } // Size returns the number of votes in the commit @@ -462,7 +462,7 @@ func (commit *Commit) ValidateBasic() error { continue } // Ensure that all votes are precommits. - if precommit.Type != VoteTypePrecommit { + if precommit.Type != PrecommitType { return fmt.Errorf("Invalid commit vote. 
Expected precommit, got %v", precommit.Type) } diff --git a/types/block_test.go b/types/block_test.go index 962aa0026..7abd79d79 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -26,7 +26,7 @@ func TestBlockAddEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) @@ -46,7 +46,7 @@ func TestBlockValidateBasic(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) @@ -106,7 +106,7 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) @@ -123,7 +123,7 @@ func TestBlockHashesTo(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) @@ -190,14 +190,14 @@ func TestNilDataHashDoesntCrash(t *testing.T) { func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) assert.NotNil(t, commit.FirstPrecommit()) assert.Equal(t, h-1, commit.Height()) assert.Equal(t, 1, commit.Round()) - assert.Equal(t, VoteTypePrecommit, commit.Type()) + assert.Equal(t, PrecommitType, SignedMsgType(commit.Type())) if commit.Size() <= 0 { t.Fatalf("commit %v has a zero or negative size: %d", commit, commit.Size()) } @@ -218,7 +218,7 @@ func TestCommitValidateBasic(t *testing.T) { {"Random Commit", func(com *Commit) {}, false}, {"Nil precommit", func(com *Commit) { com.Precommits[0] = nil }, false}, {"Incorrect signature", func(com *Commit) { com.Precommits[0].Signature = []byte{0} }, false}, - {"Incorrect type", func(com *Commit) { com.Precommits[0].Type = VoteTypePrevote }, true}, + {"Incorrect type", func(com *Commit) { com.Precommits[0].Type = PrevoteType }, true}, {"Incorrect height", func(com *Commit) { com.Precommits[0].Height = int64(100) }, true}, {"Incorrect round", func(com *Commit) { com.Precommits[0].Round = 100 }, true}, } @@ -268,7 +268,7 @@ func TestMaxHeaderBytes(t *testing.T) { func randCommit() *Commit { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) if err != nil { panic(err) diff --git a/types/canonical.go b/types/canonical.go index cdf0bd7b5..8a33debda 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -13,44 +13,46 @@ import ( const TimeFormat = time.RFC3339Nano type CanonicalBlockID struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - PartsHeader CanonicalPartSetHeader `json:"parts,omitempty"` + Hash cmn.HexBytes + 
PartsHeader CanonicalPartSetHeader } type CanonicalPartSetHeader struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - Total int `json:"total,omitempty"` + Hash cmn.HexBytes + Total int } type CanonicalProposal struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockPartsHeader CanonicalPartSetHeader `json:"block_parts_header"` - Height int64 `json:"height"` - POLBlockID CanonicalBlockID `json:"pol_block_id"` - POLRound int `json:"pol_round"` - Round int `json:"round"` - Timestamp time.Time `json:"timestamp"` + Version uint64 `binary:"fixed64"` + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + Type SignedMsgType // type alias for byte + POLRound int64 `binary:"fixed64"` + Timestamp time.Time + BlockPartsHeader CanonicalPartSetHeader + POLBlockID CanonicalBlockID + ChainID string } type CanonicalVote struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockID CanonicalBlockID `json:"block_id"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp time.Time `json:"timestamp"` - VoteType byte `json:"type"` + Version uint64 `binary:"fixed64"` + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + Type SignedMsgType // type alias for byte + Timestamp time.Time + BlockID CanonicalBlockID + ChainID string } type CanonicalHeartbeat struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - Height int64 `json:"height"` - Round int `json:"round"` - Sequence int `json:"sequence"` - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` + Version uint64 `binary:"fixed64"` + Height int64 `binary:"fixed64"` + Round int `binary:"fixed64"` + Type byte + Sequence int `binary:"fixed64"` + ValidatorAddress Address + ValidatorIndex int + ChainID string } //----------------------------------- @@ -72,38 +74,40 @@ func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { return CanonicalProposal{ - ChainID: chainID, - Type: "proposal", - BlockPartsHeader: CanonicalizePartSetHeader(proposal.BlockPartsHeader), + Version: 0, // TODO Height: proposal.Height, + Round: int64(proposal.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) + Type: ProposalType, + POLRound: int64(proposal.POLRound), Timestamp: proposal.Timestamp, + BlockPartsHeader: CanonicalizePartSetHeader(proposal.BlockPartsHeader), POLBlockID: CanonicalizeBlockID(proposal.POLBlockID), - POLRound: proposal.POLRound, - Round: proposal.Round, + ChainID: chainID, } } func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { return CanonicalVote{ - ChainID: chainID, - Type: "vote", - BlockID: CanonicalizeBlockID(vote.BlockID), + Version: 0, // TODO Height: vote.Height, - Round: vote.Round, + Round: int64(vote.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) + Type: vote.Type, Timestamp: vote.Timestamp, - VoteType: vote.Type, + BlockID: CanonicalizeBlockID(vote.BlockID), + ChainID: chainID, } } func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat { return CanonicalHeartbeat{ - ChainID: chainID, - Type: "heartbeat", + Version: 0, // TODO Height: heartbeat.Height, Round: heartbeat.Round, + Type: byte(HeartbeatType), Sequence: heartbeat.Sequence, ValidatorAddress: heartbeat.ValidatorAddress, ValidatorIndex: heartbeat.ValidatorIndex, + ChainID: chainID, } } diff --git a/types/evidence_test.go 
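The `binary:"fixed64"` tags and the field reordering above are what produce the fixed offsets described in the docs/spec/blockchain/encoding.md change earlier and exercised by the test vectors in types/vote_test.go later in this series. As an illustration only (hand-written, not code from this changeset), a sketch of how a signer could read height and round at those offsets, assuming sign bytes shaped like the precommit test vector (no version field, non-zero height and round):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Sign bytes for Vote{Height: 1, Round: 1, Type: PrecommitType}, copied
	// from the test vector: length prefix, then fixed64 height and round.
	signBytes := []byte{
		0x1f, // total length
		0x11, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // field 2 (height), fixed64
		0x19, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // field 3 (round), fixed64
		0x20, 0x2, // field 4 (type) = PrecommitType
		0x2a, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff, // field 5 (timestamp)
	}

	// 0x11 == (2<<3)|1 and 0x19 == (3<<3)|1: the fixed64 height and round tags.
	if signBytes[1] == 0x11 && signBytes[10] == 0x19 {
		height := binary.LittleEndian.Uint64(signBytes[2:10])
		round := binary.LittleEndian.Uint64(signBytes[11:19])
		fmt.Println(height, round) // 1 1
	}
}
```

Note the zero-value case in the first test vector below: when height and round are zero they are omitted entirely, so a real parser must check the tag bytes rather than trust the offsets blindly.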
b/types/evidence_test.go index 1a7e9ea5d..79805691c 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -22,7 +22,7 @@ func makeVote(val PrivValidator, chainID string, valIndex int, height int64, rou ValidatorIndex: valIndex, Height: height, Round: round, - Type: byte(step), + Type: SignedMsgType(step), BlockID: blockID, } err := val.SignVote(chainID, v) diff --git a/types/signed_msg_type.go b/types/signed_msg_type.go new file mode 100644 index 000000000..cc3ddbdc1 --- /dev/null +++ b/types/signed_msg_type.go @@ -0,0 +1,27 @@ +package types + +// SignedMsgType is a type of signed message in the consensus. +type SignedMsgType byte + +const ( + // Votes + PrevoteType SignedMsgType = 0x01 + PrecommitType SignedMsgType = 0x02 + + // Proposals + ProposalType SignedMsgType = 0x20 + + // Heartbeat + HeartbeatType SignedMsgType = 0x30 +) + +func IsVoteTypeValid(type_ SignedMsgType) bool { + switch type_ { + case PrevoteType: + return true + case PrecommitType: + return true + default: + return false + } +} diff --git a/types/test_util.go b/types/test_util.go index e20ea212e..80f0c7872 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -16,7 +16,7 @@ func MakeCommit(blockID BlockID, height int64, round int, ValidatorIndex: i, Height: height, Round: round, - Type: VoteTypePrecommit, + Type: PrecommitType, BlockID: blockID, Timestamp: tmtime.Now(), } diff --git a/types/validator_set.go b/types/validator_set.go index 72ab68c08..ab030d1be 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -282,7 +282,7 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i if precommit.Round != round { return fmt.Errorf("Invalid commit -- wrong round: want %v got %v", round, precommit.Round) } - if precommit.Type != VoteTypePrecommit { + if precommit.Type != PrecommitType { return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx) } _, val := vals.GetByIndex(idx) @@ -361,7 +361,7 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin if precommit.Round != round { return cmn.NewError("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) } - if precommit.Type != VoteTypePrecommit { + if precommit.Type != PrecommitType { return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } // See if this validator is in oldVals. diff --git a/types/validator_set_test.go b/types/validator_set_test.go index e41117074..d886b419c 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -385,7 +385,7 @@ func TestValidatorSetVerifyCommit(t *testing.T) { Height: height, Round: 0, Timestamp: tmtime.Now(), - Type: VoteTypePrecommit, + Type: PrecommitType, BlockID: blockID, } sig, err := privKey.Sign(vote.SignBytes(chainID)) diff --git a/types/vote.go b/types/vote.go index 5a31f0e2b..2d70e21b2 100644 --- a/types/vote.go +++ b/types/vote.go @@ -43,37 +43,19 @@ func NewConflictingVoteError(val *Validator, voteA, voteB *Vote) *ErrVoteConflic } } -// Types of votes -// TODO Make a new type "VoteType" -const ( - VoteTypePrevote = byte(0x01) - VoteTypePrecommit = byte(0x02) -) - -func IsVoteTypeValid(type_ byte) bool { - switch type_ { - case VoteTypePrevote: - return true - case VoteTypePrecommit: - return true - default: - return false - } -} - // Address is hex bytes. type Address = crypto.Address // Represents a prevote, precommit, or commit vote from validators for consensus. 
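The new SignedMsgType replaces the old VoteTypePrevote/VoteTypePrecommit byte constants. For illustration, a minimal sketch of how calling code might use it, assuming the types package as modified in this patch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// Vote kinds are values of the SignedMsgType enum rather than bare bytes.
	fmt.Println(types.IsVoteTypeValid(types.PrevoteType))   // true
	fmt.Println(types.IsVoteTypeValid(types.PrecommitType)) // true
	fmt.Println(types.IsVoteTypeValid(types.ProposalType))  // false

	// A Vote now carries a SignedMsgType directly; code that used to pass a
	// raw byte needs an explicit conversion, e.g. types.SignedMsgType(0x02).
	v := &types.Vote{
		Height:    1,
		Round:     0,
		Timestamp: time.Now(),
		Type:      types.PrecommitType,
	}
	fmt.Println(types.IsVoteTypeValid(v.Type)) // true
}
```

Proposals and heartbeats get values in the same space (0x20 and 0x30) so a single field can tag any signed consensus message, but IsVoteTypeValid still admits only prevotes and precommits.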
type Vote struct { - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp time.Time `json:"timestamp"` - Type byte `json:"type"` - BlockID BlockID `json:"block_id"` // zero if vote is nil. - Signature []byte `json:"signature"` + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` + Height int64 `json:"height"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` + Type SignedMsgType `json:"type"` + BlockID BlockID `json:"block_id"` // zero if vote is nil. + Signature []byte `json:"signature"` } func (vote *Vote) SignBytes(chainID string) []byte { @@ -95,9 +77,9 @@ func (vote *Vote) String() string { } var typeString string switch vote.Type { - case VoteTypePrevote: + case PrevoteType: typeString = "Prevote" - case VoteTypePrecommit: + case PrecommitType: typeString = "Precommit" default: cmn.PanicSanity("Unknown vote type") diff --git a/types/vote_set.go b/types/vote_set.go index dbcacbbdb..cdfa3d40d 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -55,7 +55,7 @@ type VoteSet struct { chainID string height int64 round int - type_ byte + type_ SignedMsgType valSet *ValidatorSet mtx sync.Mutex @@ -68,7 +68,7 @@ type VoteSet struct { } // Constructs a new VoteSet struct used to accumulate votes for given height/round. -func NewVoteSet(chainID string, height int64, round int, type_ byte, valSet *ValidatorSet) *VoteSet { +func NewVoteSet(chainID string, height int64, round int, type_ SignedMsgType, valSet *ValidatorSet) *VoteSet { if height == 0 { cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.") } @@ -109,7 +109,7 @@ func (voteSet *VoteSet) Type() byte { if voteSet == nil { return 0x00 } - return voteSet.type_ + return byte(voteSet.type_) } func (voteSet *VoteSet) Size() int { @@ -381,7 +381,7 @@ func (voteSet *VoteSet) IsCommit() bool { if voteSet == nil { return false } - if voteSet.type_ != VoteTypePrecommit { + if voteSet.type_ != PrecommitType { return false } voteSet.mtx.Lock() @@ -529,8 +529,8 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { // Commit func (voteSet *VoteSet) MakeCommit() *Commit { - if voteSet.type_ != VoteTypePrecommit { - cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is VoteTypePrecommit") + if voteSet.type_ != PrecommitType { + cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is PrecommitType") } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 995fb94bd..641872920 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -11,7 +11,7 @@ import ( ) // NOTE: privValidators are in order -func randVoteSet(height int64, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []PrivValidator) { +func randVoteSet(height int64, round int, type_ SignedMsgType, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []PrivValidator) { valSet, privValidators := RandValidatorSet(numValidators, votingPower) return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators } @@ -41,7 +41,7 @@ func withRound(vote *Vote, round int) *Vote { // Convenience: Return new vote with different type func withType(vote *Vote, type_ byte) *Vote { vote = vote.Copy() - vote.Type = type_ + vote.Type = SignedMsgType(type_) return vote } @@ -61,7 +61,7 @@ func withBlockPartsHeader(vote *Vote, blockPartsHeader 
PartSetHeader) *Vote { func TestAddVote(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) val0 := privValidators[0] // t.Logf(">> %v", voteSet) @@ -82,7 +82,7 @@ func TestAddVote(t *testing.T) { ValidatorIndex: 0, // since privValidators are in order Height: height, Round: round, - Type: VoteTypePrevote, + Type: PrevoteType, Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } @@ -105,14 +105,14 @@ func TestAddVote(t *testing.T) { func Test2_3Majority(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in ValidatorIndex: -1, // NOTE: must fill in Height: height, Round: round, - Type: VoteTypePrevote, + Type: PrevoteType, Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } @@ -158,7 +158,7 @@ func Test2_3Majority(t *testing.T) { func Test2_3MajorityRedux(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 100, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1) blockHash := crypto.CRandBytes(32) blockPartsTotal := 123 @@ -170,7 +170,7 @@ func Test2_3MajorityRedux(t *testing.T) { Height: height, Round: round, Timestamp: tmtime.Now(), - Type: VoteTypePrevote, + Type: PrevoteType, BlockID: BlockID{blockHash, blockPartsHeader}, } @@ -257,7 +257,7 @@ func Test2_3MajorityRedux(t *testing.T) { func TestBadVotes(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, @@ -265,7 +265,7 @@ func TestBadVotes(t *testing.T) { Height: height, Round: round, Timestamp: tmtime.Now(), - Type: VoteTypePrevote, + Type: PrevoteType, BlockID: BlockID{nil, PartSetHeader{}}, } @@ -308,7 +308,7 @@ func TestBadVotes(t *testing.T) { // val3 votes of another type. 
{ vote := withValidator(voteProto, privValidators[3].GetAddress(), 3) - added, err := signAddVote(privValidators[3], withType(vote, VoteTypePrecommit), voteSet) + added, err := signAddVote(privValidators[3], withType(vote, byte(PrecommitType)), voteSet) if added || err == nil { t.Errorf("Expected VoteSet.Add to fail, wrong type") } @@ -317,7 +317,7 @@ func TestBadVotes(t *testing.T) { func TestConflicts(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 4, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1) blockHash1 := cmn.RandBytes(32) blockHash2 := cmn.RandBytes(32) @@ -327,7 +327,7 @@ func TestConflicts(t *testing.T) { Height: height, Round: round, Timestamp: tmtime.Now(), - Type: VoteTypePrevote, + Type: PrevoteType, BlockID: BlockID{nil, PartSetHeader{}}, } @@ -447,7 +447,7 @@ func TestConflicts(t *testing.T) { func TestMakeCommit(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrecommit, 10, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1) blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} voteProto := &Vote{ @@ -456,7 +456,7 @@ func TestMakeCommit(t *testing.T) { Height: height, Round: round, Timestamp: tmtime.Now(), - Type: VoteTypePrecommit, + Type: PrecommitType, BlockID: BlockID{blockHash, blockPartsHeader}, } diff --git a/types/vote_test.go b/types/vote_test.go index d0c41a065..282953f46 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -13,11 +13,11 @@ import ( ) func examplePrevote() *Vote { - return exampleVote(VoteTypePrevote) + return exampleVote(byte(PrevoteType)) } func examplePrecommit() *Vote { - return exampleVote(VoteTypePrecommit) + return exampleVote(byte(PrecommitType)) } func exampleVote(t byte) *Vote { @@ -32,7 +32,7 @@ func exampleVote(t byte) *Vote { Height: 12345, Round: 2, Timestamp: stamp, - Type: t, + Type: SignedMsgType(t), BlockID: BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), PartsHeader: PartSetHeader{ @@ -53,6 +53,98 @@ func TestVoteSignable(t *testing.T) { require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Vote.") } +func TestVoteSignableTestVectors(t *testing.T) { + voteWithVersion := CanonicalizeVote("", &Vote{Height: 1, Round: 1}) + voteWithVersion.Version = 123 + + tests := []struct { + canonicalVote CanonicalVote + want []byte + }{ + { + CanonicalizeVote("", &Vote{}), + // NOTE: Height and Round are skipped here. This case needs to be considered while parsing. 
+ []byte{0xb, 0x2a, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + }, + // with proper (fixed size) height and round (PreCommit): + { + CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrecommitType}), + []byte{ + 0x1f, // total length + 0x11, // (field_number << 3) | wire_type (version is missing) + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round + 0x20, // (field_number << 3) | wire_type + 0x2, // PrecommitType + 0x2a, // (field_number << 3) | wire_type + // remaining fields (timestamp): + 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + }, + // with proper (fixed size) height and round (PreVote): + { + CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrevoteType}), + []byte{ + 0x1f, // total length + 0x11, // (field_number << 3) | wire_type (version is missing) + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round + 0x20, // (field_number << 3) | wire_type + 0x1, // PrevoteType + 0x2a, // (field_number << 3) | wire_type + // remaining fields (timestamp): + 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + }, + // containing version (empty type) + { + voteWithVersion, + []byte{ + 0x26, // total length + 0x9, // (field_number << 3) | wire_type + 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // version (123) + 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round + // remaining fields (timestamp): + 0x2a, + 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + }, + // containing non-empty chain_id: + { + CanonicalizeVote("test_chain_id", &Vote{Height: 1, Round: 1}), + []byte{ + 0x2c, // total length + 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round + // remaining fields: + 0x2a, // (field_number << 3) | wire_type + 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff, // timestamp + 0x3a, // (field_number << 3) | wire_type + 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID + }, + } + for i, tc := range tests { + got, err := cdc.MarshalBinary(tc.canonicalVote) + require.NoError(t, err) + + require.Equal(t, tc.want, got, "test case #%v: got unexpected sign bytes for Vote.", i) + } +} + +func TestVoteProposalNotEq(t *testing.T) { + cv := CanonicalizeVote("", &Vote{Height: 1, Round: 1}) + p := CanonicalizeProposal("", &Proposal{Height: 1, Round: 1}) + vb, err := cdc.MarshalBinary(cv) + require.NoError(t, err) + pb, err := cdc.MarshalBinary(p) + require.NoError(t, err) + require.NotEqual(t, vb, pb) +} + func TestVoteVerifySignature(t *testing.T) { privVal := NewMockPV() pubkey := privVal.GetPubKey() @@ -85,12 +177,12 @@ func TestVoteVerifySignature(t *testing.T) { func TestIsVoteTypeValid(t *testing.T) { tc := []struct { name string - in byte + in SignedMsgType out bool }{ - {"Prevote", VoteTypePrevote, true}, - {"Precommit", VoteTypePrecommit, true}, - {"InvalidType", byte(3), false}, + {"Prevote", PrevoteType, true}, + {"Precommit", PrecommitType, true}, + {"InvalidType", SignedMsgType(0x3), false}, } for _, tt := range tc { @@ -128,7 +220,7 @@ func TestMaxVoteBytes(t *testing.T) { Height: math.MaxInt64, Round: math.MaxInt64, Timestamp: tmtime.Now(), - 
Type: VoteTypePrevote, + Type: PrevoteType, BlockID: BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), PartsHeader: PartSetHeader{ From 0baa7588c278c43cd19fbf435f1de17ece9923a7 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 12 Oct 2018 19:25:33 -0400 Subject: [PATCH 070/113] p2p: NodeInfo is an interface; General cleanup (#2556) * p2p: NodeInfo is an interface * (squash) fixes from review * (squash) more fixes from review * p2p: remove peerConn.HandshakeTimeout * p2p: NodeInfo is two interfaces. Remove String() * fixes from review * remove test code from peer.RemoteIP() * p2p: remove peer.OriginalAddr(). See #2618 * use a mockPeer in peer_set_test.go * p2p: fix testNodeInfo naming * p2p: remove unused var * remove testRandNodeInfo * fix linter * fix retry dialing self * fix rpc --- benchmarks/codec_test.go | 50 +++++---------- blockchain/reactor_test.go | 3 +- consensus/common_test.go | 2 +- node/node.go | 6 +- p2p/dummy/peer.go | 7 +- p2p/errors.go | 5 +- p2p/node_info.go | 76 +++++++++++++--------- p2p/peer.go | 107 ++++++++----------------------- p2p/peer_set_test.go | 45 +++++++------ p2p/peer_test.go | 36 +++++------ p2p/pex/pex_reactor_test.go | 7 +- p2p/switch.go | 13 ++-- p2p/switch_test.go | 1 + p2p/test_util.go | 89 +++++++++++++------------ p2p/transport.go | 35 +++++----- p2p/transport_test.go | 83 +++++++++--------------- rpc/core/consensus.go | 3 +- rpc/core/net.go | 9 ++- rpc/core/status.go | 3 +- rpc/core/types/responses.go | 8 +-- rpc/core/types/responses_test.go | 12 ++-- 21 files changed, 269 insertions(+), 331 deletions(-) diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go index c0e13d168..71d7a83b2 100644 --- a/benchmarks/codec_test.go +++ b/benchmarks/codec_test.go @@ -12,23 +12,27 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" ) +func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo { + return p2p.DefaultNodeInfo{ + ID_: id, + Moniker: "SOMENAME", + Network: "SOMENAME", + ListenAddr: "SOMEADDR", + Version: "SOMEVER", + Other: p2p.DefaultNodeInfoOther{ + AminoVersion: "SOMESTRING", + P2PVersion: "OTHERSTRING", + }, + } +} + func BenchmarkEncodeStatusWire(b *testing.B) { b.StopTimer() cdc := amino.NewCodec() ctypes.RegisterAmino(cdc) nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} status := &ctypes.ResultStatus{ - NodeInfo: p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: p2p.NodeInfoOther{ - AminoVersion: "SOMESTRING", - P2PVersion: "OTHERSTRING", - }, - }, + NodeInfo: testNodeInfo(nodeKey.ID()), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: []byte("SOMEBYTES"), LatestBlockHeight: 123, @@ -56,17 +60,7 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) { cdc := amino.NewCodec() ctypes.RegisterAmino(cdc) nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} - nodeInfo := p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: p2p.NodeInfoOther{ - AminoVersion: "SOMESTRING", - P2PVersion: "OTHERSTRING", - }, - } + nodeInfo := testNodeInfo(nodeKey.ID()) b.StartTimer() counter := 0 @@ -84,17 +78,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) { cdc := amino.NewCodec() ctypes.RegisterAmino(cdc) nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} - nodeInfo := p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: p2p.NodeInfoOther{ - AminoVersion: "SOMESTRING", - P2PVersion: 
"OTHERSTRING", - }, - } + nodeInfo := testNodeInfo(nodeKey.ID()) b.StartTimer() counter := 0 diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index b63a057e1..7fc7ffb77 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -198,7 +198,7 @@ func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool { } func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) } -func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} } +func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.DefaultNodeInfo{} } func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} } func (tp *bcrTestPeer) ID() p2p.ID { return tp.id } func (tp *bcrTestPeer) IsOutbound() bool { return false } @@ -206,4 +206,3 @@ func (tp *bcrTestPeer) IsPersistent() bool { return true } func (tp *bcrTestPeer) Get(s string) interface{} { return s } func (tp *bcrTestPeer) Set(string, interface{}) {} func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} } -func (tp *bcrTestPeer) OriginalAddr() *p2p.NetAddress { return nil } diff --git a/consensus/common_test.go b/consensus/common_test.go index 26f8e3e57..ddce69145 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -568,7 +568,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { for i, s := range switches { - if peer.NodeInfo().ID == s.NodeInfo().ID { + if peer.NodeInfo().ID() == s.NodeInfo().ID() { return i } } diff --git a/node/node.go b/node/node.go index 9c409787d..ed0fa1198 100644 --- a/node/node.go +++ b/node/node.go @@ -761,8 +761,8 @@ func makeNodeInfo( if _, ok := txIndexer.(*null.TxIndex); ok { txIndexerStatus = "off" } - nodeInfo := p2p.NodeInfo{ - ID: nodeID, + nodeInfo := p2p.DefaultNodeInfo{ + ID_: nodeID, Network: chainID, Version: version.Version, Channels: []byte{ @@ -772,7 +772,7 @@ func makeNodeInfo( evidence.EvidenceChannel, }, Moniker: config.Moniker, - Other: p2p.NodeInfoOther{ + Other: p2p.DefaultNodeInfoOther{ AminoVersion: amino.Version, P2PVersion: p2p.Version, ConsensusVersion: cs.Version, diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go index bb6e822fc..4871719d4 100644 --- a/p2p/dummy/peer.go +++ b/p2p/dummy/peer.go @@ -42,7 +42,7 @@ func (p *peer) IsPersistent() bool { // NodeInfo always returns empty node info. func (p *peer) NodeInfo() p2p.NodeInfo { - return p2p.NodeInfo{} + return p2p.DefaultNodeInfo{} } // RemoteIP always returns localhost. @@ -78,8 +78,3 @@ func (p *peer) Get(key string) interface{} { } return nil } - -// OriginalAddr always returns nil. 
-func (p *peer) OriginalAddr() *p2p.NetAddress { - return nil -} diff --git a/p2p/errors.go b/p2p/errors.go index 902d22034..706150945 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -40,13 +40,12 @@ func (e ErrRejected) Error() string { if e.isDuplicate { if e.conn != nil { return fmt.Sprintf( - "duplicate CONN<%s>: %s", + "duplicate CONN<%s>", e.conn.RemoteAddr().String(), - e.err, ) } if e.id != "" { - return fmt.Sprintf("duplicate ID<%v>: %s", e.id, e.err) + return fmt.Sprintf("duplicate ID<%v>", e.id) } } diff --git a/p2p/node_info.go b/p2p/node_info.go index a16535949..a468443d1 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -2,6 +2,7 @@ package p2p import ( "fmt" + "reflect" "strings" cmn "github.com/tendermint/tendermint/libs/common" @@ -17,12 +18,32 @@ func MaxNodeInfoSize() int { return maxNodeInfoSize } -// NodeInfo is the basic node information exchanged +// NodeInfo exposes basic info of a node +// and determines if we're compatible +type NodeInfo interface { + nodeInfoAddress + nodeInfoTransport +} + +// nodeInfoAddress exposes just the core info of a node. +type nodeInfoAddress interface { + ID() ID + NetAddress() *NetAddress +} + +// nodeInfoTransport is validates a nodeInfo and checks +// our compatibility with it. It's for use in the handshake. +type nodeInfoTransport interface { + ValidateBasic() error + CompatibleWith(other NodeInfo) error +} + +// DefaultNodeInfo is the basic node information exchanged // between two peers during the Tendermint P2P handshake. -type NodeInfo struct { +type DefaultNodeInfo struct { // Authenticate // TODO: replace with NetAddress - ID ID `json:"id"` // authenticated identifier + ID_ ID `json:"id"` // authenticated identifier ListenAddr string `json:"listen_addr"` // accepting incoming // Check compatibility. @@ -32,12 +53,12 @@ type NodeInfo struct { Channels cmn.HexBytes `json:"channels"` // channels this node knows about // ASCIIText fields - Moniker string `json:"moniker"` // arbitrary moniker - Other NodeInfoOther `json:"other"` // other application specific data + Moniker string `json:"moniker"` // arbitrary moniker + Other DefaultNodeInfoOther `json:"other"` // other application specific data } -// NodeInfoOther is the misc. applcation specific data -type NodeInfoOther struct { +// DefaultNodeInfoOther is the misc. applcation specific data +type DefaultNodeInfoOther struct { AminoVersion string `json:"amino_version"` P2PVersion string `json:"p2p_version"` ConsensusVersion string `json:"consensus_version"` @@ -46,19 +67,12 @@ type NodeInfoOther struct { RPCAddress string `json:"rpc_address"` } -func (o NodeInfoOther) String() string { - return fmt.Sprintf( - "{amino_version: %v, p2p_version: %v, consensus_version: %v, rpc_version: %v, tx_index: %v, rpc_address: %v}", - o.AminoVersion, - o.P2PVersion, - o.ConsensusVersion, - o.RPCVersion, - o.TxIndex, - o.RPCAddress, - ) +// ID returns the node's peer ID. +func (info DefaultNodeInfo) ID() ID { + return info.ID_ } -// Validate checks the self-reported NodeInfo is safe. +// ValidateBasic checks the self-reported DefaultNodeInfo is safe. // It returns an error if there // are too many Channels, if there are any duplicate Channels, // if the ListenAddr is malformed, or if the ListenAddr is a host name @@ -71,7 +85,7 @@ func (o NodeInfoOther) String() string { // International clients could then use punycode (or we could use // url-encoding), and we just need to be careful with how we handle that in our // clients. (e.g. off by default). 
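Downstream of this change, callers take the NodeInfo interface while concrete values remain DefaultNodeInfo (see testNodeInfo in the benchmarks above). A minimal sketch, with placeholder field values, of what consuming code might look like; note that CompatibleWith type-asserts its argument to DefaultNodeInfo, so mixed implementations are rejected:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
)

// describe depends only on the NodeInfo interface, so tests can hand it a
// stand-in (as the mockPeer below returns DefaultNodeInfo{}) instead of a
// real peer's info.
func describe(ni p2p.NodeInfo) {
	fmt.Println("peer id:", ni.ID())
	if err := ni.ValidateBasic(); err != nil {
		fmt.Println("node info rejected:", err)
	}
}

func main() {
	// Placeholder values; a real node fills these in makeNodeInfo.
	var ni p2p.NodeInfo = p2p.DefaultNodeInfo{
		ID_:        "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
		ListenAddr: "127.0.0.1:26656",
		Network:    "test-chain",
		Version:    "0.25.0",
		Moniker:    "sketch",
	}
	describe(ni)
}
```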
-func (info NodeInfo) Validate() error { +func (info DefaultNodeInfo) ValidateBasic() error { if len(info.Channels) > maxNumChannels { return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels) } @@ -111,14 +125,19 @@ func (info NodeInfo) Validate() error { } // ensure ListenAddr is good - _, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr)) + _, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr)) return err } -// CompatibleWith checks if two NodeInfo are compatible with eachother. +// CompatibleWith checks if two DefaultNodeInfo are compatible with eachother. // CONTRACT: two nodes are compatible if the major version matches and network match // and they have at least one channel in common. -func (info NodeInfo) CompatibleWith(other NodeInfo) error { +func (info DefaultNodeInfo) CompatibleWith(other_ NodeInfo) error { + other, ok := other_.(DefaultNodeInfo) + if !ok { + return fmt.Errorf("wrong NodeInfo type. Expected DefaultNodeInfo, got %v", reflect.TypeOf(other_)) + } + iMajor, _, _, iErr := splitVersion(info.Version) oMajor, _, _, oErr := splitVersion(other.Version) @@ -164,18 +183,18 @@ OUTER_LOOP: return nil } -// NetAddress returns a NetAddress derived from the NodeInfo - +// NetAddress returns a NetAddress derived from the DefaultNodeInfo - // it includes the authenticated peer ID and the self-reported // ListenAddr. Note that the ListenAddr is not authenticated and // may not match that address actually dialed if its an outbound peer. -func (info NodeInfo) NetAddress() *NetAddress { - netAddr, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr)) +func (info DefaultNodeInfo) NetAddress() *NetAddress { + netAddr, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr)) if err != nil { switch err.(type) { case ErrNetAddressLookup: // XXX If the peer provided a host name and the lookup fails here // we're out of luck. - // TODO: use a NetAddress in NodeInfo + // TODO: use a NetAddress in DefaultNodeInfo default: panic(err) // everything should be well formed by now } @@ -183,11 +202,6 @@ func (info NodeInfo) NetAddress() *NetAddress { return netAddr } -func (info NodeInfo) String() string { - return fmt.Sprintf("NodeInfo{id: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}", - info.ID, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other) -} - func splitVersion(version string) (string, string, string, error) { spl := strings.Split(version, ".") if len(spl) != 3 { diff --git a/p2p/peer.go b/p2p/peer.go index ba22695e7..009313141 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -3,7 +3,6 @@ package p2p import ( "fmt" "net" - "sync/atomic" "time" cmn "github.com/tendermint/tendermint/libs/common" @@ -15,19 +14,18 @@ import ( const metricsTickerDuration = 10 * time.Second -var testIPSuffix uint32 - // Peer is an interface representing a peer connected on a reactor. type Peer interface { cmn.Service - ID() ID // peer's cryptographic ID - RemoteIP() net.IP // remote IP of the connection + ID() ID // peer's cryptographic ID + RemoteIP() net.IP // remote IP of the connection + IsOutbound() bool // did we dial the peer IsPersistent() bool // do we redial this peer when we disconnect + NodeInfo() NodeInfo // peer's info Status() tmconn.ConnectionStatus - OriginalAddr() *NetAddress Send(byte, []byte) bool TrySend(byte, []byte) bool @@ -40,12 +38,13 @@ type Peer interface { // peerConn contains the raw connection and its config. 
type peerConn struct { - outbound bool - persistent bool - config *config.P2PConfig - conn net.Conn // source connection - ip net.IP - originalAddr *NetAddress // nil for inbound connections + outbound bool + persistent bool + config *config.P2PConfig + conn net.Conn // source connection + + // cached RemoteIP() + ip net.IP } // ID only exists for SecretConnection. @@ -60,14 +59,6 @@ func (pc peerConn) RemoteIP() net.IP { return pc.ip } - // In test cases a conn could not be present at all or be an in-memory - // implementation where we want to return a fake ip. - if pc.conn == nil || pc.conn.RemoteAddr().String() == "pipe" { - pc.ip = net.IP{172, 16, 0, byte(atomic.AddUint32(&testIPSuffix, 1))} - - return pc.ip - } - host, _, err := net.SplitHostPort(pc.conn.RemoteAddr().String()) if err != nil { panic(err) @@ -120,7 +111,7 @@ func newPeer( p := &peer{ peerConn: pc, nodeInfo: nodeInfo, - channels: nodeInfo.Channels, + channels: nodeInfo.(DefaultNodeInfo).Channels, // TODO Data: cmn.NewCMap(), metricsTicker: time.NewTicker(metricsTickerDuration), metrics: NopMetrics(), @@ -142,6 +133,15 @@ func newPeer( return p } +// String representation. +func (p *peer) String() string { + if p.outbound { + return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID()) + } + + return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) +} + //--------------------------------------------------- // Implements cmn.Service @@ -177,7 +177,7 @@ func (p *peer) OnStop() { // ID returns the peer's ID - the hex encoded hash of its pubkey. func (p *peer) ID() ID { - return p.nodeInfo.ID + return p.nodeInfo.ID() } // IsOutbound returns true if the connection is outbound, false otherwise. @@ -195,15 +195,6 @@ func (p *peer) NodeInfo() NodeInfo { return p.nodeInfo } -// OriginalAddr returns the original address, which was used to connect with -// the peer. Returns nil for inbound peers. -func (p *peer) OriginalAddr() *NetAddress { - if p.peerConn.outbound { - return p.peerConn.originalAddr - } - return nil -} - // Status returns the peer's ConnectionStatus. func (p *peer) Status() tmconn.ConnectionStatus { return p.mconn.Status() @@ -272,53 +263,14 @@ func (p *peer) hasChannel(chID byte) bool { } //--------------------------------------------------- -// methods used by the Switch +// methods only used for testing +// TODO: can we remove these? -// CloseConn should be called by the Switch if the peer was created but never -// started. +// CloseConn closes the underlying connection func (pc *peerConn) CloseConn() { pc.conn.Close() // nolint: errcheck } -// HandshakeTimeout performs the Tendermint P2P handshake between a given node -// and the peer by exchanging their NodeInfo. It sets the received nodeInfo on -// the peer. 
-// NOTE: blocking -func (pc *peerConn) HandshakeTimeout( - ourNodeInfo NodeInfo, - timeout time.Duration, -) (peerNodeInfo NodeInfo, err error) { - // Set deadline for handshake so we don't block forever on conn.ReadFull - if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error setting deadline") - } - - var trs, _ = cmn.Parallel( - func(_ int) (val interface{}, err error, abort bool) { - _, err = cdc.MarshalBinaryWriter(pc.conn, ourNodeInfo) - return - }, - func(_ int) (val interface{}, err error, abort bool) { - _, err = cdc.UnmarshalBinaryReader( - pc.conn, - &peerNodeInfo, - int64(MaxNodeInfoSize()), - ) - return - }, - ) - if err := trs.FirstError(); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error during handshake") - } - - // Remove deadline - if err := pc.conn.SetDeadline(time.Time{}); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error removing deadline") - } - - return peerNodeInfo, nil -} - // Addr returns peer's remote network address. func (p *peer) Addr() net.Addr { return p.peerConn.conn.RemoteAddr() @@ -332,14 +284,7 @@ func (p *peer) CanSend(chID byte) bool { return p.mconn.CanSend(chID) } -// String representation. -func (p *peer) String() string { - if p.outbound { - return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID()) - } - - return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) -} +//--------------------------------------------------- func PeerMetrics(metrics *Metrics) PeerOption { return func(p *peer) { diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index ee1c52eab..c0ad80005 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -1,7 +1,6 @@ package p2p import ( - "fmt" "net" "sync" "testing" @@ -12,24 +11,34 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) -// Returns an empty kvstore peer -func randPeer(ip net.IP) *peer { +// mockPeer for testing the PeerSet +type mockPeer struct { + cmn.BaseService + ip net.IP + id ID +} + +func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } +func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } +func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } +func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } +func (mp *mockPeer) ID() ID { return mp.id } +func (mp *mockPeer) IsOutbound() bool { return false } +func (mp *mockPeer) IsPersistent() bool { return true } +func (mp *mockPeer) Get(s string) interface{} { return s } +func (mp *mockPeer) Set(string, interface{}) {} +func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } + +// Returns a mock peer +func newMockPeer(ip net.IP) *mockPeer { if ip == nil { ip = net.IP{127, 0, 0, 1} } - nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} - p := &peer{ - nodeInfo: NodeInfo{ - ID: nodeKey.ID(), - ListenAddr: fmt.Sprintf("%v.%v.%v.%v:26656", cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256), - }, - metrics: NopMetrics(), + return &mockPeer{ + ip: ip, + id: nodeKey.ID(), } - - p.ip = ip - - return p } func TestPeerSetAddRemoveOne(t *testing.T) { @@ -39,7 +48,7 @@ func TestPeerSetAddRemoveOne(t *testing.T) { var peerList []Peer for i := 0; i < 5; i++ { - p := randPeer(net.IP{127, 0, 0, byte(i)}) + p := newMockPeer(net.IP{127, 0, 0, byte(i)}) if err := peerSet.Add(p); err != nil { t.Error(err) } @@ -83,7 +92,7 @@ func TestPeerSetAddRemoveMany(t *testing.T) { peers := []Peer{} N := 100 for i := 0; i < N; i++ { - peer := randPeer(net.IP{127, 0, 0, byte(i)}) + peer := 
newMockPeer(net.IP{127, 0, 0, byte(i)}) if err := peerSet.Add(peer); err != nil { t.Errorf("Failed to add new peer") } @@ -107,7 +116,7 @@ func TestPeerSetAddRemoveMany(t *testing.T) { func TestPeerSetAddDuplicate(t *testing.T) { t.Parallel() peerSet := NewPeerSet() - peer := randPeer(nil) + peer := newMockPeer(nil) n := 20 errsChan := make(chan error) @@ -149,7 +158,7 @@ func TestPeerSetGet(t *testing.T) { var ( peerSet = NewPeerSet() - peer = randPeer(nil) + peer = newMockPeer(nil) ) assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") diff --git a/p2p/peer_test.go b/p2p/peer_test.go index a2a2946a1..fecf7f1cc 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -19,8 +19,6 @@ import ( tmconn "github.com/tendermint/tendermint/p2p/conn" ) -const testCh = 0x01 - func TestPeerBasic(t *testing.T) { assert, require := assert.New(t), require.New(t) @@ -81,18 +79,14 @@ func createOutboundPeerAndPerformHandshake( if err != nil { return nil, err } - nodeInfo, err := pc.HandshakeTimeout(NodeInfo{ - ID: addr.ID, - Moniker: "host_peer", - Network: "testing", - Version: "123.123.123", - Channels: []byte{testCh}, - }, 1*time.Second) + timeout := 1 * time.Second + ourNodeInfo := testNodeInfo(addr.ID, "host_peer") + peerNodeInfo, err := handshake(pc.conn, timeout, ourNodeInfo) if err != nil { return nil, err } - p := newPeer(pc, mConfig, nodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) + p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) p.SetLogger(log.TestingLogger().With("peer", addr)) return p, nil } @@ -120,7 +114,7 @@ func testOutboundPeerConn( return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") } - pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) + pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey) if err != nil { if cerr := conn.Close(); cerr != nil { return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) @@ -191,14 +185,7 @@ func (rp *remotePeer) accept(l net.Listener) { golog.Fatalf("Failed to create a peer: %+v", err) } - _, err = handshake(pc.conn, time.Second, NodeInfo{ - ID: rp.Addr().ID, - Moniker: "remote_peer", - Network: "testing", - Version: "123.123.123", - ListenAddr: l.Addr().String(), - Channels: rp.channels, - }) + _, err = handshake(pc.conn, time.Second, rp.nodeInfo(l)) if err != nil { golog.Fatalf("Failed to perform handshake: %+v", err) } @@ -217,3 +204,14 @@ func (rp *remotePeer) accept(l net.Listener) { } } } + +func (rp *remotePeer) nodeInfo(l net.Listener) NodeInfo { + return DefaultNodeInfo{ + ID_: rp.Addr().ID, + Moniker: "remote_peer", + Network: "testing", + Version: "123.123.123", + ListenAddr: l.Addr().String(), + Channels: rp.channels, + } +} diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index c22eabdc1..b0338c3c2 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -320,7 +320,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { peer := p2p.CreateRandomPeer(false) pexR, book := createReactor(&PEXReactorConfig{}) - book.AddPrivateIDs([]string{string(peer.NodeInfo().ID)}) + book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) defer teardownReactor(book) // we have to send a request to receive responses @@ -391,8 +391,8 @@ func (mp mockPeer) ID() p2p.ID { return mp.addr.ID } func (mp mockPeer) IsOutbound() bool { return mp.outbound } func (mp mockPeer) IsPersistent() bool { return mp.persistent } func (mp mockPeer) NodeInfo() p2p.NodeInfo { - return 
p2p.NodeInfo{ - ID: mp.addr.ID, + return p2p.DefaultNodeInfo{ + ID_: mp.addr.ID, ListenAddr: mp.addr.DialString(), } } @@ -402,7 +402,6 @@ func (mockPeer) Send(byte, []byte) bool { return false } func (mockPeer) TrySend(byte, []byte) bool { return false } func (mockPeer) Set(string, interface{}) {} func (mockPeer) Get(string) interface{} { return nil } -func (mockPeer) OriginalAddr() *p2p.NetAddress { return nil } func assertPeersWithTimeout( t *testing.T, diff --git a/p2p/switch.go b/p2p/switch.go index 8325d7e82..64e248fc3 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -280,12 +280,9 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - addr := peer.OriginalAddr() - if addr == nil { - // FIXME: persistent peers can't be inbound right now. - // self-reported address for inbound persistent peers - addr = peer.NodeInfo().NetAddress() - } + // TODO: use the original address dialed, not the self reported one + // See #2618. + addr := peer.NodeInfo().NetAddress() go sw.reconnectToPeer(addr) } } @@ -560,9 +557,13 @@ func (sw *Switch) addOutboundPeerWithConfig( // to avoid dialing in the future. sw.addrBook.RemoveAddress(addr) sw.addrBook.AddOurAddress(addr) + + return err } } + // retry persistent peers after + // any dial error besides IsSelf() if persistent { go sw.reconnectToPeer(addr) } diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 4fea3cfe0..f52e47f06 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -143,6 +143,7 @@ func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, r } return } + case <-time.After(timeout): t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) } diff --git a/p2p/test_util.go b/p2p/test_util.go index e35e0989f..2859dc645 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -14,6 +14,19 @@ import ( "github.com/tendermint/tendermint/p2p/conn" ) +const testCh = 0x01 + +//------------------------------------------------ + +type mockNodeInfo struct { + addr *NetAddress +} + +func (ni mockNodeInfo) ID() ID { return ni.addr.ID } +func (ni mockNodeInfo) NetAddress() *NetAddress { return ni.addr } +func (ni mockNodeInfo) ValidateBasic() error { return nil } +func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } + func AddPeerToSwitch(sw *Switch, peer Peer) { sw.peers.Add(peer) } @@ -24,12 +37,9 @@ func CreateRandomPeer(outbound bool) *peer { peerConn: peerConn{ outbound: outbound, }, - nodeInfo: NodeInfo{ - ID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - mconn: &conn.MConnection{}, - metrics: NopMetrics(), + nodeInfo: mockNodeInfo{netAddr}, + mconn: &conn.MConnection{}, + metrics: NopMetrics(), } p.SetLogger(log.TestingLogger().With("peer", addr)) return p @@ -159,36 +169,15 @@ func MakeSwitch( initSwitch func(int, *Switch) *Switch, opts ...SwitchOption, ) *Switch { - var ( - nodeKey = NodeKey{ - PrivKey: ed25519.GenPrivKey(), - } - ni = NodeInfo{ - ID: nodeKey.ID(), - Moniker: fmt.Sprintf("switch%d", i), - Network: network, - Version: version, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), - Other: NodeInfoOther{ - AminoVersion: "1.0", - P2PVersion: "1.0", - ConsensusVersion: "1.0", - RPCVersion: "1.0", - TxIndex: "off", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), - }, - } - ) - addr, err := NewNetAddressStringWithOptionalID( - IDAddressString(nodeKey.ID(), ni.ListenAddr), - ) - if err != nil { - panic(err) + nodeKey := NodeKey{ + PrivKey: 
ed25519.GenPrivKey(), } + nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i)) - t := NewMultiplexTransport(ni, nodeKey) + t := NewMultiplexTransport(nodeInfo, nodeKey) + addr := nodeInfo.NetAddress() if err := t.Listen(*addr); err != nil { panic(err) } @@ -198,14 +187,16 @@ func MakeSwitch( sw.SetLogger(log.TestingLogger()) sw.SetNodeKey(&nodeKey) + ni := nodeInfo.(DefaultNodeInfo) for ch := range sw.reactorsByCh { ni.Channels = append(ni.Channels, ch) } + nodeInfo = ni // TODO: We need to setup reactors ahead of time so the NodeInfo is properly // populated and we don't have to do those awkward overrides and setters. - t.nodeInfo = ni - sw.SetNodeInfo(ni) + t.nodeInfo = nodeInfo + sw.SetNodeInfo(nodeInfo) return sw } @@ -215,7 +206,7 @@ func testInboundPeerConn( config *config.P2PConfig, ourNodePrivKey crypto.PrivKey, ) (peerConn, error) { - return testPeerConn(conn, config, false, false, ourNodePrivKey, nil) + return testPeerConn(conn, config, false, false, ourNodePrivKey) } func testPeerConn( @@ -223,7 +214,6 @@ func testPeerConn( cfg *config.P2PConfig, outbound, persistent bool, ourNodePrivKey crypto.PrivKey, - originalAddr *NetAddress, ) (pc peerConn, err error) { conn := rawConn @@ -241,10 +231,27 @@ func testPeerConn( // Only the information we already have return peerConn{ - config: cfg, - outbound: outbound, - persistent: persistent, - conn: conn, - originalAddr: originalAddr, + config: cfg, + outbound: outbound, + persistent: persistent, + conn: conn, }, nil } + +//---------------------------------------------------------------- +// rand node info + +func testNodeInfo(id ID, name string) NodeInfo { + return testNodeInfoWithNetwork(id, name, "testing") +} + +func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo { + return DefaultNodeInfo{ + ID_: id, + ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), + Moniker: name, + Network: network, + Version: "123.123.123", + Channels: []byte{testCh}, + } +} diff --git a/p2p/transport.go b/p2p/transport.go index 6f097b4f7..b20f32f3d 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -335,7 +335,7 @@ func (mt *MultiplexTransport) upgrade( secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey) if err != nil { - return nil, NodeInfo{}, ErrRejected{ + return nil, nil, ErrRejected{ conn: c, err: fmt.Errorf("secrect conn failed: %v", err), isAuthFailure: true, @@ -344,15 +344,15 @@ func (mt *MultiplexTransport) upgrade( nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo) if err != nil { - return nil, NodeInfo{}, ErrRejected{ + return nil, nil, ErrRejected{ conn: c, err: fmt.Errorf("handshake failed: %v", err), isAuthFailure: true, } } - if err := nodeInfo.Validate(); err != nil { - return nil, NodeInfo{}, ErrRejected{ + if err := nodeInfo.ValidateBasic(); err != nil { + return nil, nil, ErrRejected{ conn: c, err: err, isNodeInfoInvalid: true, @@ -360,34 +360,34 @@ func (mt *MultiplexTransport) upgrade( } // Ensure connection key matches self reported key. - if connID := PubKeyToID(secretConn.RemotePubKey()); connID != nodeInfo.ID { - return nil, NodeInfo{}, ErrRejected{ + if connID := PubKeyToID(secretConn.RemotePubKey()); connID != nodeInfo.ID() { + return nil, nil, ErrRejected{ conn: c, id: connID, err: fmt.Errorf( "conn.ID (%v) NodeInfo.ID (%v) missmatch", connID, - nodeInfo.ID, + nodeInfo.ID(), ), isAuthFailure: true, } } // Reject self. 
- if mt.nodeInfo.ID == nodeInfo.ID { - return nil, NodeInfo{}, ErrRejected{ - addr: *NewNetAddress(nodeInfo.ID, c.RemoteAddr()), + if mt.nodeInfo.ID() == nodeInfo.ID() { + return nil, nil, ErrRejected{ + addr: *NewNetAddress(nodeInfo.ID(), c.RemoteAddr()), conn: c, - id: nodeInfo.ID, + id: nodeInfo.ID(), isSelf: true, } } if err := mt.nodeInfo.CompatibleWith(nodeInfo); err != nil { - return nil, NodeInfo{}, ErrRejected{ + return nil, nil, ErrRejected{ conn: c, err: err, - id: nodeInfo.ID, + id: nodeInfo.ID(), isIncompatible: true, } } @@ -430,17 +430,18 @@ func handshake( nodeInfo NodeInfo, ) (NodeInfo, error) { if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { - return NodeInfo{}, err + return nil, err } var ( errc = make(chan error, 2) - peerNodeInfo NodeInfo + peerNodeInfo DefaultNodeInfo + ourNodeInfo = nodeInfo.(DefaultNodeInfo) ) go func(errc chan<- error, c net.Conn) { - _, err := cdc.MarshalBinaryWriter(c, nodeInfo) + _, err := cdc.MarshalBinaryWriter(c, ourNodeInfo) errc <- err }(errc, c) go func(errc chan<- error, c net.Conn) { @@ -455,7 +456,7 @@ func handshake( for i := 0; i < cap(errc); i++ { err := <-errc if err != nil { - return NodeInfo{}, err + return nil, err } } diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 9e3cc467f..cce223a3e 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -11,9 +11,15 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" ) +var defaultNodeName = "host_peer" + +func emptyNodeInfo() NodeInfo { + return DefaultNodeInfo{} +} + func TestTransportMultiplexConnFilter(t *testing.T) { mt := NewMultiplexTransport( - NodeInfo{}, + emptyNodeInfo(), NodeKey{ PrivKey: ed25519.GenPrivKey(), }, @@ -70,7 +76,7 @@ func TestTransportMultiplexConnFilter(t *testing.T) { func TestTransportMultiplexConnFilterTimeout(t *testing.T) { mt := NewMultiplexTransport( - NodeInfo{}, + emptyNodeInfo(), NodeKey{ PrivKey: ed25519.GenPrivKey(), }, @@ -120,6 +126,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { t.Errorf("expected ErrFilterTimeout") } } + func TestTransportMultiplexAcceptMultiple(t *testing.T) { mt := testSetupMultiplexTransport(t) @@ -134,12 +141,7 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) { var ( pv = ed25519.GenPrivKey() dialer = NewMultiplexTransport( - NodeInfo{ - ID: PubKeyToID(pv.PubKey()), - ListenAddr: "127.0.0.1:0", - Moniker: "dialer", - Version: "1.0.0", - }, + testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), NodeKey{ PrivKey: pv, }, @@ -207,15 +209,10 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { var ( fastNodePV = ed25519.GenPrivKey() - fastNodeInfo = NodeInfo{ - ID: PubKeyToID(fastNodePV.PubKey()), - ListenAddr: "127.0.0.1:0", - Moniker: "fastNode", - Version: "1.0.0", - } - errc = make(chan error) - fastc = make(chan struct{}) - slowc = make(chan struct{}) + fastNodeInfo = testNodeInfo(PubKeyToID(fastNodePV.PubKey()), "fastnode") + errc = make(chan error) + fastc = make(chan struct{}) + slowc = make(chan struct{}) ) // Simulate slow Peer. 
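The upgrade path above now talks to peers purely through the NodeInfo interface, only asserting the concrete DefaultNodeInfo where amino needs a concrete type to decode into (inside handshake). A rough, self-contained sketch of the two checks every handshake performs on the peer's reported info; the IDs, address, and version strings below are made up for illustration and are not taken from the patch:

    package main

    import (
    	"fmt"

    	"github.com/tendermint/tendermint/p2p"
    )

    // makeInfo builds a DefaultNodeInfo with placeholder values.
    func makeInfo(id p2p.ID, moniker string) p2p.NodeInfo {
    	return p2p.DefaultNodeInfo{
    		ID_:        id,
    		ListenAddr: "127.0.0.1:26656",
    		Network:    "testing",
    		Version:    "0.25.0",
    		Channels:   []byte{0x01},
    		Moniker:    moniker,
    	}
    }

    func main() {
    	ours := makeInfo("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", "us")
    	theirs := makeInfo("beefdeadbeefdeadbeefdeadbeefdeadbeefdead", "them")

    	// First reject malformed infos, then reject incompatible ones.
    	fmt.Println("validate:", theirs.ValidateBasic())
    	fmt.Println("compatible:", ours.CompatibleWith(theirs))
    }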
@@ -248,11 +245,11 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { return } - _, err = handshake(sc, 20*time.Millisecond, NodeInfo{ - ID: PubKeyToID(ed25519.GenPrivKey().PubKey()), - ListenAddr: "127.0.0.1:0", - Moniker: "slow_peer", - }) + _, err = handshake(sc, 20*time.Millisecond, + testNodeInfo( + PubKeyToID(ed25519.GenPrivKey().PubKey()), + "slow_peer", + )) if err != nil { errc <- err return @@ -311,12 +308,7 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) { var ( pv = ed25519.GenPrivKey() dialer = NewMultiplexTransport( - NodeInfo{ - ID: PubKeyToID(pv.PubKey()), - ListenAddr: "127.0.0.1:0", - Moniker: "", // Should not be empty. - Version: "1.0.0", - }, + testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty NodeKey{ PrivKey: pv, }, @@ -359,12 +351,9 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) { go func() { dialer := NewMultiplexTransport( - NodeInfo{ - ID: PubKeyToID(ed25519.GenPrivKey().PubKey()), - ListenAddr: "127.0.0.1:0", - Moniker: "dialer", - Version: "1.0.0", - }, + testNodeInfo( + PubKeyToID(ed25519.GenPrivKey().PubKey()), "dialer", + ), NodeKey{ PrivKey: ed25519.GenPrivKey(), }, @@ -408,12 +397,7 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) { var ( pv = ed25519.GenPrivKey() dialer = NewMultiplexTransport( - NodeInfo{ - ID: PubKeyToID(pv.PubKey()), - ListenAddr: "127.0.0.1:0", - Moniker: "dialer", - Version: "2.0.0", - }, + testNodeInfoWithNetwork(PubKeyToID(pv.PubKey()), "dialer", "incompatible-network"), NodeKey{ PrivKey: pv, }, @@ -521,9 +505,7 @@ func TestTransportHandshake(t *testing.T) { var ( peerPV = ed25519.GenPrivKey() - peerNodeInfo = NodeInfo{ - ID: PubKeyToID(peerPV.PubKey()), - } + peerNodeInfo = testNodeInfo(PubKeyToID(peerPV.PubKey()), defaultNodeName) ) go func() { @@ -534,13 +516,13 @@ func TestTransportHandshake(t *testing.T) { } go func(c net.Conn) { - _, err := cdc.MarshalBinaryWriter(c, peerNodeInfo) + _, err := cdc.MarshalBinaryWriter(c, peerNodeInfo.(DefaultNodeInfo)) if err != nil { t.Error(err) } }(c) go func(c net.Conn) { - ni := NodeInfo{} + var ni DefaultNodeInfo _, err := cdc.UnmarshalBinaryReader( c, @@ -558,7 +540,7 @@ func TestTransportHandshake(t *testing.T) { t.Fatal(err) } - ni, err := handshake(c, 20*time.Millisecond, NodeInfo{}) + ni, err := handshake(c, 20*time.Millisecond, emptyNodeInfo()) if err != nil { t.Fatal(err) } @@ -572,12 +554,9 @@ func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { var ( pv = ed25519.GenPrivKey() mt = NewMultiplexTransport( - NodeInfo{ - ID: PubKeyToID(pv.PubKey()), - ListenAddr: "127.0.0.1:0", - Moniker: "transport", - Version: "1.0.0", - }, + testNodeInfo( + PubKeyToID(pv.PubKey()), "transport", + ), NodeKey{ PrivKey: pv, }, diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 1d5f92753..1c2619d5c 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -2,7 +2,6 @@ package core import ( cm "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -201,7 +200,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { } peerStates[i] = ctypes.PeerStateInfo{ // Peer basic info. - NodeAddress: p2p.IDAddressString(peer.ID(), peer.NodeInfo().ListenAddr), + NodeAddress: peer.NodeInfo().NetAddress().String(), // Peer consensus state. 
PeerState: peerStateJSON, } diff --git a/rpc/core/net.go b/rpc/core/net.go index 9816d2f63..dbd4d8c0b 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -1,8 +1,11 @@ package core import ( + "fmt" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" ) @@ -37,8 +40,12 @@ import ( func NetInfo() (*ctypes.ResultNetInfo, error) { peers := []ctypes.Peer{} for _, peer := range p2pPeers.Peers().List() { + nodeInfo, ok := peer.NodeInfo().(p2p.DefaultNodeInfo) + if !ok { + return nil, fmt.Errorf("peer.NodeInfo() is not DefaultNodeInfo") + } peers = append(peers, ctypes.Peer{ - NodeInfo: peer.NodeInfo(), + NodeInfo: nodeInfo, IsOutbound: peer.IsOutbound(), ConnectionStatus: peer.Status(), }) diff --git a/rpc/core/status.go b/rpc/core/status.go index 17fb2f341..c26b06b8a 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -5,6 +5,7 @@ import ( "time" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -91,7 +92,7 @@ func Status() (*ctypes.ResultStatus, error) { } result := &ctypes.ResultStatus{ - NodeInfo: p2pTransport.NodeInfo(), + NodeInfo: p2pTransport.NodeInfo().(p2p.DefaultNodeInfo), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index a6dcf2b93..07628d1c6 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -74,9 +74,9 @@ type ValidatorInfo struct { // Node Status type ResultStatus struct { - NodeInfo p2p.NodeInfo `json:"node_info"` - SyncInfo SyncInfo `json:"sync_info"` - ValidatorInfo ValidatorInfo `json:"validator_info"` + NodeInfo p2p.DefaultNodeInfo `json:"node_info"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` } // Is TxIndexing enabled @@ -107,7 +107,7 @@ type ResultDialPeers struct { // A peer type Peer struct { - p2p.NodeInfo `json:"node_info"` + NodeInfo p2p.DefaultNodeInfo `json:"node_info"` IsOutbound bool `json:"is_outbound"` ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` } diff --git a/rpc/core/types/responses_test.go b/rpc/core/types/responses_test.go index c6c86e1f7..796299d33 100644 --- a/rpc/core/types/responses_test.go +++ b/rpc/core/types/responses_test.go @@ -15,17 +15,17 @@ func TestStatusIndexer(t *testing.T) { status = &ResultStatus{} assert.False(t, status.TxIndexEnabled()) - status.NodeInfo = p2p.NodeInfo{} + status.NodeInfo = p2p.DefaultNodeInfo{} assert.False(t, status.TxIndexEnabled()) cases := []struct { expected bool - other p2p.NodeInfoOther + other p2p.DefaultNodeInfoOther }{ - {false, p2p.NodeInfoOther{}}, - {false, p2p.NodeInfoOther{TxIndex: "aa"}}, - {false, p2p.NodeInfoOther{TxIndex: "off"}}, - {true, p2p.NodeInfoOther{TxIndex: "on"}}, + {false, p2p.DefaultNodeInfoOther{}}, + {false, p2p.DefaultNodeInfoOther{TxIndex: "aa"}}, + {false, p2p.DefaultNodeInfoOther{TxIndex: "off"}}, + {true, p2p.DefaultNodeInfoOther{TxIndex: "on"}}, } for _, tc := range cases { From 0790223518b2f4c75f98244c59396f25c40100ab Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Sat, 13 Oct 2018 17:01:21 -0700 Subject: [PATCH 071/113] Comment about ed25519 private key format on Sign (#2632) Closes #2001 --- crypto/ed25519/ed25519.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crypto/ed25519/ed25519.go 
b/crypto/ed25519/ed25519.go index c2bed6ab1..61872d98d 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -46,6 +46,12 @@ func (privKey PrivKeyEd25519) Bytes() []byte { } // Sign produces a signature on the provided message. +// This assumes the privkey is wellformed in the golang format. +// The first 32 bytes should be random, +// corresponding to the normal ed25519 private key. +// The latter 32 bytes should be the compressed public key. +// If these conditions aren't met, Sign will panic or produce an +// incorrect signature. func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) { signatureBytes := ed25519.Sign(privKey[:], msg) return signatureBytes[:], nil From 37928cb9907a60ae79d5b387d1d89ffc43dc07d8 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 15 Oct 2018 06:28:41 +0400 Subject: [PATCH 072/113] set next validators along with validators while replay (#2637) Closes #2634 --- consensus/replay.go | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/replay.go b/consensus/replay.go index c92654f2c..af6369c3b 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -287,6 +287,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight return nil, err } state.Validators = types.NewValidatorSet(vals) + state.NextValidators = types.NewValidatorSet(vals) } if res.ConsensusParams != nil { state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams) From 287b25a0592ad572e5e184d1f3e995b68454699d Mon Sep 17 00:00:00 2001 From: Zarko Milosevic Date: Mon, 15 Oct 2018 22:05:13 +0200 Subject: [PATCH 073/113] Align with spec (#2642) --- consensus/common_test.go | 8 ++ consensus/reactor.go | 6 +- consensus/state.go | 18 ++-- consensus/state_test.go | 198 ++++++++++++++++++++++++++++----------- p2p/metrics.go | 2 +- types/evidence_test.go | 2 +- 6 files changed, 162 insertions(+), 72 deletions(-) diff --git a/consensus/common_test.go b/consensus/common_test.go index ddce69145..ca14a2926 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -487,6 +487,14 @@ func ensureVote(voteCh <-chan interface{}, height int64, round int, } } +func ensurePrecommit(voteCh <-chan interface{}, height int64, round int) { + ensureVote(voteCh, height, round, types.PrecommitType) +} + +func ensurePrevote(voteCh <-chan interface{}, height int64, round int) { + ensureVote(voteCh, height, round, types.PrevoteType) +} + func ensureNewEventOnChannel(ch <-chan interface{}) { select { case <-time.After(ensureTimeout): diff --git a/consensus/reactor.go b/consensus/reactor.go index 6643273cb..bcf77fb3a 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -429,9 +429,9 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) { func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) { nrsMsg = &NewRoundStepMessage{ - Height: rs.Height, - Round: rs.Round, - Step: rs.Step, + Height: rs.Height, + Round: rs.Round, + Step: rs.Step, SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.Round(), } diff --git a/consensus/state.go b/consensus/state.go index 37047aa30..375674008 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -532,10 +532,10 @@ func (cs *ConsensusState) updateToState(state sm.State) { cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil - cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil - cs.ValidRound = 0 + cs.ValidRound = -1 
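The comment added to Sign earlier in this series describes the standard 64-byte layout used by Go ed25519 implementations: the first 32 bytes are the seed, and the last 32 bytes are the public key derived from it. A small, self-contained check of that layout using the standard library's crypto/ed25519, which follows the same convention as the ed25519 package this file wraps; this example is illustrative only and is not part of the patch:

    package main

    import (
    	"bytes"
    	"crypto/ed25519"
    	"fmt"
    )

    func main() {
    	_, priv, _ := ed25519.GenerateKey(nil) // 64 bytes: seed || public key
    	seed := priv.Seed()                    // first 32 bytes
    	rebuilt := ed25519.NewKeyFromSeed(seed)

    	// The second half of the key is fully determined by the first half.
    	fmt.Println(bytes.Equal(priv, rebuilt))                                // true
    	fmt.Println(bytes.Equal(priv[32:], priv.Public().(ed25519.PublicKey))) // true
    }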
cs.ValidBlock = nil cs.ValidBlockParts = nil cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) @@ -889,10 +889,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { var blockParts *types.PartSet // Decide on block - if cs.LockedBlock != nil { - // If we're locked onto a block, just choose that. - block, blockParts = cs.LockedBlock, cs.LockedBlockParts - } else if cs.ValidBlock != nil { + if cs.ValidBlock != nil { // If there is valid block, choose that. block, blockParts = cs.ValidBlock, cs.ValidBlockParts } else { @@ -983,7 +980,6 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts // Enter: `timeoutPropose` after entering Propose. // Enter: proposal block and POL is ready. -// Enter: any +2/3 prevotes for future round. // Prevote for LockedBlock if we're locked, or ProposalBlock if valid. // Otherwise vote nil. func (cs *ConsensusState) enterPrevote(height int64, round int) { @@ -1072,8 +1068,8 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { } // Enter: `timeoutPrevote` after any +2/3 prevotes. +// Enter: `timeoutPrecommit` after any +2/3 precommits. // Enter: +2/3 precomits for block or nil. -// Enter: any +2/3 precommits for next round. // Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) // else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, // else, precommit nil otherwise. @@ -1122,7 +1118,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted for nil.") } else { logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking") - cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) @@ -1161,7 +1157,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // Fetch that block, unlock, and precommit nil. // The +2/3 prevotes for this round is the POL for our unlock. // TODO: In the future save the POL prevotes for justification. 
- cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { @@ -1612,7 +1608,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, !cs.LockedBlock.HashesTo(blockID.Hash) { cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round) - cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) diff --git a/consensus/state_test.go b/consensus/state_test.go index 229d7e7bb..c4fc11c3d 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -214,16 +214,16 @@ func TestStateBadProposal(t *testing.T) { ensureNewProposal(proposalCh, height, round) // wait for prevote - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) // wait for precommit - ensureVote(voteCh, height, round, types.PrecommitType) - validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } @@ -255,10 +255,10 @@ func TestStateFullRound1(t *testing.T) { ensureNewProposal(propCh, height, round) propBlockHash := cs.GetRoundState().ProposalBlock.Hash() - ensureVote(voteCh, height, round, types.PrevoteType) // wait for prevote + ensurePrevote(voteCh, height, round) // wait for prevote validatePrevote(t, cs, round, vss[0], propBlockHash) - ensureVote(voteCh, height, round, types.PrecommitType) // wait for precommit + ensurePrecommit(voteCh, height, round) // wait for precommit // we're going to roll right into new height ensureNewRound(newRoundCh, height+1, 0) @@ -276,11 +276,11 @@ func TestStateFullRoundNil(t *testing.T) { cs.enterPrevote(height, round) cs.startRoutines(4) - ensureVote(voteCh, height, round, types.PrevoteType) // prevote - ensureVote(voteCh, height, round, types.PrecommitType) // precommit + ensurePrevote(voteCh, height, round) // prevote + ensurePrecommit(voteCh, height, round) // precommit // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) + validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil) } // run through propose, prevote, precommit commit with two validators @@ -296,7 +296,7 @@ func TestStateFullRound2(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, height, round) - ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensurePrevote(voteCh, height, round) // prevote // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() @@ -304,9 +304,9 @@ func TestStateFullRound2(t *testing.T) { // prevote arrives from vs2: signAddVotes(cs1, types.PrevoteType, propBlockHash, propPartsHeader, vs2) - ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensurePrevote(voteCh, height, round) // prevote - ensureVote(voteCh, height, round, types.PrecommitType) //precommit + ensurePrecommit(voteCh, height, round) //precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, 
vss[0], propBlockHash, propBlockHash) @@ -314,7 +314,7 @@ func TestStateFullRound2(t *testing.T) { // precommit arrives from vs2: signAddVotes(cs1, types.PrecommitType, propBlockHash, propPartsHeader, vs2) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // wait to finish commit, propose in next height ensureNewBlock(newBlockCh, height) @@ -353,14 +353,14 @@ func TestStateLockNoPOL(t *testing.T) { theBlockHash := roundState.ProposalBlock.Hash() thePartSetHeader := roundState.ProposalBlockParts.Header() - ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensurePrevote(voteCh, height, round) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: signAddVotes(cs1, types.PrevoteType, theBlockHash, thePartSetHeader, vs2) - ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensurePrevote(voteCh, height, round) // prevote - ensureVote(voteCh, height, round, types.PrecommitType) // precommit + ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) @@ -370,7 +370,7 @@ func TestStateLockNoPOL(t *testing.T) { copy(hash, theBlockHash) hash[0] = byte((hash[0] + 1) % 255) signAddVotes(cs1, types.PrecommitType, hash, thePartSetHeader, vs2) - ensureVote(voteCh, height, round, types.PrecommitType) // precommit + ensurePrecommit(voteCh, height, round) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round @@ -397,26 +397,26 @@ func TestStateLockNoPOL(t *testing.T) { } // wait to finish prevote - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) // we should have prevoted our locked block validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // add a conflicting prevote from the other validator signAddVotes(cs1, types.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. then wait for precommit ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureVote(voteCh, height, round, types.PrecommitType) // precommit + ensurePrecommit(voteCh, height, round) // precommit // the proposed block should still be locked and our precommit added // we should precommit nil and be locked on the proposal validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // add conflicting precommit from vs2 signAddVotes(cs1, types.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound @@ -439,19 +439,19 @@ func TestStateLockNoPOL(t *testing.T) { panic(fmt.Sprintf("Expected proposal block to be locked block. 
Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) } - ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensurePrevote(voteCh, height, round) // prevote validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureVote(voteCh, height, round, types.PrecommitType) // precommit + ensurePrecommit(voteCh, height, round) // precommit validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) @@ -478,20 +478,20 @@ func TestStateLockNoPOL(t *testing.T) { } ensureNewProposal(proposalCh, height, round) - ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensurePrevote(voteCh, height, round) // prevote // prevote for locked block (not proposal) validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) // prevote for proposed block signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka @@ -525,11 +525,11 @@ func TestStateLockPOLRelock(t *testing.T) { theBlockHash := rs.ProposalBlock.Hash() theBlockParts := rs.ProposalBlockParts.Header() - ensureVote(voteCh, height, round, types.PrevoteType) // prevote + ensurePrevote(voteCh, height, round) // prevote signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.PrecommitType) // our precommit + ensurePrecommit(voteCh, height, round) // our precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) @@ -567,13 +567,13 @@ func TestStateLockPOLRelock(t *testing.T) { ensureNewProposal(proposalCh, height, round) // go to prevote, prevote for locked block (not proposal), move on - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], theBlockHash) // now lets add prevotes from everyone else for the new block signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // we should have unlocked and locked on the new block validatePrecommit(t, cs1, 
round, round, vss[0], propBlockHash, propBlockHash) @@ -614,12 +614,12 @@ func TestStateLockPOLUnlock(t *testing.T) { theBlockHash := rs.ProposalBlock.Hash() theBlockParts := rs.ProposalBlockParts.Header() - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], theBlockHash) signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) @@ -656,18 +656,18 @@ func TestStateLockPOLUnlock(t *testing.T) { ensureNewProposal(proposalCh, height, round) // go to prevote, prevote for locked block (not proposal) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], lockedBlockHash) // now lets add prevotes from everyone else for nil (a polka!) signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // the polka makes us unlock and precommit nil ensureNewUnlock(unlockCh, height, round) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // we should have unlocked and committed nil - // NOTE: since we don't relock on nil, the lock round is 0 - validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) + // NOTE: since we don't relock on nil, the lock round is -1 + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) ensureNewRound(newRoundCh, height, round+1) @@ -698,7 +698,7 @@ func TestStateLockPOLSafety1(t *testing.T) { rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) // the others sign a polka but we don't see it @@ -710,7 +710,7 @@ func TestStateLockPOLSafety1(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // cs1 precommit nil - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) t.Log("### ONTO ROUND 1") @@ -743,13 +743,13 @@ func TestStateLockPOLSafety1(t *testing.T) { t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) // go to prevote, prevote for proposal block - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // we should have precommitted validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) @@ -771,7 +771,7 @@ func TestStateLockPOLSafety1(t *testing.T) { ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) // finish prevote - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) // we should prevote what we're locked on validatePrevote(t, cs1, round, vss[0], propBlockHash) @@ -834,12 +834,12 @@ func TestStateLockPOLSafety2(t *testing.T) { } 
ensureNewProposal(proposalCh, height, round) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash1) signAddVotes(cs1, types.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) @@ -873,11 +873,97 @@ func TestStateLockPOLSafety2(t *testing.T) { ensureNewProposal(proposalCh, height, round) ensureNoNewUnlock(unlockCh) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash1) } +// 4 vals. +// polka P0 at R0 for B0. We lock B0 on P0 at R0. P0 unlocks value at R1. + +// What we want: +// P0 proposes B0 at R3. +func TestProposeValidBlock(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round and wait for propose and prevote + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + propBlock := rs.ProposalBlock + propBlockHash := propBlock.Hash() + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) + + // the others sign a polka but we don't see it + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + + ensurePrecommit(voteCh, height, round) + // we should have precommitted + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + + t.Log("### ONTO ROUND 2") + + // timeout of propose + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) + + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewUnlock(unlockCh, height, round) + + ensurePrecommit(voteCh, height, round) + // we should have precommitted + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + + incrementRound(vs2, vs3, vs4) + incrementRound(vs2, vs3, vs4) + + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + round = round + 2 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + round = round + 1 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + + t.Log("### ONTO 
ROUND 4") + + ensureNewProposal(proposalCh, height, round) + + rs = cs1.GetRoundState() + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), propBlockHash)) + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.ValidBlock.Hash())) +} + // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round @@ -915,7 +1001,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) @@ -928,7 +1014,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) } @@ -948,7 +1034,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) @@ -956,8 +1042,8 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { round = round + 1 // moving to the next round ensureNewRound(newRoundCh, height, round) - ensureVote(voteCh, height, round, types.PrecommitType) - validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) @@ -986,7 +1072,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) } @@ -1096,11 +1182,11 @@ func TestStateHalt1(t *testing.T) { propBlock := rs.ProposalBlock propBlockParts := propBlock.MakePartSet(partSize) - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, height, round, types.PrecommitType) + ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) @@ -1127,7 +1213,7 @@ func TestStateHalt1(t *testing.T) { */ // go to prevote, prevote for locked block - ensureVote(voteCh, height, round, types.PrevoteType) + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // now we receive the precommit from the previous round diff --git a/p2p/metrics.go b/p2p/metrics.go index b066fb317..ed26d1192 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -62,7 +62,7 @@ func PrometheusMetrics(namespace string) *Metrics { // NopMetrics returns no-op Metrics. 
func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), + Peers: discard.NewGauge(), PeerReceiveBytesTotal: discard.NewCounter(), PeerSendBytesTotal: discard.NewCounter(), PeerPendingSendBytes: discard.NewGauge(), diff --git a/types/evidence_test.go b/types/evidence_test.go index 79805691c..a8d7efff8 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -61,7 +61,7 @@ func TestEvidence(t *testing.T) { {vote1, makeVote(val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round {vote1, makeVote(val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step {vote1, makeVote(val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator - {vote1, badVote, false}, // signed by wrong key + {vote1, badVote, false}, // signed by wrong key } pubKey := val.GetPubKey() From 26462025bc327b238d9972ce9ff0fc07175a1ab4 Mon Sep 17 00:00:00 2001 From: Joon Date: Tue, 16 Oct 2018 05:28:49 +0900 Subject: [PATCH 074/113] standardize header.Hash() (#2610) --- types/block.go | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/types/block.go b/types/block.go index fe3b17250..f41f4c1fa 100644 --- a/types/block.go +++ b/types/block.go @@ -297,21 +297,21 @@ func (h *Header) Hash() cmn.HexBytes { return nil } return merkle.SimpleHashFromMap(map[string][]byte{ - "ChainID": cdcEncode(h.ChainID), - "Height": cdcEncode(h.Height), - "Time": cdcEncode(h.Time), - "NumTxs": cdcEncode(h.NumTxs), - "TotalTxs": cdcEncode(h.TotalTxs), - "LastBlockID": cdcEncode(h.LastBlockID), - "LastCommit": cdcEncode(h.LastCommitHash), - "Data": cdcEncode(h.DataHash), - "Validators": cdcEncode(h.ValidatorsHash), - "NextValidators": cdcEncode(h.NextValidatorsHash), - "App": cdcEncode(h.AppHash), - "Consensus": cdcEncode(h.ConsensusHash), - "Results": cdcEncode(h.LastResultsHash), - "Evidence": cdcEncode(h.EvidenceHash), - "Proposer": cdcEncode(h.ProposerAddress), + "ChainID": cdcEncode(h.ChainID), + "Height": cdcEncode(h.Height), + "Time": cdcEncode(h.Time), + "NumTxs": cdcEncode(h.NumTxs), + "TotalTxs": cdcEncode(h.TotalTxs), + "LastBlockID": cdcEncode(h.LastBlockID), + "LastCommitHash": cdcEncode(h.LastCommitHash), + "DataHash": cdcEncode(h.DataHash), + "ValidatorsHash": cdcEncode(h.ValidatorsHash), + "NextValidatorsHash": cdcEncode(h.NextValidatorsHash), + "AppHash": cdcEncode(h.AppHash), + "ConsensusHash": cdcEncode(h.ConsensusHash), + "LastResultsHash": cdcEncode(h.LastResultsHash), + "EvidenceHash": cdcEncode(h.EvidenceHash), + "ProposerAddress": cdcEncode(h.ProposerAddress), }) } From 4ab7dcf3acdad757c96ec0553d6542aa3a93ee2f Mon Sep 17 00:00:00 2001 From: Joon Date: Tue, 16 Oct 2018 05:31:27 +0900 Subject: [PATCH 075/113] [R4R] Unmerklize ConsensusParams.Hash() (#2609) * Hash() uses tmhash instead of merkle.SimpleHashFromMap * marshal whole struct * update comments * update docs --- docs/spec/blockchain/blockchain.md | 4 ++-- types/params.go | 19 ++++++++++++------- types/params_test.go | 5 +++++ 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index 89ab1b4f7..029b64fac 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -320,10 +320,10 @@ next validator sets Merkle root. ### ConsensusParamsHash ```go -block.ConsensusParamsHash == SimpleMerkleRoot(state.ConsensusParams) +block.ConsensusParamsHash == tmhash(amino(state.ConsensusParams)) ``` -Simple Merkle root of the consensus parameters. 
+Hash of the amino-encoded consensus parameters.

### AppHash

diff --git a/types/params.go b/types/params.go
index 129d47627..ed1e7963b 100644
--- a/types/params.go
+++ b/types/params.go
@@ -2,7 +2,7 @@ package types

import (
	abci "github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
	cmn "github.com/tendermint/tendermint/libs/common"
)

@@ -80,13 +80,18 @@ func (params *ConsensusParams) Validate() error {
	return nil
}

-// Hash returns a merkle hash of the parameters to store in the block header
+// Hash returns a hash of the parameters to store in the block header.
+// No Merkle tree is used here: only three values are hashed, so the benefit
+// of per-field Merkle proofs is outweighed by their overhead.
+// Revisit this function if new fields are added to ConsensusParams.
func (params *ConsensusParams) Hash() []byte {
-	return merkle.SimpleHashFromMap(map[string][]byte{
-		"block_size_max_bytes":     cdcEncode(params.BlockSize.MaxBytes),
-		"block_size_max_gas":       cdcEncode(params.BlockSize.MaxGas),
-		"evidence_params_max_age":  cdcEncode(params.EvidenceParams.MaxAge),
-	})
+	hasher := tmhash.New()
+	bz := cdcEncode(params)
+	if bz == nil {
+		panic("cannot fail to encode ConsensusParams")
+	}
+	hasher.Write(bz)
+	return hasher.Sum(nil)
}

// Update returns a copy of the params with updates from the non-zero fields of p2.
diff --git a/types/params_test.go b/types/params_test.go
index 888b678b4..2936e5a4e 100644
--- a/types/params_test.go
+++ b/types/params_test.go
@@ -53,6 +53,11 @@ func TestConsensusParamsHash(t *testing.T) {
		makeParams(4, 2, 3),
		makeParams(1, 4, 3),
		makeParams(1, 2, 4),
+		makeParams(2, 5, 7),
+		makeParams(1, 7, 6),
+		makeParams(9, 5, 4),
+		makeParams(7, 8, 9),
+		makeParams(4, 6, 5),
	}

	hashes := make([][]byte, len(params))

From 124d0db1e092e1f7d6a3747cee29c370d998e61c Mon Sep 17 00:00:00 2001
From: Dev Ojha
Date: Mon, 15 Oct 2018 15:42:47 -0500
Subject: [PATCH 076/113] Make txs and evidencelist use
 merkle.SimpleHashFromBytes to create hash (#2635)

This is a performance regression, but it also spares the types directory from
knowing about RFC 6962, which is a more correct abstraction. For txs this
performance hit will be fixed soon with #2603. For evidence, the performance
impact is negligible because the list is capped at a small number of items.
---
 CHANGELOG_PENDING.md          |  1 +
 crypto/merkle/simple_proof.go |  6 +++---
 crypto/merkle/simple_tree.go  |  6 +++---
 types/block.go                |  2 --
 types/evidence.go             | 31 ++++++++++++++++++++-----------
 types/tx.go                   | 17 ++++++-----------
 6 files changed, 33 insertions(+), 30 deletions(-)

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index f82ddbc2b..05369ea66 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -24,6 +24,7 @@ BREAKING CHANGES:
 * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`.
* [crypto/merkle & lite] \#2298 Various changes to accomodate General Merkle trees * [crypto/merkle] \#2595 Remove all Hasher objects in favor of byte slices + * [crypto/merkle] \#2635 merkle.SimpleHashFromTwoHashes is no longer exported * [types] \#2598 `VoteTypeXxx` are now * Blockchain Protocol diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index d2cbb126f..fd6d07b88 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -134,13 +134,13 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][ if leftHash == nil { return nil } - return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) + return simpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) } rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) if rightHash == nil { return nil } - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) + return simpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) } } @@ -187,7 +187,7 @@ func trailsFromByteSlices(items [][]byte) (trails []*SimpleProofNode, root *Simp default: lefts, leftRoot := trailsFromByteSlices(items[:(len(items)+1)/2]) rights, rightRoot := trailsFromByteSlices(items[(len(items)+1)/2:]) - rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) + rootHash := simpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) root := &SimpleProofNode{rootHash, nil, nil, nil} leftRoot.Parent = root leftRoot.Right = rightRoot diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go index 45e0c5c56..7aacb0889 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/simple_tree.go @@ -4,8 +4,8 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" ) -// SimpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right). -func SimpleHashFromTwoHashes(left, right []byte) []byte { +// simpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right). 
+func simpleHashFromTwoHashes(left, right []byte) []byte { var hasher = tmhash.New() err := encodeByteSlice(hasher, left) if err != nil { @@ -29,7 +29,7 @@ func SimpleHashFromByteSlices(items [][]byte) []byte { default: left := SimpleHashFromByteSlices(items[:(len(items)+1)/2]) right := SimpleHashFromByteSlices(items[(len(items)+1)/2:]) - return SimpleHashFromTwoHashes(left, right) + return simpleHashFromTwoHashes(left, right) } } diff --git a/types/block.go b/types/block.go index f41f4c1fa..45a5b8c37 100644 --- a/types/block.go +++ b/types/block.go @@ -576,7 +576,6 @@ func (sh SignedHeader) StringIndented(indent string) string { indent, sh.Header.StringIndented(indent+" "), indent, sh.Commit.StringIndented(indent+" "), indent) - return "" } //----------------------------------------------------------------------------- @@ -660,7 +659,6 @@ func (data *EvidenceData) StringIndented(indent string) string { %s}#%v`, indent, strings.Join(evStrings, "\n"+indent+" "), indent, data.hash) - return "" } //-------------------------------------------------------------------------------- diff --git a/types/evidence.go b/types/evidence.go index 00c46c593..57523ab1e 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -55,6 +55,7 @@ func (err *ErrEvidenceOverflow) Error() string { type Evidence interface { Height() int64 // height of the equivocation Address() []byte // address of the equivocating validator + Bytes() []byte // bytes which compromise the evidence Hash() []byte // hash of the evidence Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence Equal(Evidence) bool // check equality of evidence @@ -88,6 +89,8 @@ type DuplicateVoteEvidence struct { VoteB *Vote } +var _ Evidence = &DuplicateVoteEvidence{} + // String returns a string representation of the evidence. func (dve *DuplicateVoteEvidence) String() string { return fmt.Sprintf("VoteA: %v; VoteB: %v", dve.VoteA, dve.VoteB) @@ -104,6 +107,11 @@ func (dve *DuplicateVoteEvidence) Address() []byte { return dve.PubKey.Address() } +// Hash returns the hash of the evidence. +func (dve *DuplicateVoteEvidence) Bytes() []byte { + return cdcEncode(dve) +} + // Hash returns the hash of the evidence. func (dve *DuplicateVoteEvidence) Hash() []byte { return tmhash.Sum(cdcEncode(dve)) @@ -172,6 +180,8 @@ type MockGoodEvidence struct { Address_ []byte } +var _ Evidence = &MockGoodEvidence{} + // UNSTABLE func NewMockGoodEvidence(height int64, idx int, address []byte) MockGoodEvidence { return MockGoodEvidence{height, address} @@ -182,6 +192,9 @@ func (e MockGoodEvidence) Address() []byte { return e.Address_ } func (e MockGoodEvidence) Hash() []byte { return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_)) } +func (e MockGoodEvidence) Bytes() []byte { + return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_)) +} func (e MockGoodEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil } func (e MockGoodEvidence) Equal(ev Evidence) bool { e2 := ev.(MockGoodEvidence) @@ -216,18 +229,14 @@ type EvidenceList []Evidence // Hash returns the simple merkle root hash of the EvidenceList. func (evl EvidenceList) Hash() []byte { - // Recursive impl. 
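The replacement implementations of Txs.Hash and EvidenceList.Hash in the hunks below simply copy the items into a [][]byte and delegate to the merkle package. A small usage sketch of the exported helper, assuming crypto/merkle at this revision:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	// What Txs.Hash and EvidenceList.Hash now do internally: gather the raw
	// items and let the merkle package build the tree.
	items := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")}
	fmt.Printf("root: %X\n", merkle.SimpleHashFromByteSlices(items))
}
```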
- // Copied from crypto/merkle to avoid allocations - switch len(evl) { - case 0: - return nil - case 1: - return evl[0].Hash() - default: - left := EvidenceList(evl[:(len(evl)+1)/2]).Hash() - right := EvidenceList(evl[(len(evl)+1)/2:]).Hash() - return merkle.SimpleHashFromTwoHashes(left, right) + // These allocations are required because Evidence is not of type Bytes, and + // golang slices can't be typed cast. This shouldn't be a performance problem since + // the Evidence size is capped. + evidenceBzs := make([][]byte, len(evl)) + for i := 0; i < len(evl); i++ { + evidenceBzs[i] = evl[i].Bytes() } + return merkle.SimpleHashFromByteSlices(evidenceBzs) } func (evl EvidenceList) String() string { diff --git a/types/tx.go b/types/tx.go index ec42f3f13..10c097e36 100644 --- a/types/tx.go +++ b/types/tx.go @@ -31,18 +31,13 @@ type Txs []Tx // Hash returns the simple Merkle root hash of the transactions. func (txs Txs) Hash() []byte { - // Recursive impl. - // Copied from tendermint/crypto/merkle to avoid allocations - switch len(txs) { - case 0: - return nil - case 1: - return txs[0].Hash() - default: - left := Txs(txs[:(len(txs)+1)/2]).Hash() - right := Txs(txs[(len(txs)+1)/2:]).Hash() - return merkle.SimpleHashFromTwoHashes(left, right) + // These allocations will be removed once Txs is switched to [][]byte, + // ref #2603. This is because golang does not allow type casting slices without unsafe + txBzs := make([][]byte, len(txs)) + for i := 0; i < len(txs); i++ { + txBzs[i] = txs[i] } + return merkle.SimpleHashFromByteSlices(txBzs) } // Index returns the index of this transaction in the list, or -1 if not found From 55362ed76630f3e1ebec159a598f6a9fb5892cb1 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 16 Oct 2018 10:09:24 +0400 Subject: [PATCH 077/113] [pubsub] document design shortcomings (#2641) Refs https://github.com/tendermint/tendermint/issues/1811#issuecomment-427825250 --- libs/pubsub/pubsub.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index c104439f8..18f098d87 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -9,6 +9,39 @@ // When some message is published, we match it with all queries. If there is a // match, this message will be pushed to all clients, subscribed to that query. // See query subpackage for our implementation. +// +// Due to the blocking send implementation, a single subscriber can freeze an +// entire server by not reading messages before it unsubscribes. 
To avoid such +// scenario, subscribers must either: +// +// a) make sure they continue to read from the out channel until +// Unsubscribe(All) is called +// +// s.Subscribe(ctx, sub, qry, out) +// go func() { +// for msg := range out { +// // handle msg +// // will exit automatically when out is closed by Unsubscribe(All) +// } +// }() +// s.UnsubscribeAll(ctx, sub) +// +// b) drain the out channel before calling Unsubscribe(All) +// +// s.Subscribe(ctx, sub, qry, out) +// defer func() { +// for range out { +// // drain out to make sure we don't block +// } +// s.UnsubscribeAll(ctx, sub) +// }() +// for msg := range out { +// // handle msg +// if err != nil { +// return err +// } +// } +// package pubsub import ( From 80562669bfc0ad56d9d37662407c60661fdf476b Mon Sep 17 00:00:00 2001 From: Peng Zhong <172531+nylira@users.noreply.github.com> Date: Tue, 16 Oct 2018 14:10:52 +0800 Subject: [PATCH 078/113] add Google Analytics for documentation pages (#2645) We need Google Analytics to start measuring how many developers are viewing our documentation. That way we can gauge how successful an/or useful various pages are. VuePress supports GA and all we have to provide is the tracking code. This PR also renames the static docs site to "Tendermint Documentation", which is a better representation of the contents than only "Tendermint Core". --- docs/.vuepress/config.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 342c5eac3..9f8ddbc73 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -1,6 +1,7 @@ module.exports = { - title: "Tendermint Core", - description: "Documentation for Tendermint Core", + title: "Tendermint Documentation", + description: "Documentation for Tendermint Core.", + ga: "UA-51029217-1", dest: "./dist/docs", base: "/docs/", markdown: { From ed107d0e845144e5f6a819a4da6afbde95f05042 Mon Sep 17 00:00:00 2001 From: Uzair1995 Date: Wed, 17 Oct 2018 11:12:31 +0500 Subject: [PATCH 079/113] [scripts/install_tendermint_ubuntu] change /root to local user (#2647) --- scripts/install/install_tendermint_ubuntu.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh index 0e1de1177..b9605de07 100644 --- a/scripts/install/install_tendermint_ubuntu.sh +++ b/scripts/install/install_tendermint_ubuntu.sh @@ -22,12 +22,12 @@ curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz tar -xvf go1.10.linux-amd64.tar.gz # move go binary and add to path -mv go /usr/local +sudo mv go /usr/local echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile # create the goApps directory, set GOPATH, and put it on PATH mkdir goApps -echo "export GOPATH=/root/goApps" >> ~/.profile +echo "export GOPATH=$HOME/goApps" >> ~/.profile echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile source ~/.profile From f60713bca86a49464c1e10bae7cfe636c88b8bc8 Mon Sep 17 00:00:00 2001 From: Hendrik Hofstadt Date: Wed, 17 Oct 2018 10:26:14 +0200 Subject: [PATCH 080/113] privval: Add IPCPV and fix SocketPV (#2568) Ref: #2563 I added IPC as an unencrypted alternative to SocketPV. Besides I fixed the following aspects of SocketPV: Added locking since we are operating on a single socket The connection deadline is extended every time a successful packet exchange happens; otherwise the connection would always die permanently x seconds after the connection was established. 
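A toy demonstration of that failure mode, using net.Pipe (Go 1.10+ for pipe deadlines) as a stand-in for the validator connection: a deadline set once at accept time expires even though the peer is healthy, which is what the per-exchange deadline reset and the ping messages below prevent.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	node, signer := net.Pipe() // stand-ins for the node/signer TCP connection
	defer node.Close()
	defer signer.Close()

	// Deadline set once and never pushed forward again.
	node.SetReadDeadline(time.Now().Add(50 * time.Millisecond))

	go func() {
		time.Sleep(100 * time.Millisecond) // the peer is alive, just idle
		signer.Write([]byte("ping"))
	}()

	buf := make([]byte, 4)
	if _, err := node.Read(buf); err != nil {
		fmt.Println("connection considered dead:", err) // i/o timeout
	}
}
```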
Added a ping/heartbeat mechanism to keep the connection alive; native TCP keepalives do not work in this use-case * Extend the SecureConn socket to extend its deadline * Add locking & ping/heartbeat packets to SocketPV * Implement IPC PV and abstract socket signing * Refactored IPC and SocketPV * Implement @melekes comments * Fixes to rebase --- node/node.go | 4 +- privval/ipc.go | 120 ++++ privval/ipc_server.go | 131 ++++ privval/ipc_test.go | 147 +++++ privval/remote_signer.go | 303 +++++++++ privval/socket.go | 605 ------------------ privval/tcp.go | 214 +++++++ privval/tcp_server.go | 160 +++++ privval/{socket_tcp.go => tcp_socket.go} | 44 +- ...{socket_tcp_test.go => tcp_socket_test.go} | 5 +- privval/{socket_test.go => tcp_test.go} | 106 +-- privval/wire.go | 2 +- 12 files changed, 1176 insertions(+), 665 deletions(-) create mode 100644 privval/ipc.go create mode 100644 privval/ipc_server.go create mode 100644 privval/ipc_test.go create mode 100644 privval/remote_signer.go delete mode 100644 privval/socket.go create mode 100644 privval/tcp.go create mode 100644 privval/tcp_server.go rename privval/{socket_tcp.go => tcp_socket.go} (59%) rename privval/{socket_tcp_test.go => tcp_socket_test.go} (91%) rename privval/{socket_test.go => tcp_test.go} (82%) diff --git a/node/node.go b/node/node.go index ed0fa1198..9939f1c65 100644 --- a/node/node.go +++ b/node/node.go @@ -215,7 +215,7 @@ func NewNode(config *cfg.Config, // TODO: persist this key so external signer // can actually authenticate us privKey = ed25519.GenPrivKey() - pvsc = privval.NewSocketPV( + pvsc = privval.NewTCPVal( logger.With("module", "privval"), config.PrivValidatorListenAddr, privKey, @@ -579,7 +579,7 @@ func (n *Node) OnStop() { } } - if pvsc, ok := n.privValidator.(*privval.SocketPV); ok { + if pvsc, ok := n.privValidator.(*privval.TCPVal); ok { if err := pvsc.Stop(); err != nil { n.Logger.Error("Error stopping priv validator socket client", "err", err) } diff --git a/privval/ipc.go b/privval/ipc.go new file mode 100644 index 000000000..eda23fe6f --- /dev/null +++ b/privval/ipc.go @@ -0,0 +1,120 @@ +package privval + +import ( + "net" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +// IPCValOption sets an optional parameter on the SocketPV. +type IPCValOption func(*IPCVal) + +// IPCValConnTimeout sets the read and write timeout for connections +// from external signing processes. +func IPCValConnTimeout(timeout time.Duration) IPCValOption { + return func(sc *IPCVal) { sc.connTimeout = timeout } +} + +// IPCValHeartbeat sets the period on which to check the liveness of the +// connected Signer connections. +func IPCValHeartbeat(period time.Duration) IPCValOption { + return func(sc *IPCVal) { sc.connHeartbeat = period } +} + +// IPCVal implements PrivValidator, it uses a unix socket to request signatures +// from an external process. +type IPCVal struct { + cmn.BaseService + *RemoteSignerClient + + addr string + + connTimeout time.Duration + connHeartbeat time.Duration + + conn net.Conn + cancelPing chan struct{} + pingTicker *time.Ticker +} + +// Check that IPCVal implements PrivValidator. +var _ types.PrivValidator = (*IPCVal)(nil) + +// NewIPCVal returns an instance of IPCVal. 
+func NewIPCVal( + logger log.Logger, + socketAddr string, +) *IPCVal { + sc := &IPCVal{ + addr: socketAddr, + connTimeout: connTimeout, + connHeartbeat: connHeartbeat, + } + + sc.BaseService = *cmn.NewBaseService(logger, "IPCVal", sc) + + return sc +} + +// OnStart implements cmn.Service. +func (sc *IPCVal) OnStart() error { + err := sc.connect() + if err != nil { + sc.Logger.Error("OnStart", "err", err) + return err + } + + sc.RemoteSignerClient = NewRemoteSignerClient(sc.conn) + + // Start a routine to keep the connection alive + sc.cancelPing = make(chan struct{}, 1) + sc.pingTicker = time.NewTicker(sc.connHeartbeat) + go func() { + for { + select { + case <-sc.pingTicker.C: + err := sc.Ping() + if err != nil { + sc.Logger.Error("Ping", "err", err) + } + case <-sc.cancelPing: + sc.pingTicker.Stop() + return + } + } + }() + + return nil +} + +// OnStop implements cmn.Service. +func (sc *IPCVal) OnStop() { + if sc.cancelPing != nil { + close(sc.cancelPing) + } + + if sc.conn != nil { + if err := sc.conn.Close(); err != nil { + sc.Logger.Error("OnStop", "err", err) + } + } +} + +func (sc *IPCVal) connect() error { + la, err := net.ResolveUnixAddr("unix", sc.addr) + if err != nil { + return err + } + + conn, err := net.DialUnix("unix", nil, la) + if err != nil { + return err + } + + sc.conn = newTimeoutConn(conn, sc.connTimeout) + + return nil +} diff --git a/privval/ipc_server.go b/privval/ipc_server.go new file mode 100644 index 000000000..d3907cbdb --- /dev/null +++ b/privval/ipc_server.go @@ -0,0 +1,131 @@ +package privval + +import ( + "io" + "net" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +// IPCRemoteSignerOption sets an optional parameter on the IPCRemoteSigner. +type IPCRemoteSignerOption func(*IPCRemoteSigner) + +// IPCRemoteSignerConnDeadline sets the read and write deadline for connections +// from external signing processes. +func IPCRemoteSignerConnDeadline(deadline time.Duration) IPCRemoteSignerOption { + return func(ss *IPCRemoteSigner) { ss.connDeadline = deadline } +} + +// IPCRemoteSignerConnRetries sets the amount of attempted retries to connect. +func IPCRemoteSignerConnRetries(retries int) IPCRemoteSignerOption { + return func(ss *IPCRemoteSigner) { ss.connRetries = retries } +} + +// IPCRemoteSigner is a RPC implementation of PrivValidator that listens on a unix socket. +type IPCRemoteSigner struct { + cmn.BaseService + + addr string + chainID string + connDeadline time.Duration + connRetries int + privVal types.PrivValidator + + listener *net.UnixListener +} + +// NewIPCRemoteSigner returns an instance of IPCRemoteSigner. +func NewIPCRemoteSigner( + logger log.Logger, + chainID, socketAddr string, + privVal types.PrivValidator, +) *IPCRemoteSigner { + rs := &IPCRemoteSigner{ + addr: socketAddr, + chainID: chainID, + connDeadline: time.Second * defaultConnDeadlineSeconds, + connRetries: defaultDialRetries, + privVal: privVal, + } + + rs.BaseService = *cmn.NewBaseService(logger, "IPCRemoteSigner", rs) + + return rs +} + +// OnStart implements cmn.Service. +func (rs *IPCRemoteSigner) OnStart() error { + err := rs.listen() + if err != nil { + err = cmn.ErrorWrap(err, "listen") + rs.Logger.Error("OnStart", "err", err) + return err + } + + go func() { + for { + conn, err := rs.listener.AcceptUnix() + if err != nil { + return + } + go rs.handleConnection(conn) + } + }() + + return nil +} + +// OnStop implements cmn.Service. 
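Putting the two new IPC pieces together: the signer process listens on the unix socket via IPCRemoteSigner, while the node dials it through IPCVal and then uses the remote key like any other PrivValidator. A sketch of the wiring, modelled on testSetupIPCSocketPair below; the socket path and the MockPV are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/types"
)

func main() {
	logger := log.TestingLogger()
	addr := "/tmp/priv-validator.sock" // hypothetical, must not already exist
	chainID := "test-chain"

	// Signer side: listens on the socket and answers signing requests.
	rs := privval.NewIPCRemoteSigner(logger, chainID, addr, types.NewMockPV())
	if err := rs.Start(); err != nil {
		panic(err)
	}
	defer rs.Stop()

	// Node side: dials the socket and exposes the key as a PrivValidator.
	sc := privval.NewIPCVal(logger, addr)
	if err := sc.Start(); err != nil {
		panic(err)
	}
	defer sc.Stop()

	vote := &types.Vote{Type: types.PrecommitType}
	if err := sc.SignVote(chainID, vote); err != nil {
		panic(err)
	}
	fmt.Printf("signed vote: %X\n", vote.Signature)
}
```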
+func (rs *IPCRemoteSigner) OnStop() { + if rs.listener != nil { + if err := rs.listener.Close(); err != nil { + rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) + } + } +} + +func (rs *IPCRemoteSigner) listen() error { + la, err := net.ResolveUnixAddr("unix", rs.addr) + if err != nil { + return err + } + + rs.listener, err = net.ListenUnix("unix", la) + + return err +} + +func (rs *IPCRemoteSigner) handleConnection(conn net.Conn) { + for { + if !rs.IsRunning() { + return // Ignore error from listener closing. + } + + // Reset the connection deadline + conn.SetDeadline(time.Now().Add(rs.connDeadline)) + + req, err := readMsg(conn) + if err != nil { + if err != io.EOF { + rs.Logger.Error("handleConnection", "err", err) + } + return + } + + res, err := handleRequest(req, rs.chainID, rs.privVal) + + if err != nil { + // only log the error; we'll reply with an error in res + rs.Logger.Error("handleConnection", "err", err) + } + + err = writeMsg(conn, res) + if err != nil { + rs.Logger.Error("handleConnection", "err", err) + return + } + } +} diff --git a/privval/ipc_test.go b/privval/ipc_test.go new file mode 100644 index 000000000..c8d6dfc77 --- /dev/null +++ b/privval/ipc_test.go @@ -0,0 +1,147 @@ +package privval + +import ( + "io/ioutil" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +func TestIPCPVVote(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupIPCSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestIPCPVVoteResetDeadline(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupIPCSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) + + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestIPCPVVoteKeepalive(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupIPCSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + time.Sleep(10 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func testSetupIPCSocketPair( + t *testing.T, + chainID string, + privValidator types.PrivValidator, +) (*IPCVal, *IPCRemoteSigner) { + addr, err := testUnixAddr() + 
require.NoError(t, err) + + var ( + logger = log.TestingLogger() + privVal = privValidator + readyc = make(chan struct{}) + rs = NewIPCRemoteSigner( + logger, + chainID, + addr, + privVal, + ) + sc = NewIPCVal( + logger, + addr, + ) + ) + + IPCValConnTimeout(5 * time.Millisecond)(sc) + IPCValHeartbeat(time.Millisecond)(sc) + + IPCRemoteSignerConnDeadline(time.Millisecond * 5)(rs) + + testStartIPCRemoteSigner(t, readyc, rs) + + <-readyc + + require.NoError(t, sc.Start()) + assert.True(t, sc.IsRunning()) + + return sc, rs +} + +func testStartIPCRemoteSigner(t *testing.T, readyc chan struct{}, rs *IPCRemoteSigner) { + go func(rs *IPCRemoteSigner) { + require.NoError(t, rs.Start()) + assert.True(t, rs.IsRunning()) + + readyc <- struct{}{} + }(rs) +} + +func testUnixAddr() (string, error) { + f, err := ioutil.TempFile("/tmp", "nettest") + if err != nil { + return "", err + } + + addr := f.Name() + err = f.Close() + if err != nil { + return "", err + } + err = os.Remove(addr) + if err != nil { + return "", err + } + + return addr, nil +} diff --git a/privval/remote_signer.go b/privval/remote_signer.go new file mode 100644 index 000000000..399ee7905 --- /dev/null +++ b/privval/remote_signer.go @@ -0,0 +1,303 @@ +package privval + +import ( + "fmt" + "io" + "net" + "sync" + + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" +) + +// RemoteSignerClient implements PrivValidator, it uses a socket to request signatures +// from an external process. +type RemoteSignerClient struct { + conn net.Conn + lock sync.Mutex +} + +// Check that RemoteSignerClient implements PrivValidator. +var _ types.PrivValidator = (*RemoteSignerClient)(nil) + +// NewRemoteSignerClient returns an instance of RemoteSignerClient. +func NewRemoteSignerClient( + conn net.Conn, +) *RemoteSignerClient { + sc := &RemoteSignerClient{ + conn: conn, + } + return sc +} + +// GetAddress implements PrivValidator. +func (sc *RemoteSignerClient) GetAddress() types.Address { + pubKey, err := sc.getPubKey() + if err != nil { + panic(err) + } + + return pubKey.Address() +} + +// GetPubKey implements PrivValidator. +func (sc *RemoteSignerClient) GetPubKey() crypto.PubKey { + pubKey, err := sc.getPubKey() + if err != nil { + panic(err) + } + + return pubKey +} + +func (sc *RemoteSignerClient) getPubKey() (crypto.PubKey, error) { + sc.lock.Lock() + defer sc.lock.Unlock() + + err := writeMsg(sc.conn, &PubKeyMsg{}) + if err != nil { + return nil, err + } + + res, err := readMsg(sc.conn) + if err != nil { + return nil, err + } + + return res.(*PubKeyMsg).PubKey, nil +} + +// SignVote implements PrivValidator. +func (sc *RemoteSignerClient) SignVote(chainID string, vote *types.Vote) error { + sc.lock.Lock() + defer sc.lock.Unlock() + + err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) + if err != nil { + return err + } + + res, err := readMsg(sc.conn) + if err != nil { + return err + } + + resp, ok := res.(*SignedVoteResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return resp.Error + } + *vote = *resp.Vote + + return nil +} + +// SignProposal implements PrivValidator. 
+func (sc *RemoteSignerClient) SignProposal( + chainID string, + proposal *types.Proposal, +) error { + sc.lock.Lock() + defer sc.lock.Unlock() + + err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) + if err != nil { + return err + } + + res, err := readMsg(sc.conn) + if err != nil { + return err + } + resp, ok := res.(*SignedProposalResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return resp.Error + } + *proposal = *resp.Proposal + + return nil +} + +// SignHeartbeat implements PrivValidator. +func (sc *RemoteSignerClient) SignHeartbeat( + chainID string, + heartbeat *types.Heartbeat, +) error { + sc.lock.Lock() + defer sc.lock.Unlock() + + err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: heartbeat}) + if err != nil { + return err + } + + res, err := readMsg(sc.conn) + if err != nil { + return err + } + resp, ok := res.(*SignedHeartbeatResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return resp.Error + } + *heartbeat = *resp.Heartbeat + + return nil +} + +// Ping is used to check connection health. +func (sc *RemoteSignerClient) Ping() error { + sc.lock.Lock() + defer sc.lock.Unlock() + + err := writeMsg(sc.conn, &PingRequest{}) + if err != nil { + return err + } + + res, err := readMsg(sc.conn) + if err != nil { + return err + } + _, ok := res.(*PingResponse) + if !ok { + return ErrUnexpectedResponse + } + + return nil +} + +// RemoteSignerMsg is sent between RemoteSigner and the RemoteSigner client. +type RemoteSignerMsg interface{} + +func RegisterRemoteSignerMsg(cdc *amino.Codec) { + cdc.RegisterInterface((*RemoteSignerMsg)(nil), nil) + cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/remotesigner/PubKeyMsg", nil) + cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/remotesigner/SignVoteRequest", nil) + cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil) + cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil) + cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil) + cdc.RegisterConcrete(&SignHeartbeatRequest{}, "tendermint/remotesigner/SignHeartbeatRequest", nil) + cdc.RegisterConcrete(&SignedHeartbeatResponse{}, "tendermint/remotesigner/SignedHeartbeatResponse", nil) + cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil) + cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil) +} + +// PubKeyMsg is a PrivValidatorSocket message containing the public key. +type PubKeyMsg struct { + PubKey crypto.PubKey +} + +// SignVoteRequest is a PrivValidatorSocket message containing a vote. +type SignVoteRequest struct { + Vote *types.Vote +} + +// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potenial error message. +type SignedVoteResponse struct { + Vote *types.Vote + Error *RemoteSignerError +} + +// SignProposalRequest is a PrivValidatorSocket message containing a Proposal. +type SignProposalRequest struct { + Proposal *types.Proposal +} + +type SignedProposalResponse struct { + Proposal *types.Proposal + Error *RemoteSignerError +} + +// SignHeartbeatRequest is a PrivValidatorSocket message containing a Heartbeat. +type SignHeartbeatRequest struct { + Heartbeat *types.Heartbeat +} + +type SignedHeartbeatResponse struct { + Heartbeat *types.Heartbeat + Error *RemoteSignerError +} + +// PingRequest is a PrivValidatorSocket message to keep the connection alive. 
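An external signer written against this protocol needs the same message union registered on its own amino codec before the readMsg/writeMsg framing will round-trip. A hedged sketch (the updated privval/wire.go is not shown in this patch; cryptoAmino here refers to the crypto/encoding/amino helper):

```go
package main

import (
	"github.com/tendermint/go-amino"

	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
	"github.com/tendermint/tendermint/privval"
)

func main() {
	cdc := amino.NewCodec()

	// Key and signature types first, then the RemoteSignerMsg union, so that
	// PubKeyMsg, SignVoteRequest, etc. can be (un)marshalled on the wire.
	cryptoAmino.RegisterAmino(cdc)
	privval.RegisterRemoteSignerMsg(cdc)

	// cdc can now be handed to whatever drives the read/write loop.
}
```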
+type PingRequest struct { +} + +type PingResponse struct { +} + +// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply. +type RemoteSignerError struct { + // TODO(ismail): create an enum of known errors + Code int + Description string +} + +func (e *RemoteSignerError) Error() string { + return fmt.Sprintf("RemoteSigner returned error #%d: %s", e.Code, e.Description) +} + +func readMsg(r io.Reader) (msg RemoteSignerMsg, err error) { + const maxRemoteSignerMsgSize = 1024 * 10 + _, err = cdc.UnmarshalBinaryReader(r, &msg, maxRemoteSignerMsgSize) + if _, ok := err.(timeoutError); ok { + err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + } + return +} + +func writeMsg(w io.Writer, msg interface{}) (err error) { + _, err = cdc.MarshalBinaryWriter(w, msg) + if _, ok := err.(timeoutError); ok { + err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + } + return +} + +func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValidator) (RemoteSignerMsg, error) { + var res RemoteSignerMsg + var err error + + switch r := req.(type) { + case *PubKeyMsg: + var p crypto.PubKey + p = privVal.GetPubKey() + res = &PubKeyMsg{p} + case *SignVoteRequest: + err = privVal.SignVote(chainID, r.Vote) + if err != nil { + res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedVoteResponse{r.Vote, nil} + } + case *SignProposalRequest: + err = privVal.SignProposal(chainID, r.Proposal) + if err != nil { + res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedProposalResponse{r.Proposal, nil} + } + case *SignHeartbeatRequest: + err = privVal.SignHeartbeat(chainID, r.Heartbeat) + if err != nil { + res = &SignedHeartbeatResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedHeartbeatResponse{r.Heartbeat, nil} + } + case *PingRequest: + res = &PingResponse{} + default: + err = fmt.Errorf("unknown msg: %v", r) + } + + return res, err +} diff --git a/privval/socket.go b/privval/socket.go deleted file mode 100644 index 64d4c46d0..000000000 --- a/privval/socket.go +++ /dev/null @@ -1,605 +0,0 @@ -package privval - -import ( - "errors" - "fmt" - "io" - "net" - "time" - - "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - p2pconn "github.com/tendermint/tendermint/p2p/conn" - "github.com/tendermint/tendermint/types" -) - -const ( - defaultAcceptDeadlineSeconds = 30 // tendermint waits this long for remote val to connect - defaultConnDeadlineSeconds = 3 // must be set before each read - defaultConnHeartBeatSeconds = 30 // tcp keep-alive period - defaultConnWaitSeconds = 60 // XXX: is this redundant with the accept deadline? - defaultDialRetries = 10 // try to connect to tendermint this many times -) - -// Socket errors. -var ( - ErrDialRetryMax = errors.New("dialed maximum retries") - ErrConnWaitTimeout = errors.New("waited for remote signer for too long") - ErrConnTimeout = errors.New("remote signer timed out") - ErrUnexpectedResponse = errors.New("received unexpected response") -) - -// SocketPVOption sets an optional parameter on the SocketPV. -type SocketPVOption func(*SocketPV) - -// SocketPVAcceptDeadline sets the deadline for the SocketPV listener. -// A zero time value disables the deadline. 
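Since handleRequest above is now the single dispatch point shared by the unix- and TCP-based servers, a focused test is easy to write against it. A hypothetical in-package sketch (not part of this patch) using the MockPV:

```go
package privval

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/types"
)

func TestHandleRequestSignsVote(t *testing.T) {
	pv := types.NewMockPV()
	vote := &types.Vote{Type: types.PrecommitType}

	res, err := handleRequest(&SignVoteRequest{Vote: vote}, "test-chain", pv)
	require.NoError(t, err)

	resp, ok := res.(*SignedVoteResponse)
	require.True(t, ok)
	require.Nil(t, resp.Error)
	require.NotEmpty(t, resp.Vote.Signature)
}
```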
-func SocketPVAcceptDeadline(deadline time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.acceptDeadline = deadline } -} - -// SocketPVConnDeadline sets the read and write deadline for connections -// from external signing processes. -func SocketPVConnDeadline(deadline time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connDeadline = deadline } -} - -// SocketPVHeartbeat sets the period on which to check the liveness of the -// connected Signer connections. -func SocketPVHeartbeat(period time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connHeartbeat = period } -} - -// SocketPVConnWait sets the timeout duration before connection of external -// signing processes are considered to be unsuccessful. -func SocketPVConnWait(timeout time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connWaitTimeout = timeout } -} - -// SocketPV implements PrivValidator, it uses a socket to request signatures -// from an external process. -type SocketPV struct { - cmn.BaseService - - addr string - acceptDeadline time.Duration - connDeadline time.Duration - connHeartbeat time.Duration - connWaitTimeout time.Duration - privKey ed25519.PrivKeyEd25519 - - conn net.Conn - listener net.Listener -} - -// Check that SocketPV implements PrivValidator. -var _ types.PrivValidator = (*SocketPV)(nil) - -// NewSocketPV returns an instance of SocketPV. -func NewSocketPV( - logger log.Logger, - socketAddr string, - privKey ed25519.PrivKeyEd25519, -) *SocketPV { - sc := &SocketPV{ - addr: socketAddr, - acceptDeadline: time.Second * defaultAcceptDeadlineSeconds, - connDeadline: time.Second * defaultConnDeadlineSeconds, - connHeartbeat: time.Second * defaultConnHeartBeatSeconds, - connWaitTimeout: time.Second * defaultConnWaitSeconds, - privKey: privKey, - } - - sc.BaseService = *cmn.NewBaseService(logger, "SocketPV", sc) - - return sc -} - -// GetAddress implements PrivValidator. -func (sc *SocketPV) GetAddress() types.Address { - addr, err := sc.getAddress() - if err != nil { - panic(err) - } - - return addr -} - -// Address is an alias for PubKey().Address(). -func (sc *SocketPV) getAddress() (cmn.HexBytes, error) { - p, err := sc.getPubKey() - if err != nil { - return nil, err - } - - return p.Address(), nil -} - -// GetPubKey implements PrivValidator. -func (sc *SocketPV) GetPubKey() crypto.PubKey { - pubKey, err := sc.getPubKey() - if err != nil { - panic(err) - } - - return pubKey -} - -func (sc *SocketPV) getPubKey() (crypto.PubKey, error) { - err := writeMsg(sc.conn, &PubKeyMsg{}) - if err != nil { - return nil, err - } - - res, err := readMsg(sc.conn) - if err != nil { - return nil, err - } - - return res.(*PubKeyMsg).PubKey, nil -} - -// SignVote implements PrivValidator. -func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { - err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - resp, ok := res.(*SignedVoteResponse) - if !ok { - return ErrUnexpectedResponse - } - if resp.Error != nil { - return fmt.Errorf("remote error occurred: code: %v, description: %s", - resp.Error.Code, - resp.Error.Description) - } - *vote = *resp.Vote - - return nil -} - -// SignProposal implements PrivValidator. 
-func (sc *SocketPV) SignProposal( - chainID string, - proposal *types.Proposal, -) error { - err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - resp, ok := res.(*SignedProposalResponse) - if !ok { - return ErrUnexpectedResponse - } - if resp.Error != nil { - return fmt.Errorf("remote error occurred: code: %v, description: %s", - resp.Error.Code, - resp.Error.Description) - } - *proposal = *resp.Proposal - - return nil -} - -// SignHeartbeat implements PrivValidator. -func (sc *SocketPV) SignHeartbeat( - chainID string, - heartbeat *types.Heartbeat, -) error { - err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: heartbeat}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - resp, ok := res.(*SignedHeartbeatResponse) - if !ok { - return ErrUnexpectedResponse - } - if resp.Error != nil { - return fmt.Errorf("remote error occurred: code: %v, description: %s", - resp.Error.Code, - resp.Error.Description) - } - *heartbeat = *resp.Heartbeat - - return nil -} - -// OnStart implements cmn.Service. -func (sc *SocketPV) OnStart() error { - if err := sc.listen(); err != nil { - err = cmn.ErrorWrap(err, "failed to listen") - sc.Logger.Error( - "OnStart", - "err", err, - ) - return err - } - - conn, err := sc.waitConnection() - if err != nil { - err = cmn.ErrorWrap(err, "failed to accept connection") - sc.Logger.Error( - "OnStart", - "err", err, - ) - - return err - } - - sc.conn = conn - - return nil -} - -// OnStop implements cmn.Service. -func (sc *SocketPV) OnStop() { - if sc.conn != nil { - if err := sc.conn.Close(); err != nil { - err = cmn.ErrorWrap(err, "failed to close connection") - sc.Logger.Error( - "OnStop", - "err", err, - ) - } - } - - if sc.listener != nil { - if err := sc.listener.Close(); err != nil { - err = cmn.ErrorWrap(err, "failed to close listener") - sc.Logger.Error( - "OnStop", - "err", err, - ) - } - } -} - -func (sc *SocketPV) acceptConnection() (net.Conn, error) { - conn, err := sc.listener.Accept() - if err != nil { - if !sc.IsRunning() { - return nil, nil // Ignore error from listener closing. - } - return nil, err - - } - - conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey) - if err != nil { - return nil, err - } - - return conn, nil -} - -func (sc *SocketPV) listen() error { - ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr)) - if err != nil { - return err - } - - sc.listener = newTCPTimeoutListener( - ln, - sc.acceptDeadline, - sc.connDeadline, - sc.connHeartbeat, - ) - - return nil -} - -// waitConnection uses the configured wait timeout to error if no external -// process connects in the time period. -func (sc *SocketPV) waitConnection() (net.Conn, error) { - var ( - connc = make(chan net.Conn, 1) - errc = make(chan error, 1) - ) - - go func(connc chan<- net.Conn, errc chan<- error) { - conn, err := sc.acceptConnection() - if err != nil { - errc <- err - return - } - - connc <- conn - }(connc, errc) - - select { - case conn := <-connc: - return conn, nil - case err := <-errc: - if _, ok := err.(timeoutError); ok { - return nil, cmn.ErrorWrap(ErrConnWaitTimeout, err.Error()) - } - return nil, err - case <-time.After(sc.connWaitTimeout): - return nil, ErrConnWaitTimeout - } -} - -//--------------------------------------------------------- - -// RemoteSignerOption sets an optional parameter on the RemoteSigner. 
-type RemoteSignerOption func(*RemoteSigner) - -// RemoteSignerConnDeadline sets the read and write deadline for connections -// from external signing processes. -func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption { - return func(ss *RemoteSigner) { ss.connDeadline = deadline } -} - -// RemoteSignerConnRetries sets the amount of attempted retries to connect. -func RemoteSignerConnRetries(retries int) RemoteSignerOption { - return func(ss *RemoteSigner) { ss.connRetries = retries } -} - -// RemoteSigner implements PrivValidator by dialing to a socket. -type RemoteSigner struct { - cmn.BaseService - - addr string - chainID string - connDeadline time.Duration - connRetries int - privKey ed25519.PrivKeyEd25519 - privVal types.PrivValidator - - conn net.Conn -} - -// NewRemoteSigner returns an instance of RemoteSigner. -func NewRemoteSigner( - logger log.Logger, - chainID, socketAddr string, - privVal types.PrivValidator, - privKey ed25519.PrivKeyEd25519, -) *RemoteSigner { - rs := &RemoteSigner{ - addr: socketAddr, - chainID: chainID, - connDeadline: time.Second * defaultConnDeadlineSeconds, - connRetries: defaultDialRetries, - privKey: privKey, - privVal: privVal, - } - - rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs) - - return rs -} - -// OnStart implements cmn.Service. -func (rs *RemoteSigner) OnStart() error { - conn, err := rs.connect() - if err != nil { - err = cmn.ErrorWrap(err, "connect") - rs.Logger.Error("OnStart", "err", err) - return err - } - - go rs.handleConnection(conn) - - return nil -} - -// OnStop implements cmn.Service. -func (rs *RemoteSigner) OnStop() { - if rs.conn == nil { - return - } - - if err := rs.conn.Close(); err != nil { - rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) - } -} - -func (rs *RemoteSigner) connect() (net.Conn, error) { - for retries := rs.connRetries; retries > 0; retries-- { - // Don't sleep if it is the first retry. - if retries != rs.connRetries { - time.Sleep(rs.connDeadline) - } - - conn, err := cmn.Connect(rs.addr) - if err != nil { - err = cmn.ErrorWrap(err, "connection failed") - rs.Logger.Error( - "connect", - "addr", rs.addr, - "err", err, - ) - - continue - } - - if err := conn.SetDeadline(time.Now().Add(time.Second * defaultConnDeadlineSeconds)); err != nil { - err = cmn.ErrorWrap(err, "setting connection timeout failed") - rs.Logger.Error( - "connect", - "err", err, - ) - continue - } - - conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey) - if err != nil { - err = cmn.ErrorWrap(err, "encrypting connection failed") - rs.Logger.Error( - "connect", - "err", err, - ) - - continue - } - - return conn, nil - } - - return nil, ErrDialRetryMax -} - -func (rs *RemoteSigner) handleConnection(conn net.Conn) { - for { - if !rs.IsRunning() { - return // Ignore error from listener closing. 
- } - - req, err := readMsg(conn) - if err != nil { - if err != io.EOF { - rs.Logger.Error("handleConnection", "err", err) - } - return - } - - var res SocketPVMsg - - switch r := req.(type) { - case *PubKeyMsg: - var p crypto.PubKey - p = rs.privVal.GetPubKey() - res = &PubKeyMsg{p} - case *SignVoteRequest: - err = rs.privVal.SignVote(rs.chainID, r.Vote) - if err != nil { - res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedVoteResponse{r.Vote, nil} - } - case *SignProposalRequest: - err = rs.privVal.SignProposal(rs.chainID, r.Proposal) - if err != nil { - res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedProposalResponse{r.Proposal, nil} - } - case *SignHeartbeatRequest: - err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat) - if err != nil { - res = &SignedHeartbeatResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedHeartbeatResponse{r.Heartbeat, nil} - } - default: - err = fmt.Errorf("unknown msg: %v", r) - } - - if err != nil { - // only log the error; we'll reply with an error in res - rs.Logger.Error("handleConnection", "err", err) - } - - err = writeMsg(conn, res) - if err != nil { - rs.Logger.Error("handleConnection", "err", err) - return - } - } -} - -//--------------------------------------------------------- - -// SocketPVMsg is sent between RemoteSigner and SocketPV. -type SocketPVMsg interface{} - -func RegisterSocketPVMsg(cdc *amino.Codec) { - cdc.RegisterInterface((*SocketPVMsg)(nil), nil) - cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil) - cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/socketpv/SignVoteRequest", nil) - cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/socketpv/SignedVoteResponse", nil) - cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/socketpv/SignProposalRequest", nil) - cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/socketpv/SignedProposalResponse", nil) - cdc.RegisterConcrete(&SignHeartbeatRequest{}, "tendermint/socketpv/SignHeartbeatRequest", nil) - cdc.RegisterConcrete(&SignedHeartbeatResponse{}, "tendermint/socketpv/SignedHeartbeatResponse", nil) -} - -// PubKeyMsg is a PrivValidatorSocket message containing the public key. -type PubKeyMsg struct { - PubKey crypto.PubKey -} - -// SignVoteRequest is a PrivValidatorSocket message containing a vote. -type SignVoteRequest struct { - Vote *types.Vote -} - -// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potenial error message. -type SignedVoteResponse struct { - Vote *types.Vote - Error *RemoteSignerError -} - -// SignProposalRequest is a PrivValidatorSocket message containing a Proposal. -type SignProposalRequest struct { - Proposal *types.Proposal -} - -type SignedProposalResponse struct { - Proposal *types.Proposal - Error *RemoteSignerError -} - -// SignHeartbeatRequest is a PrivValidatorSocket message containing a Heartbeat. -type SignHeartbeatRequest struct { - Heartbeat *types.Heartbeat -} - -type SignedHeartbeatResponse struct { - Heartbeat *types.Heartbeat - Error *RemoteSignerError -} - -// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply. 
-type RemoteSignerError struct { - // TODO(ismail): create an enum of known errors - Code int - Description string -} - -func readMsg(r io.Reader) (msg SocketPVMsg, err error) { - const maxSocketPVMsgSize = 1024 * 10 - - // set deadline before trying to read - conn := r.(net.Conn) - if err := conn.SetDeadline(time.Now().Add(time.Second * defaultConnDeadlineSeconds)); err != nil { - err = cmn.ErrorWrap(err, "setting connection timeout failed in readMsg") - return msg, err - } - - _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} - -func writeMsg(w io.Writer, msg interface{}) (err error) { - _, err = cdc.MarshalBinaryWriter(w, msg) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} diff --git a/privval/tcp.go b/privval/tcp.go new file mode 100644 index 000000000..11bd833c0 --- /dev/null +++ b/privval/tcp.go @@ -0,0 +1,214 @@ +package privval + +import ( + "errors" + "net" + "time" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + p2pconn "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/types" +) + +const ( + defaultAcceptDeadlineSeconds = 3 + defaultConnDeadlineSeconds = 3 + defaultConnHeartBeatSeconds = 2 + defaultDialRetries = 10 +) + +// Socket errors. +var ( + ErrDialRetryMax = errors.New("dialed maximum retries") + ErrConnTimeout = errors.New("remote signer timed out") + ErrUnexpectedResponse = errors.New("received unexpected response") +) + +var ( + acceptDeadline = time.Second * defaultAcceptDeadlineSeconds + connTimeout = time.Second * defaultConnDeadlineSeconds + connHeartbeat = time.Second * defaultConnHeartBeatSeconds +) + +// TCPValOption sets an optional parameter on the SocketPV. +type TCPValOption func(*TCPVal) + +// TCPValAcceptDeadline sets the deadline for the TCPVal listener. +// A zero time value disables the deadline. +func TCPValAcceptDeadline(deadline time.Duration) TCPValOption { + return func(sc *TCPVal) { sc.acceptDeadline = deadline } +} + +// TCPValConnTimeout sets the read and write timeout for connections +// from external signing processes. +func TCPValConnTimeout(timeout time.Duration) TCPValOption { + return func(sc *TCPVal) { sc.connTimeout = timeout } +} + +// TCPValHeartbeat sets the period on which to check the liveness of the +// connected Signer connections. +func TCPValHeartbeat(period time.Duration) TCPValOption { + return func(sc *TCPVal) { sc.connHeartbeat = period } +} + +// TCPVal implements PrivValidator, it uses a socket to request signatures +// from an external process. +type TCPVal struct { + cmn.BaseService + *RemoteSignerClient + + addr string + acceptDeadline time.Duration + connTimeout time.Duration + connHeartbeat time.Duration + privKey ed25519.PrivKeyEd25519 + + conn net.Conn + listener net.Listener + cancelPing chan struct{} + pingTicker *time.Ticker +} + +// Check that TCPVal implements PrivValidator. +var _ types.PrivValidator = (*TCPVal)(nil) + +// NewTCPVal returns an instance of TCPVal. 
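The TCPVal options above keep the functional-options pattern already used by the old SocketPV: each option is a plain function over *TCPVal and, as in the tests further below, is applied to an already-constructed value rather than passed to the constructor. A short sketch (the address is illustrative):

```go
package main

import (
	"time"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
)

func main() {
	sc := privval.NewTCPVal(log.TestingLogger(), "tcp://127.0.0.1:26659", ed25519.GenPrivKey())

	// Override the package defaults before starting the service.
	privval.TCPValAcceptDeadline(30 * time.Second)(sc)
	privval.TCPValConnTimeout(5 * time.Second)(sc)
	privval.TCPValHeartbeat(10 * time.Second)(sc)

	// sc.Start() would now listen for the remote signer with these settings.
}
```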
+func NewTCPVal( + logger log.Logger, + socketAddr string, + privKey ed25519.PrivKeyEd25519, +) *TCPVal { + sc := &TCPVal{ + addr: socketAddr, + acceptDeadline: acceptDeadline, + connTimeout: connTimeout, + connHeartbeat: connHeartbeat, + privKey: privKey, + } + + sc.BaseService = *cmn.NewBaseService(logger, "TCPVal", sc) + + return sc +} + +// OnStart implements cmn.Service. +func (sc *TCPVal) OnStart() error { + if err := sc.listen(); err != nil { + sc.Logger.Error("OnStart", "err", err) + return err + } + + conn, err := sc.waitConnection() + if err != nil { + sc.Logger.Error("OnStart", "err", err) + return err + } + + sc.conn = conn + + sc.RemoteSignerClient = NewRemoteSignerClient(sc.conn) + + // Start a routine to keep the connection alive + sc.cancelPing = make(chan struct{}, 1) + sc.pingTicker = time.NewTicker(sc.connHeartbeat) + go func() { + for { + select { + case <-sc.pingTicker.C: + err := sc.Ping() + if err != nil { + sc.Logger.Error( + "Ping", + "err", err, + ) + } + case <-sc.cancelPing: + sc.pingTicker.Stop() + return + } + } + }() + + return nil +} + +// OnStop implements cmn.Service. +func (sc *TCPVal) OnStop() { + if sc.cancelPing != nil { + close(sc.cancelPing) + } + + if sc.conn != nil { + if err := sc.conn.Close(); err != nil { + sc.Logger.Error("OnStop", "err", err) + } + } + + if sc.listener != nil { + if err := sc.listener.Close(); err != nil { + sc.Logger.Error("OnStop", "err", err) + } + } +} + +func (sc *TCPVal) acceptConnection() (net.Conn, error) { + conn, err := sc.listener.Accept() + if err != nil { + if !sc.IsRunning() { + return nil, nil // Ignore error from listener closing. + } + return nil, err + + } + + conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey) + if err != nil { + return nil, err + } + + return conn, nil +} + +func (sc *TCPVal) listen() error { + ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr)) + if err != nil { + return err + } + + sc.listener = newTCPTimeoutListener( + ln, + sc.acceptDeadline, + sc.connTimeout, + sc.connHeartbeat, + ) + + return nil +} + +// waitConnection uses the configured wait timeout to error if no external +// process connects in the time period. +func (sc *TCPVal) waitConnection() (net.Conn, error) { + var ( + connc = make(chan net.Conn, 1) + errc = make(chan error, 1) + ) + + go func(connc chan<- net.Conn, errc chan<- error) { + conn, err := sc.acceptConnection() + if err != nil { + errc <- err + return + } + + connc <- conn + }(connc, errc) + + select { + case conn := <-connc: + return conn, nil + case err := <-errc: + return nil, err + } +} diff --git a/privval/tcp_server.go b/privval/tcp_server.go new file mode 100644 index 000000000..694023d76 --- /dev/null +++ b/privval/tcp_server.go @@ -0,0 +1,160 @@ +package privval + +import ( + "io" + "net" + "time" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + p2pconn "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/types" +) + +// RemoteSignerOption sets an optional parameter on the RemoteSigner. +type RemoteSignerOption func(*RemoteSigner) + +// RemoteSignerConnDeadline sets the read and write deadline for connections +// from external signing processes. +func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption { + return func(ss *RemoteSigner) { ss.connDeadline = deadline } +} + +// RemoteSignerConnRetries sets the amount of attempted retries to connect. 
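For the TCP flavour the roles are reversed relative to IPC: the node (TCPVal) listens and the remote signer dials in, with both ends wrapped by MakeSecretConnection. A sketch of wiring the two, modelled on testSetupSocketPair further below; the MockPV, the fixed address, and running both ends in one binary are illustrative only (NewRemoteSigner is defined just below):

```go
package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/types"
)

func main() {
	logger := log.TestingLogger()
	addr := "tcp://127.0.0.1:26659" // illustrative listen address
	chainID := "test-chain"

	// Node side: listens for the external signer.
	sc := privval.NewTCPVal(logger, addr, ed25519.GenPrivKey())
	// Signer side: dials the node (with retries) and serves signing requests.
	rs := privval.NewRemoteSigner(logger, chainID, addr, types.NewMockPV(), ed25519.GenPrivKey())

	// TCPVal.Start blocks in waitConnection until a signer dials in, so run
	// it from a goroutine; the signer's dial retries cover the startup race.
	done := make(chan error, 1)
	go func() { done <- sc.Start() }()
	time.Sleep(100 * time.Millisecond) // crude: let the listener come up first

	if err := rs.Start(); err != nil {
		panic(err)
	}
	defer rs.Stop()

	if err := <-done; err != nil {
		panic(err)
	}
	defer sc.Stop()

	vote := &types.Vote{Type: types.PrecommitType}
	if err := sc.SignVote(chainID, vote); err != nil {
		panic(err)
	}
	fmt.Printf("signed vote: %X\n", vote.Signature)
}
```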
+func RemoteSignerConnRetries(retries int) RemoteSignerOption { + return func(ss *RemoteSigner) { ss.connRetries = retries } +} + +// RemoteSigner implements PrivValidator by dialing to a socket. +type RemoteSigner struct { + cmn.BaseService + + addr string + chainID string + connDeadline time.Duration + connRetries int + privKey ed25519.PrivKeyEd25519 + privVal types.PrivValidator + + conn net.Conn +} + +// NewRemoteSigner returns an instance of RemoteSigner. +func NewRemoteSigner( + logger log.Logger, + chainID, socketAddr string, + privVal types.PrivValidator, + privKey ed25519.PrivKeyEd25519, +) *RemoteSigner { + rs := &RemoteSigner{ + addr: socketAddr, + chainID: chainID, + connDeadline: time.Second * defaultConnDeadlineSeconds, + connRetries: defaultDialRetries, + privKey: privKey, + privVal: privVal, + } + + rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs) + + return rs +} + +// OnStart implements cmn.Service. +func (rs *RemoteSigner) OnStart() error { + conn, err := rs.connect() + if err != nil { + rs.Logger.Error("OnStart", "err", err) + return err + } + + go rs.handleConnection(conn) + + return nil +} + +// OnStop implements cmn.Service. +func (rs *RemoteSigner) OnStop() { + if rs.conn == nil { + return + } + + if err := rs.conn.Close(); err != nil { + rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) + } +} + +func (rs *RemoteSigner) connect() (net.Conn, error) { + for retries := rs.connRetries; retries > 0; retries-- { + // Don't sleep if it is the first retry. + if retries != rs.connRetries { + time.Sleep(rs.connDeadline) + } + + conn, err := cmn.Connect(rs.addr) + if err != nil { + rs.Logger.Error( + "connect", + "addr", rs.addr, + "err", err, + ) + + continue + } + + if err := conn.SetDeadline(time.Now().Add(connTimeout)); err != nil { + rs.Logger.Error( + "connect", + "err", err, + ) + continue + } + + conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey) + if err != nil { + rs.Logger.Error( + "connect", + "err", err, + ) + + continue + } + + return conn, nil + } + + return nil, ErrDialRetryMax +} + +func (rs *RemoteSigner) handleConnection(conn net.Conn) { + for { + if !rs.IsRunning() { + return // Ignore error from listener closing. + } + + // Reset the connection deadline + conn.SetDeadline(time.Now().Add(rs.connDeadline)) + + req, err := readMsg(conn) + if err != nil { + if err != io.EOF { + rs.Logger.Error("handleConnection", "err", err) + } + return + } + + res, err := handleRequest(req, rs.chainID, rs.privVal) + + if err != nil { + // only log the error; we'll reply with an error in res + rs.Logger.Error("handleConnection", "err", err) + } + + err = writeMsg(conn, res) + if err != nil { + rs.Logger.Error("handleConnection", "err", err) + return + } + } +} diff --git a/privval/socket_tcp.go b/privval/tcp_socket.go similarity index 59% rename from privval/socket_tcp.go rename to privval/tcp_socket.go index b26db00c2..2b17bf26e 100644 --- a/privval/socket_tcp.go +++ b/privval/tcp_socket.go @@ -24,6 +24,13 @@ type tcpTimeoutListener struct { period time.Duration } +// timeoutConn wraps a net.Conn to standardise protocol timeouts / deadline resets. +type timeoutConn struct { + net.Conn + + connDeadline time.Duration +} + // newTCPTimeoutListener returns an instance of tcpTimeoutListener. func newTCPTimeoutListener( ln net.Listener, @@ -38,6 +45,16 @@ func newTCPTimeoutListener( } } +// newTimeoutConn returns an instance of newTCPTimeoutConn. 
+func newTimeoutConn( + conn net.Conn, + connDeadline time.Duration) *timeoutConn { + return &timeoutConn{ + conn, + connDeadline, + } +} + // Accept implements net.Listener. func (ln tcpTimeoutListener) Accept() (net.Conn, error) { err := ln.SetDeadline(time.Now().Add(ln.acceptDeadline)) @@ -50,17 +67,24 @@ func (ln tcpTimeoutListener) Accept() (net.Conn, error) { return nil, err } - if err := tc.SetDeadline(time.Now().Add(ln.connDeadline)); err != nil { - return nil, err - } + // Wrap the conn in our timeout wrapper + conn := newTimeoutConn(tc, ln.connDeadline) - if err := tc.SetKeepAlive(true); err != nil { - return nil, err - } + return conn, nil +} - if err := tc.SetKeepAlivePeriod(ln.period); err != nil { - return nil, err - } +// Read implements net.Listener. +func (c timeoutConn) Read(b []byte) (n int, err error) { + // Reset deadline + c.Conn.SetReadDeadline(time.Now().Add(c.connDeadline)) + + return c.Conn.Read(b) +} + +// Write implements net.Listener. +func (c timeoutConn) Write(b []byte) (n int, err error) { + // Reset deadline + c.Conn.SetWriteDeadline(time.Now().Add(c.connDeadline)) - return tc, nil + return c.Conn.Write(b) } diff --git a/privval/socket_tcp_test.go b/privval/tcp_socket_test.go similarity index 91% rename from privval/socket_tcp_test.go rename to privval/tcp_socket_test.go index 44a673c0c..285e73ed5 100644 --- a/privval/socket_tcp_test.go +++ b/privval/tcp_socket_test.go @@ -44,13 +44,14 @@ func TestTCPTimeoutListenerConnDeadline(t *testing.T) { time.Sleep(2 * time.Millisecond) - _, err = c.Write([]byte("foo")) + msg := make([]byte, 200) + _, err = c.Read(msg) opErr, ok := err.(*net.OpError) if !ok { t.Fatalf("have %v, want *net.OpError", err) } - if have, want := opErr.Op, "write"; have != want { + if have, want := opErr.Op, "read"; have != want { t.Errorf("have %v, want %v", have, want) } }(ln) diff --git a/privval/socket_test.go b/privval/tcp_test.go similarity index 82% rename from privval/socket_test.go rename to privval/tcp_test.go index aa2e15fa0..6549759d0 100644 --- a/privval/socket_test.go +++ b/privval/tcp_test.go @@ -27,8 +27,7 @@ func TestSocketPVAddress(t *testing.T) { serverAddr := rs.privVal.GetAddress() - clientAddr, err := sc.getAddress() - require.NoError(t, err) + clientAddr := sc.GetAddress() assert.Equal(t, serverAddr, clientAddr) @@ -91,52 +90,83 @@ func TestSocketPVVote(t *testing.T) { assert.Equal(t, want.Signature, have.Signature) } -func TestSocketPVHeartbeat(t *testing.T) { +func TestSocketPVVoteResetDeadline(t *testing.T) { var ( chainID = cmn.RandStr(12) sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) - want = &types.Heartbeat{} - have = &types.Heartbeat{} + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} ) defer sc.Stop() defer rs.Stop() - require.NoError(t, rs.privVal.SignHeartbeat(chainID, want)) - require.NoError(t, sc.SignHeartbeat(chainID, have)) + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) + + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) assert.Equal(t, want.Signature, have.Signature) } -func TestSocketPVAcceptDeadline(t *testing.T) { +func TestSocketPVVoteKeepalive(t *testing.T) { var ( - sc = 
NewSocketPV( - log.TestingLogger(), - "127.0.0.1:0", - ed25519.GenPrivKey(), - ) + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} ) defer sc.Stop() + defer rs.Stop() + + time.Sleep(10 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestSocketPVHeartbeat(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) - SocketPVAcceptDeadline(time.Millisecond)(sc) + want = &types.Heartbeat{} + have = &types.Heartbeat{} + ) + defer sc.Stop() + defer rs.Stop() - assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) + require.NoError(t, rs.privVal.SignHeartbeat(chainID, want)) + require.NoError(t, sc.SignHeartbeat(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) } func TestSocketPVDeadline(t *testing.T) { var ( addr = testFreeAddr(t) listenc = make(chan struct{}) - sc = NewSocketPV( + sc = NewTCPVal( log.TestingLogger(), addr, ed25519.GenPrivKey(), ) ) - SocketPVConnDeadline(100 * time.Millisecond)(sc) - SocketPVConnWait(500 * time.Millisecond)(sc) + TCPValConnTimeout(100 * time.Millisecond)(sc) - go func(sc *SocketPV) { + go func(sc *TCPVal) { defer close(listenc) require.NoError(t, sc.Start()) @@ -161,26 +191,10 @@ func TestSocketPVDeadline(t *testing.T) { <-listenc - // Sleep to guarantee deadline has been hit. - time.Sleep(20 * time.Microsecond) - _, err := sc.getPubKey() assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) } -func TestSocketPVWait(t *testing.T) { - sc := NewSocketPV( - log.TestingLogger(), - "127.0.0.1:0", - ed25519.GenPrivKey(), - ) - defer sc.Stop() - - SocketPVConnWait(time.Millisecond)(sc) - - assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) -} - func TestRemoteSignerRetry(t *testing.T) { var ( attemptc = make(chan int) @@ -221,7 +235,7 @@ func TestRemoteSignerRetry(t *testing.T) { RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnRetries(retries)(rs) - assert.Equal(t, rs.Start().(cmn.Error).Data(), ErrDialRetryMax) + assert.Equal(t, rs.Start(), ErrDialRetryMax) select { case attempts := <-attemptc: @@ -328,7 +342,7 @@ func TestErrUnexpectedResponse(t *testing.T) { types.NewMockPV(), ed25519.GenPrivKey(), ) - sc = NewSocketPV( + sc = NewTCPVal( logger, addr, ed25519.GenPrivKey(), @@ -383,7 +397,7 @@ func testSetupSocketPair( t *testing.T, chainID string, privValidator types.PrivValidator, -) (*SocketPV, *RemoteSigner) { +) (*TCPVal, *RemoteSigner) { var ( addr = testFreeAddr(t) logger = log.TestingLogger() @@ -396,18 +410,20 @@ func testSetupSocketPair( privVal, ed25519.GenPrivKey(), ) - sc = NewSocketPV( + sc = NewTCPVal( logger, addr, ed25519.GenPrivKey(), ) ) - testStartSocketPV(t, readyc, sc) - - RemoteSignerConnDeadline(time.Millisecond)(rs) + TCPValConnTimeout(5 * time.Millisecond)(sc) + TCPValHeartbeat(2 * time.Millisecond)(sc) + RemoteSignerConnDeadline(5 * time.Millisecond)(rs) RemoteSignerConnRetries(1e6)(rs) + testStartSocketPV(t, readyc, sc) + require.NoError(t, rs.Start()) assert.True(t, rs.IsRunning()) @@ -416,7 +432,7 @@ func testSetupSocketPair( return sc, rs } -func testReadWriteResponse(t *testing.T, resp SocketPVMsg, rsConn net.Conn) { +func testReadWriteResponse(t *testing.T, resp RemoteSignerMsg, rsConn net.Conn) 
{ _, err := readMsg(rsConn) require.NoError(t, err) @@ -424,8 +440,8 @@ func testReadWriteResponse(t *testing.T, resp SocketPVMsg, rsConn net.Conn) { require.NoError(t, err) } -func testStartSocketPV(t *testing.T, readyc chan struct{}, sc *SocketPV) { - go func(sc *SocketPV) { +func testStartSocketPV(t *testing.T, readyc chan struct{}, sc *TCPVal) { + go func(sc *TCPVal) { require.NoError(t, sc.Start()) assert.True(t, sc.IsRunning()) diff --git a/privval/wire.go b/privval/wire.go index 50660ff34..2e11677e4 100644 --- a/privval/wire.go +++ b/privval/wire.go @@ -9,5 +9,5 @@ var cdc = amino.NewCodec() func init() { cryptoAmino.RegisterAmino(cdc) - RegisterSocketPVMsg(cdc) + RegisterRemoteSignerMsg(cdc) } From d20693fb16ad79c8313f6e590d187c9fb22f0d92 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 17 Oct 2018 20:47:56 +0400 Subject: [PATCH 081/113] install scripts: update go version, remove upgrade cmd (#2650) * [install scripts] update go version, remove upgrade - upgrading OS is out of scope of installation scripts * add missing log statement Refs https://github.com/tendermint/tendermint/pull/2642#discussion_r225786794 --- consensus/state_test.go | 2 ++ scripts/install/install_tendermint_bsd.sh | 11 +++++------ scripts/install/install_tendermint_ubuntu.sh | 10 ++++------ 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/consensus/state_test.go b/consensus/state_test.go index c4fc11c3d..83c4bb142 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -949,6 +949,8 @@ func TestProposeValidBlock(t *testing.T) { round = round + 2 // moving to the next round ensureNewRound(newRoundCh, height, round) + t.Log("### ONTO ROUND 3") + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) round = round + 1 // moving to the next round diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh index aba584f2e..5b30eab31 100644 --- a/scripts/install/install_tendermint_bsd.sh +++ b/scripts/install/install_tendermint_bsd.sh @@ -17,16 +17,15 @@ set BRANCH=master sudo pkg update -sudo pkg upgrade -y sudo pkg install -y gmake sudo pkg install -y git # get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.freebsd-amd64.tar.gz -tar -xvf go1.10.freebsd-amd64.tar.gz +curl -O https://storage.googleapis.com/golang/go1.11.freebsd-amd64.tar.gz +tar -xvf go1.11.freebsd-amd64.tar.gz -# move go binary and add to path -mv go /usr/local +# move go folder and add go binary to path +sudo mv go /usr/local set path=($path /usr/local/go/bin) @@ -41,7 +40,7 @@ source ~/.tcshrc # get the code and move into repo set REPO=github.com/tendermint/tendermint go get $REPO -cd $GOPATH/src/$REPO +cd "$GOPATH/src/$REPO" # build & install master git checkout $BRANCH diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh index b9605de07..29e975088 100644 --- a/scripts/install/install_tendermint_ubuntu.sh +++ b/scripts/install/install_tendermint_ubuntu.sh @@ -14,14 +14,13 @@ REPO=github.com/tendermint/tendermint BRANCH=master sudo apt-get update -y -sudo apt-get upgrade -y sudo apt-get install -y make # get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz -tar -xvf go1.10.linux-amd64.tar.gz +curl -O https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz +tar -xvf go1.11.linux-amd64.tar.gz -# move go binary and add to path +# move go folder and add go binary to path sudo mv go /usr/local echo "export 
PATH=\$PATH:/usr/local/go/bin" >> ~/.profile
@@ -29,12 +28,11 @@ echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile
 mkdir goApps
 echo "export GOPATH=$HOME/goApps" >> ~/.profile
 echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile
-
 source ~/.profile
 
 # get the code and move into repo
 go get $REPO
-cd $GOPATH/src/$REPO
+cd "$GOPATH/src/$REPO"
 
 # build & install
 git checkout $BRANCH

From 6a07f415e96b6311a1e1626a888cfecab47d43b5 Mon Sep 17 00:00:00 2001
From: Jack Zampolin
Date: Wed, 17 Oct 2018 11:26:14 -0700
Subject: [PATCH 082/113] Change error output format for better SDK and Voyager UX (#2648)

* Change error output format

* Update tests

* :facepalm:

* apply suggestion
---
 libs/common/errors.go | 2 +-
 libs/common/errors_test.go | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/libs/common/errors.go b/libs/common/errors.go
index 1dc909e89..10e40ebd2 100644
--- a/libs/common/errors.go
+++ b/libs/common/errors.go
@@ -146,7 +146,7 @@ func (err *cmnError) Format(s fmt.State, verb rune) {
 s.Write([]byte("--= /Error =--\n"))
 } else {
 // Write msg.
- s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc?
+ s.Write([]byte(fmt.Sprintf("%v", err.data)))
 }
 }
 }
diff --git a/libs/common/errors_test.go b/libs/common/errors_test.go
index 52c78a765..326468c94 100644
--- a/libs/common/errors_test.go
+++ b/libs/common/errors_test.go
@@ -24,7 +24,7 @@ func TestErrorPanic(t *testing.T) {
 var err = capturePanic()
 
 assert.Equal(t, pnk{"something"}, err.Data())
- assert.Equal(t, "Error{{something}}", fmt.Sprintf("%v", err))
+ assert.Equal(t, "{something}", fmt.Sprintf("%v", err))
 assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).")
 assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
 }
@@ -34,7 +34,7 @@ func TestErrorWrapSomething(t *testing.T) {
 var err = ErrorWrap("something", "formatter%v%v", 0, 1)
 
 assert.Equal(t, "something", err.Data())
- assert.Equal(t, "Error{something}", fmt.Sprintf("%v", err))
+ assert.Equal(t, "something", fmt.Sprintf("%v", err))
 assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err))
 assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
 }
@@ -46,7 +46,7 @@ func TestErrorWrapNothing(t *testing.T) {
 
 assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data())
- assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
+ assert.Equal(t, "formatter01", fmt.Sprintf("%v", err))
 assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
 assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
 }
@@ -58,7 +58,7 @@ func TestErrorNewError(t *testing.T) {
 
 assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data())
- assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
+ assert.Equal(t, "formatter01", fmt.Sprintf("%v", err))
 assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
 assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace")
 }
@@ -70,7 +70,7 @@ func TestErrorNewErrorWithStacktrace(t *testing.T) {
 
 assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data())
- assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
+ assert.Equal(t, "formatter01", fmt.Sprintf("%v", err))
 assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
 assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
 }
@@ -85,7 +85,7 @@ func 
TestErrorNewErrorWithTrace(t *testing.T) {
 
 assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data())
- assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
+ assert.Equal(t, "formatter01", fmt.Sprintf("%v", err))
 assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
 dump := fmt.Sprintf("%#v", err)
 assert.NotContains(t, dump, "Stack Trace")

From 455d34134cc53c334ebd3195ac22ea444c4b59bb Mon Sep 17 00:00:00 2001
From: Ethan Buchman
Date: Wed, 17 Oct 2018 15:30:53 -0400
Subject: [PATCH 083/113] ADR-016: Add versions to Block and State (#2644)

* types: add Version to Header

* abci: add Version to Header

* state: add Version to State

* node: check software and state protocol versions match

* update changelog

* docs/spec: update for versions

* state: more tests

* remove TODOs

* remove empty test
---
 CHANGELOG_PENDING.md | 8 +-
 abci/types/types.pb.go | 863 +++++++++++++++++++----------
 abci/types/types.proto | 37 +-
 abci/types/typespb_test.go | 124 +++++
 docs/spec/abci/abci.md | 10 +
 docs/spec/blockchain/blockchain.md | 24 +
 docs/spec/blockchain/state.md | 1 +
 node/node.go | 9 +
 state/execution.go | 4 +
 state/state.go | 26 +-
 state/state_test.go | 4 +-
 state/validation.go | 7 +
 state/validation_test.go | 8 +
 types/block.go | 20 +-
 types/block_test.go | 14 +-
 types/proto3/block.pb.go | 276 ++++-----
 types/proto3/block.proto | 36 +-
 17 files changed, 983 insertions(+), 488 deletions(-)

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 05369ea66..5fcbbb7b1 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -12,10 +12,13 @@ BREAKING CHANGES:
 * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false`
 * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain an error message (returned by the remote signer)
+  * [state] \#2644 Add Version field to State, breaking the format of State as
+    encoded on disk.
 
 * Apps
   * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just arbitrary bytes
+  * [abci] \#2644 Add Version to Header and shift all fields by one
 
 * Go API
 * [node] Remove node.RunForever
@@ -25,7 +28,8 @@ BREAKING CHANGES:
 * [crypto/merkle & lite] \#2298 Various changes to accommodate General Merkle trees
 * [crypto/merkle] \#2595 Remove all Hasher objects in favor of byte slices
 * [crypto/merkle] \#2635 merkle.SimpleHashFromTwoHashes is no longer exported
- * [types] \#2598 `VoteTypeXxx` are now
+ * [types] \#2598 `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`,
+   `PrecommitType`.
 
 * Blockchain Protocol
 * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`:
 * \#2459 Use amino encoding instead of JSON in `SignBytes`.
 * \#2598 Change `Type` field from `string` to `byte` and use new `SignedMsgType` to enumerate.
* [types] \#2512 Remove the pubkey field from the validator hash + * [types] \#2644 Add Version struct to Header * [state] \#2587 Require block.Time of the fist block to be genesis time + * [state] \#2644 Require block.Version to match state.Version * P2P Protocol diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 1ec516024..81fb74b42 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -61,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{0} + return fileDescriptor_types_07d64ea985a686e2, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{1} + return fileDescriptor_types_07d64ea985a686e2, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{2} + return fileDescriptor_types_07d64ea985a686e2, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -569,7 +569,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{3} + return fileDescriptor_types_07d64ea985a686e2, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -618,7 +618,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{4} + return fileDescriptor_types_07d64ea985a686e2, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -676,7 +676,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{5} + return fileDescriptor_types_07d64ea985a686e2, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -754,7 +754,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{6} + return fileDescriptor_types_07d64ea985a686e2, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -826,7 +826,7 @@ func (m *RequestBeginBlock) Reset() { *m = 
RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{7} + return fileDescriptor_types_07d64ea985a686e2, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -894,7 +894,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{8} + return fileDescriptor_types_07d64ea985a686e2, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -941,7 +941,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{9} + return fileDescriptor_types_07d64ea985a686e2, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -988,7 +988,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{10} + return fileDescriptor_types_07d64ea985a686e2, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1034,7 +1034,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{11} + return fileDescriptor_types_07d64ea985a686e2, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1087,7 +1087,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{12} + return fileDescriptor_types_07d64ea985a686e2, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1540,7 +1540,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{13} + return fileDescriptor_types_07d64ea985a686e2, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1587,7 +1587,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{14} + return fileDescriptor_types_07d64ea985a686e2, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1633,7 
+1633,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{15} + return fileDescriptor_types_07d64ea985a686e2, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1676,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{16} + return fileDescriptor_types_07d64ea985a686e2, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1748,7 +1748,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{17} + return fileDescriptor_types_07d64ea985a686e2, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1810,7 +1810,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{18} + return fileDescriptor_types_07d64ea985a686e2, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1873,7 +1873,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{19} + return fileDescriptor_types_07d64ea985a686e2, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1976,7 +1976,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{20} + return fileDescriptor_types_07d64ea985a686e2, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2030,7 +2030,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{21} + return fileDescriptor_types_07d64ea985a686e2, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2133,7 +2133,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{22} + return 
fileDescriptor_types_07d64ea985a686e2, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2231,7 +2231,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{23} + return fileDescriptor_types_07d64ea985a686e2, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2293,7 +2293,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{24} + return fileDescriptor_types_07d64ea985a686e2, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2343,7 +2343,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{25} + return fileDescriptor_types_07d64ea985a686e2, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2401,7 +2401,7 @@ func (m *BlockSize) Reset() { *m = BlockSize{} } func (m *BlockSize) String() string { return proto.CompactTextString(m) } func (*BlockSize) ProtoMessage() {} func (*BlockSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{26} + return fileDescriptor_types_07d64ea985a686e2, []int{26} } func (m *BlockSize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2457,7 +2457,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{27} + return fileDescriptor_types_07d64ea985a686e2, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2505,7 +2505,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{28} + return fileDescriptor_types_07d64ea985a686e2, []int{28} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2550,25 +2550,26 @@ func (m *LastCommitInfo) GetVotes() []VoteInfo { type Header struct { // basic block info - ChainID string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,3,opt,name=time,stdtime" json:"time"` - NumTxs int64 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` - TotalTxs int64 `protobuf:"varint,5,opt,name=total_txs,json=totalTxs,proto3" json:"total_txs,omitempty"` + Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" 
json:"chain_id,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,4,opt,name=time,stdtime" json:"time"` + NumTxs int64 `protobuf:"varint,5,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` + TotalTxs int64 `protobuf:"varint,6,opt,name=total_txs,json=totalTxs,proto3" json:"total_txs,omitempty"` // prev block info - LastBlockId BlockID `protobuf:"bytes,6,opt,name=last_block_id,json=lastBlockId" json:"last_block_id"` + LastBlockId BlockID `protobuf:"bytes,7,opt,name=last_block_id,json=lastBlockId" json:"last_block_id"` // hashes of block data - LastCommitHash []byte `protobuf:"bytes,7,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` - DataHash []byte `protobuf:"bytes,8,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + LastCommitHash []byte `protobuf:"bytes,8,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` + DataHash []byte `protobuf:"bytes,9,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` // hashes from the app output from the prev block - ValidatorsHash []byte `protobuf:"bytes,9,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` - NextValidatorsHash []byte `protobuf:"bytes,10,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` - ConsensusHash []byte `protobuf:"bytes,11,opt,name=consensus_hash,json=consensusHash,proto3" json:"consensus_hash,omitempty"` - AppHash []byte `protobuf:"bytes,12,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - LastResultsHash []byte `protobuf:"bytes,13,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + ValidatorsHash []byte `protobuf:"bytes,10,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` + NextValidatorsHash []byte `protobuf:"bytes,11,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,12,opt,name=consensus_hash,json=consensusHash,proto3" json:"consensus_hash,omitempty"` + AppHash []byte `protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,14,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` // consensus info - EvidenceHash []byte `protobuf:"bytes,14,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` - ProposerAddress []byte `protobuf:"bytes,15,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` + EvidenceHash []byte `protobuf:"bytes,15,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,16,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2578,7 +2579,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{29} + return fileDescriptor_types_07d64ea985a686e2, []int{29} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2607,6 +2608,13 @@ func (m *Header) 
XXX_DiscardUnknown() { var xxx_messageInfo_Header proto.InternalMessageInfo +func (m *Header) GetVersion() Version { + if m != nil { + return m.Version + } + return Version{} +} + func (m *Header) GetChainID() string { if m != nil { return m.ChainID @@ -2712,6 +2720,61 @@ func (m *Header) GetProposerAddress() []byte { return nil } +type Version struct { + Block uint64 `protobuf:"varint,1,opt,name=Block,proto3" json:"Block,omitempty"` + App uint64 `protobuf:"varint,2,opt,name=App,proto3" json:"App,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_types_07d64ea985a686e2, []int{30} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return m.Size() +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetBlock() uint64 { + if m != nil { + return m.Block + } + return 0 +} + +func (m *Version) GetApp() uint64 { + if m != nil { + return m.App + } + return 0 +} + type BlockID struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` PartsHeader PartSetHeader `protobuf:"bytes,2,opt,name=parts_header,json=partsHeader" json:"parts_header"` @@ -2724,7 +2787,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{30} + return fileDescriptor_types_07d64ea985a686e2, []int{31} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2779,7 +2842,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{31} + return fileDescriptor_types_07d64ea985a686e2, []int{32} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2836,7 +2899,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{32} + return fileDescriptor_types_07d64ea985a686e2, []int{33} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2892,7 +2955,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{33} + return 
fileDescriptor_types_07d64ea985a686e2, []int{34} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2948,7 +3011,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{34} + return fileDescriptor_types_07d64ea985a686e2, []int{35} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3003,7 +3066,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{35} + return fileDescriptor_types_07d64ea985a686e2, []int{36} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3061,7 +3124,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4a7ab597ee120b05, []int{36} + return fileDescriptor_types_07d64ea985a686e2, []int{37} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3186,6 +3249,8 @@ func init() { golang_proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") proto.RegisterType((*Header)(nil), "types.Header") golang_proto.RegisterType((*Header)(nil), "types.Header") + proto.RegisterType((*Version)(nil), "types.Version") + golang_proto.RegisterType((*Version)(nil), "types.Version") proto.RegisterType((*BlockID)(nil), "types.BlockID") golang_proto.RegisterType((*BlockID)(nil), "types.BlockID") proto.RegisterType((*PartSetHeader)(nil), "types.PartSetHeader") @@ -4735,6 +4800,9 @@ func (this *Header) Equal(that interface{}) bool { } else if this == nil { return false } + if !this.Version.Equal(&that1.Version) { + return false + } if this.ChainID != that1.ChainID { return false } @@ -4785,6 +4853,36 @@ func (this *Header) Equal(that interface{}) bool { } return true } +func (this *Version) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Version) + if !ok { + that2, ok := that.(Version) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Block != that1.Block { + return false + } + if this.App != that1.App { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} func (this *BlockID) Equal(that interface{}) bool { if that == nil { return this == nil @@ -6848,93 +6946,103 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Version.Size())) + n35, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 if len(m.ChainID) > 0 { - dAtA[i] = 0xa + dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) i += copy(dAtA[i:], m.ChainID) } if m.Height != 0 { - dAtA[i] = 0x10 + dAtA[i] = 0x18 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } - dAtA[i] = 0x1a + dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n35, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n36, 
err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if m.NumTxs != 0 { - dAtA[i] = 0x20 + dAtA[i] = 0x28 i++ i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) } if m.TotalTxs != 0 { - dAtA[i] = 0x28 + dAtA[i] = 0x30 i++ i = encodeVarintTypes(dAtA, i, uint64(m.TotalTxs)) } - dAtA[i] = 0x32 + dAtA[i] = 0x3a i++ i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) - n36, err := m.LastBlockId.MarshalTo(dAtA[i:]) + n37, err := m.LastBlockId.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 if len(m.LastCommitHash) > 0 { - dAtA[i] = 0x3a + dAtA[i] = 0x42 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) i += copy(dAtA[i:], m.LastCommitHash) } if len(m.DataHash) > 0 { - dAtA[i] = 0x42 + dAtA[i] = 0x4a i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) i += copy(dAtA[i:], m.DataHash) } if len(m.ValidatorsHash) > 0 { - dAtA[i] = 0x4a + dAtA[i] = 0x52 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) i += copy(dAtA[i:], m.ValidatorsHash) } if len(m.NextValidatorsHash) > 0 { - dAtA[i] = 0x52 + dAtA[i] = 0x5a i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) i += copy(dAtA[i:], m.NextValidatorsHash) } if len(m.ConsensusHash) > 0 { - dAtA[i] = 0x5a + dAtA[i] = 0x62 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) i += copy(dAtA[i:], m.ConsensusHash) } if len(m.AppHash) > 0 { - dAtA[i] = 0x62 + dAtA[i] = 0x6a i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) i += copy(dAtA[i:], m.AppHash) } if len(m.LastResultsHash) > 0 { - dAtA[i] = 0x6a + dAtA[i] = 0x72 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) i += copy(dAtA[i:], m.LastResultsHash) } if len(m.EvidenceHash) > 0 { - dAtA[i] = 0x72 + dAtA[i] = 0x7a i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) i += copy(dAtA[i:], m.EvidenceHash) } if len(m.ProposerAddress) > 0 { - dAtA[i] = 0x7a + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) i += copy(dAtA[i:], m.ProposerAddress) @@ -6945,6 +7053,37 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Block != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Block)) + } + if m.App != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.App)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *BlockID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6969,11 +7108,11 @@ func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) - n37, err := m.PartsHeader.MarshalTo(dAtA[i:]) + n38, err := m.PartsHeader.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -7062,11 +7201,11 @@ func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) - n38, err := m.PubKey.MarshalTo(dAtA[i:]) + n39, err := m.PubKey.MarshalTo(dAtA[i:]) if err != nil { 
return 0, err } - i += n38 + i += n39 if m.Power != 0 { dAtA[i] = 0x10 i++ @@ -7096,11 +7235,11 @@ func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n39, err := m.Validator.MarshalTo(dAtA[i:]) + n40, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 if m.SignedLastBlock { dAtA[i] = 0x10 i++ @@ -7174,11 +7313,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n40, err := m.Validator.MarshalTo(dAtA[i:]) + n41, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if m.Height != 0 { dAtA[i] = 0x18 i++ @@ -7187,11 +7326,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n41, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n42, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n42 if m.TotalVotingPower != 0 { dAtA[i] = 0x28 i++ @@ -7842,13 +7981,15 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { func NewPopulatedHeader(r randyTypes, easy bool) *Header { this := &Header{} + v33 := NewPopulatedVersion(r, easy) + this.Version = *v33 this.ChainID = string(randStringTypes(r)) this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v33 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v33 + v34 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v34 this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 @@ -7857,68 +7998,78 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v34 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v34 - v35 := r.Intn(100) - this.LastCommitHash = make([]byte, v35) - for i := 0; i < v35; i++ { - this.LastCommitHash[i] = byte(r.Intn(256)) - } + v35 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v35 v36 := r.Intn(100) - this.DataHash = make([]byte, v36) + this.LastCommitHash = make([]byte, v36) for i := 0; i < v36; i++ { - this.DataHash[i] = byte(r.Intn(256)) + this.LastCommitHash[i] = byte(r.Intn(256)) } v37 := r.Intn(100) - this.ValidatorsHash = make([]byte, v37) + this.DataHash = make([]byte, v37) for i := 0; i < v37; i++ { - this.ValidatorsHash[i] = byte(r.Intn(256)) + this.DataHash[i] = byte(r.Intn(256)) } v38 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v38) + this.ValidatorsHash = make([]byte, v38) for i := 0; i < v38; i++ { - this.NextValidatorsHash[i] = byte(r.Intn(256)) + this.ValidatorsHash[i] = byte(r.Intn(256)) } v39 := r.Intn(100) - this.ConsensusHash = make([]byte, v39) + this.NextValidatorsHash = make([]byte, v39) for i := 0; i < v39; i++ { - this.ConsensusHash[i] = byte(r.Intn(256)) + this.NextValidatorsHash[i] = byte(r.Intn(256)) } v40 := r.Intn(100) - this.AppHash = make([]byte, v40) + this.ConsensusHash = make([]byte, v40) for i := 0; i < v40; i++ { - this.AppHash[i] = byte(r.Intn(256)) + this.ConsensusHash[i] = byte(r.Intn(256)) } v41 := r.Intn(100) - this.LastResultsHash = make([]byte, v41) + this.AppHash = make([]byte, v41) for i := 0; i < v41; i++ { - this.LastResultsHash[i] = byte(r.Intn(256)) + this.AppHash[i] = byte(r.Intn(256)) } v42 := r.Intn(100) - this.EvidenceHash = make([]byte, 
v42) + this.LastResultsHash = make([]byte, v42) for i := 0; i < v42; i++ { - this.EvidenceHash[i] = byte(r.Intn(256)) + this.LastResultsHash[i] = byte(r.Intn(256)) } v43 := r.Intn(100) - this.ProposerAddress = make([]byte, v43) + this.EvidenceHash = make([]byte, v43) for i := 0; i < v43; i++ { + this.EvidenceHash[i] = byte(r.Intn(256)) + } + v44 := r.Intn(100) + this.ProposerAddress = make([]byte, v44) + for i := 0; i < v44; i++ { this.ProposerAddress[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 16) + this.XXX_unrecognized = randUnrecognizedTypes(r, 17) + } + return this +} + +func NewPopulatedVersion(r randyTypes, easy bool) *Version { + this := &Version{} + this.Block = uint64(uint64(r.Uint32())) + this.App = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { this := &BlockID{} - v44 := r.Intn(100) - this.Hash = make([]byte, v44) - for i := 0; i < v44; i++ { + v45 := r.Intn(100) + this.Hash = make([]byte, v45) + for i := 0; i < v45; i++ { this.Hash[i] = byte(r.Intn(256)) } - v45 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v45 + v46 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v46 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -7931,9 +8082,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { if r.Intn(2) == 0 { this.Total *= -1 } - v46 := r.Intn(100) - this.Hash = make([]byte, v46) - for i := 0; i < v46; i++ { + v47 := r.Intn(100) + this.Hash = make([]byte, v47) + for i := 0; i < v47; i++ { this.Hash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7944,9 +8095,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v47 := r.Intn(100) - this.Address = make([]byte, v47) - for i := 0; i < v47; i++ { + v48 := r.Intn(100) + this.Address = make([]byte, v48) + for i := 0; i < v48; i++ { this.Address[i] = byte(r.Intn(256)) } this.Power = int64(r.Int63()) @@ -7961,8 +8112,8 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { this := &ValidatorUpdate{} - v48 := NewPopulatedPubKey(r, easy) - this.PubKey = *v48 + v49 := NewPopulatedPubKey(r, easy) + this.PubKey = *v49 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -7975,8 +8126,8 @@ func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { this := &VoteInfo{} - v49 := NewPopulatedValidator(r, easy) - this.Validator = *v49 + v50 := NewPopulatedValidator(r, easy) + this.Validator = *v50 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -7987,9 +8138,9 @@ func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v50 := r.Intn(100) - this.Data = make([]byte, v50) - for i := 0; i < v50; i++ { + v51 := r.Intn(100) + this.Data = make([]byte, v51) + for i := 0; i < v51; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8001,14 +8152,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r 
randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v51 := NewPopulatedValidator(r, easy) - this.Validator = *v51 + v52 := NewPopulatedValidator(r, easy) + this.Validator = *v52 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v52 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v52 + v53 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v53 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -8038,9 +8189,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v53 := r.Intn(100) - tmps := make([]rune, v53) - for i := 0; i < v53; i++ { + v54 := r.Intn(100) + tmps := make([]rune, v54) + for i := 0; i < v54; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -8062,11 +8213,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v54 := r.Int63() + v55 := r.Int63() if r.Intn(2) == 0 { - v54 *= -1 + v55 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v54)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v55)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -8834,6 +8985,8 @@ func (m *LastCommitInfo) Size() (n int) { func (m *Header) Size() (n int) { var l int _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) l = len(m.ChainID) if l > 0 { n += 1 + l + sovTypes(uint64(l)) @@ -8885,7 +9038,22 @@ func (m *Header) Size() (n int) { } l = len(m.ProposerAddress) if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n += 2 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Version) Size() (n int) { + var l int + _ = l + if m.Block != 0 { + n += 1 + sovTypes(uint64(m.Block)) + } + if m.App != 0 { + n += 1 + sovTypes(uint64(m.App)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -13126,6 +13294,36 @@ func (m *Header) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) } @@ -13154,7 +13352,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } m.ChainID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } @@ -13173,7 +13371,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { break } } - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } @@ -13203,7 +13401,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { 
return err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) } @@ -13222,7 +13420,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { break } } - case 5: + case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalTxs", wireType) } @@ -13241,7 +13439,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { break } } - case 6: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) } @@ -13271,7 +13469,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 7: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) } @@ -13302,7 +13500,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.LastCommitHash = []byte{} } iNdEx = postIndex - case 8: + case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) } @@ -13333,7 +13531,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.DataHash = []byte{} } iNdEx = postIndex - case 9: + case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) } @@ -13364,7 +13562,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.ValidatorsHash = []byte{} } iNdEx = postIndex - case 10: + case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) } @@ -13395,7 +13593,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.NextValidatorsHash = []byte{} } iNdEx = postIndex - case 11: + case 12: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) } @@ -13426,7 +13624,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.ConsensusHash = []byte{} } iNdEx = postIndex - case 12: + case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } @@ -13457,7 +13655,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex - case 13: + case 14: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) } @@ -13488,7 +13686,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.LastResultsHash = []byte{} } iNdEx = postIndex - case 14: + case 15: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) } @@ -13519,7 +13717,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.EvidenceHash = []byte{} } iNdEx = postIndex - case 15: + case 16: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) } @@ -13572,6 +13770,95 @@ func (m *Header) Unmarshal(dAtA []byte) error { } return nil } +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Block", wireType) + } + m.Block = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Block |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field App", wireType) + } + m.App = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.App |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *BlockID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -14481,143 +14768,145 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4a7ab597ee120b05) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_07d64ea985a686e2) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4a7ab597ee120b05) -} - -var fileDescriptor_types_4a7ab597ee120b05 = []byte{ - // 2107 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x73, 0x23, 0x47, - 0x15, 0xf7, 0x48, 0xb2, 0xa4, 0x79, 0xb6, 0x25, 0xa7, 0xd7, 0x6b, 0x6b, 0x45, 0xb0, 0xb7, 0x26, - 0x90, 0xd8, 0xc4, 0x91, 0x53, 0x0e, 0xa1, 0xbc, 0xd9, 0x90, 0x2a, 0x6b, 0x77, 0xc1, 0xae, 0x04, - 0x30, 0xb3, 0xbb, 0xe6, 0x42, 0xd5, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x56, 0x9a, 0x99, 0xcc, 0xb4, - 0x1c, 0x79, 0x8f, 0x9c, 0x73, 0xc8, 0x81, 0x2a, 0xfe, 0x05, 0xfe, 0x04, 0x8e, 0x9c, 0xa8, 0x1c, - 0x29, 0x8a, 0xf3, 0x02, 0xa6, 0x38, 0xc0, 0x95, 0xa2, 0x8a, 0x23, 0xd5, 0xaf, 0xbb, 0xe7, 0xcb, - 0xa3, 0x25, 0x1b, 0x6e, 0x5c, 0xa4, 0xee, 0xf7, 0xd1, 0x1f, 0x6f, 0xde, 0x7b, 0xbf, 0xf7, 0x1a, - 0x36, 0xe9, 0x60, 0xe8, 0x1d, 0xf0, 0xab, 0x90, 0xc5, 0xf2, 0xb7, 0x17, 0x46, 0x01, 0x0f, 0xc8, - 0x32, 0x4e, 0xba, 0xef, 0x8c, 0x3c, 0x3e, 0x9e, 0x0d, 0x7a, 0xc3, 0x60, 0x7a, 0x30, 0x0a, 0x46, - 0xc1, 0x01, 0x72, 0x07, 0xb3, 0x0b, 0x9c, 0xe1, 0x04, 0x47, 0x52, 0xab, 0xbb, 0x33, 0x0a, 0x82, - 0xd1, 0x84, 0xa5, 0x52, 0xdc, 0x9b, 0xb2, 0x98, 0xd3, 0x69, 0xa8, 0x04, 0x8e, 0x32, 0xeb, 0x71, - 0xe6, 0xbb, 0x2c, 0x9a, 0x7a, 0x3e, 0xcf, 0x0e, 0x27, 0xde, 0x20, 0x3e, 0x18, 0x06, 0xd3, 0x69, - 0xe0, 0x67, 0x0f, 0xd4, 0xbd, 0xff, 0x5f, 0x35, 0x87, 0xd1, 0x55, 0xc8, 0x83, 0x83, 0x29, 0x8b, - 0x9e, 0x4d, 0x98, 0xfa, 0x93, 0xca, 0xd6, 0xef, 0x6a, 0xd0, 0xb0, 0xd9, 0xa7, 0x33, 0x16, 0x73, - 0xb2, 0x0b, 0x35, 0x36, 0x1c, 0x07, 0x9d, 0xca, 0x5d, 0x63, 0x77, 0xe5, 0x90, 0xf4, 0xe4, 0x26, - 0x8a, 0xfb, 0x68, 0x38, 0x0e, 0x4e, 0x96, 0x6c, 0x94, 0x20, 0x6f, 0xc3, 0xf2, 0xc5, 0x64, 0x16, - 0x8f, 0x3b, 0x55, 0x14, 0xbd, 0x95, 0x17, 0xfd, 0x81, 0x60, 0x9d, 0x2c, 0xd9, 0x52, 0x46, 0x2c, - 0xeb, 0xf9, 0x17, 0x41, 0xa7, 0x56, 0xb6, 0xec, 0xa9, 0x7f, 0x81, 0xcb, 0x0a, 0x09, 0x72, 0x04, - 0x10, 0x33, 0xee, 0x04, 0x21, 0xf7, 0x02, 0xbf, 0xb3, 0x8c, 0xf2, 0x5b, 0x79, 0xf9, 0xc7, 0x8c, - 0xff, 0x04, 0xd9, 0x27, 0x4b, 0xb6, 0x19, 0xeb, 0x89, 
0xd0, 0xf4, 0x7c, 0x8f, 0x3b, 0xc3, 0x31, - 0xf5, 0xfc, 0x4e, 0xbd, 0x4c, 0xf3, 0xd4, 0xf7, 0xf8, 0x03, 0xc1, 0x16, 0x9a, 0x9e, 0x9e, 0x88, - 0xab, 0x7c, 0x3a, 0x63, 0xd1, 0x55, 0xa7, 0x51, 0x76, 0x95, 0x9f, 0x0a, 0x96, 0xb8, 0x0a, 0xca, - 0x90, 0xfb, 0xb0, 0x32, 0x60, 0x23, 0xcf, 0x77, 0x06, 0x93, 0x60, 0xf8, 0xac, 0xd3, 0x44, 0x95, - 0x4e, 0x5e, 0xa5, 0x2f, 0x04, 0xfa, 0x82, 0x7f, 0xb2, 0x64, 0xc3, 0x20, 0x99, 0x91, 0x43, 0x68, - 0x0e, 0xc7, 0x6c, 0xf8, 0xcc, 0xe1, 0xf3, 0x8e, 0x89, 0x9a, 0xb7, 0xf3, 0x9a, 0x0f, 0x04, 0xf7, - 0xc9, 0xfc, 0x64, 0xc9, 0x6e, 0x0c, 0xe5, 0x90, 0xbc, 0x0f, 0x26, 0xf3, 0x5d, 0xb5, 0xdd, 0x0a, - 0x2a, 0x6d, 0x16, 0xbe, 0x8b, 0xef, 0xea, 0xcd, 0x9a, 0x4c, 0x8d, 0x49, 0x0f, 0xea, 0xc2, 0x51, - 0x3c, 0xde, 0x59, 0x45, 0x9d, 0x8d, 0xc2, 0x46, 0xc8, 0x3b, 0x59, 0xb2, 0x95, 0x94, 0x30, 0x9f, - 0xcb, 0x26, 0xde, 0x25, 0x8b, 0xc4, 0xe1, 0x6e, 0x95, 0x99, 0xef, 0xa1, 0xe4, 0xe3, 0xf1, 0x4c, - 0x57, 0x4f, 0xfa, 0x0d, 0x58, 0xbe, 0xa4, 0x93, 0x19, 0xb3, 0xde, 0x82, 0x95, 0x8c, 0xa7, 0x90, - 0x0e, 0x34, 0xa6, 0x2c, 0x8e, 0xe9, 0x88, 0x75, 0x8c, 0xbb, 0xc6, 0xae, 0x69, 0xeb, 0xa9, 0xd5, - 0x82, 0xd5, 0xac, 0x9f, 0x64, 0x14, 0x85, 0x2f, 0x08, 0xc5, 0x4b, 0x16, 0xc5, 0xc2, 0x01, 0x94, - 0xa2, 0x9a, 0x5a, 0x1f, 0xc0, 0x7a, 0xd1, 0x09, 0xc8, 0x3a, 0x54, 0x9f, 0xb1, 0x2b, 0x25, 0x29, - 0x86, 0x64, 0x43, 0x1d, 0x08, 0xbd, 0xd8, 0xb4, 0xd5, 0xe9, 0xbe, 0xa8, 0x24, 0xca, 0x89, 0x1f, - 0x90, 0x23, 0xa8, 0x89, 0x28, 0x44, 0xed, 0x95, 0xc3, 0x6e, 0x4f, 0x86, 0x68, 0x4f, 0x87, 0x68, - 0xef, 0x89, 0x0e, 0xd1, 0x7e, 0xf3, 0xcb, 0x17, 0x3b, 0x4b, 0x5f, 0xfc, 0x69, 0xc7, 0xb0, 0x51, - 0x83, 0xdc, 0x11, 0x9f, 0x92, 0x7a, 0xbe, 0xe3, 0xb9, 0x6a, 0x9f, 0x06, 0xce, 0x4f, 0x5d, 0x72, - 0x0c, 0xeb, 0xc3, 0xc0, 0x8f, 0x99, 0x1f, 0xcf, 0x62, 0x27, 0xa4, 0x11, 0x9d, 0xc6, 0x2a, 0x4a, - 0xf4, 0x87, 0x7b, 0xa0, 0xd9, 0x67, 0xc8, 0xb5, 0xdb, 0xc3, 0x3c, 0x81, 0x7c, 0x08, 0x70, 0x49, - 0x27, 0x9e, 0x4b, 0x79, 0x10, 0xc5, 0x9d, 0xda, 0xdd, 0x6a, 0x46, 0xf9, 0x5c, 0x33, 0x9e, 0x86, - 0x2e, 0xe5, 0xac, 0x5f, 0x13, 0x27, 0xb3, 0x33, 0xf2, 0xe4, 0x4d, 0x68, 0xd3, 0x30, 0x74, 0x62, - 0x4e, 0x39, 0x73, 0x06, 0x57, 0x9c, 0xc5, 0x18, 0x49, 0xab, 0xf6, 0x1a, 0x0d, 0xc3, 0xc7, 0x82, - 0xda, 0x17, 0x44, 0xcb, 0x4d, 0xbe, 0x03, 0x3a, 0x39, 0x21, 0x50, 0x73, 0x29, 0xa7, 0x68, 0x8d, - 0x55, 0x1b, 0xc7, 0x82, 0x16, 0x52, 0x3e, 0x56, 0x77, 0xc4, 0x31, 0xd9, 0x84, 0xfa, 0x98, 0x79, - 0xa3, 0x31, 0xc7, 0x6b, 0x55, 0x6d, 0x35, 0x13, 0x86, 0x0f, 0xa3, 0xe0, 0x92, 0x61, 0x9c, 0x37, - 0x6d, 0x39, 0xb1, 0xfe, 0x66, 0xc0, 0x6b, 0x37, 0x02, 0x43, 0xac, 0x3b, 0xa6, 0xf1, 0x58, 0xef, - 0x25, 0xc6, 0xe4, 0x6d, 0xb1, 0x2e, 0x75, 0x59, 0xa4, 0xf2, 0xcf, 0x9a, 0xba, 0xf1, 0x09, 0x12, - 0xd5, 0x45, 0x95, 0x08, 0x79, 0x04, 0xeb, 0x13, 0x1a, 0x73, 0x47, 0xfa, 0xaf, 0x83, 0xf9, 0xa5, - 0x9a, 0x8b, 0xa9, 0x4f, 0xa8, 0xf6, 0x73, 0xe1, 0x56, 0x4a, 0xbd, 0x35, 0xc9, 0x51, 0xc9, 0x09, - 0x6c, 0x0c, 0xae, 0x9e, 0x53, 0x9f, 0x7b, 0x3e, 0x73, 0x6e, 0xd8, 0xbc, 0xad, 0x96, 0x7a, 0x74, - 0xe9, 0xb9, 0xcc, 0x1f, 0x6a, 0x63, 0xdf, 0x4a, 0x54, 0x92, 0x8f, 0x11, 0x5b, 0x77, 0xa1, 0x95, - 0x8f, 0x62, 0xd2, 0x82, 0x0a, 0x9f, 0xab, 0x1b, 0x56, 0xf8, 0xdc, 0xb2, 0x12, 0x0f, 0x4c, 0x42, - 0xe9, 0x86, 0xcc, 0x1e, 0xb4, 0x0b, 0x61, 0x9d, 0x31, 0xb7, 0x91, 0x35, 0xb7, 0xd5, 0x86, 0xb5, - 0x5c, 0x34, 0x5b, 0x9f, 0x2f, 0x43, 0xd3, 0x66, 0x71, 0x28, 0x9c, 0x89, 0x1c, 0x81, 0xc9, 0xe6, - 0x43, 0x26, 0x13, 0xa9, 0x51, 0x48, 0x53, 0x52, 0xe6, 0x91, 0xe6, 0x8b, 0x80, 0x4e, 0x84, 0xc9, - 0x5e, 0x0e, 0x04, 0x6e, 0x15, 0x95, 0xb2, 0x28, 0xb0, 0x9f, 0x47, 0x81, 0x8d, 
0x82, 0x6c, 0x01, - 0x06, 0xf6, 0x72, 0x30, 0x50, 0x5c, 0x38, 0x87, 0x03, 0xf7, 0x4a, 0x70, 0xa0, 0x78, 0xfc, 0x05, - 0x40, 0x70, 0xaf, 0x04, 0x08, 0x3a, 0x37, 0xf6, 0x2a, 0x45, 0x82, 0xfd, 0x3c, 0x12, 0x14, 0xaf, - 0x53, 0x80, 0x82, 0x0f, 0xcb, 0xa0, 0xe0, 0x4e, 0x41, 0x67, 0x21, 0x16, 0xbc, 0x77, 0x03, 0x0b, - 0x36, 0x0b, 0xaa, 0x25, 0x60, 0x70, 0x2f, 0x97, 0xa5, 0xa1, 0xf4, 0x6e, 0xe5, 0x69, 0x9a, 0x7c, - 0xef, 0x26, 0x8e, 0x6c, 0x15, 0x3f, 0x6d, 0x19, 0x90, 0x1c, 0x14, 0x80, 0xe4, 0x76, 0xf1, 0x94, - 0x05, 0x24, 0x49, 0xf1, 0x60, 0x4f, 0xc4, 0x7d, 0xc1, 0xd3, 0x44, 0x8e, 0x60, 0x51, 0x14, 0x44, - 0x2a, 0x61, 0xcb, 0x89, 0xb5, 0x2b, 0x32, 0x51, 0xea, 0x5f, 0x2f, 0xc1, 0x0e, 0x74, 0xfa, 0x8c, - 0x77, 0x59, 0xbf, 0x32, 0x52, 0x5d, 0x8c, 0xe8, 0x6c, 0x16, 0x33, 0x55, 0x16, 0xcb, 0x40, 0x4a, - 0x25, 0x07, 0x29, 0xe4, 0x3b, 0xf0, 0x1a, 0xa6, 0x11, 0xb4, 0x8b, 0x93, 0x4b, 0x6b, 0x6d, 0xc1, - 0x90, 0x06, 0x91, 0xf9, 0xed, 0x1d, 0xb8, 0x95, 0x91, 0x15, 0x29, 0x16, 0x53, 0x58, 0x0d, 0x83, - 0x77, 0x3d, 0x91, 0x3e, 0x0e, 0xc3, 0x13, 0x1a, 0x8f, 0xad, 0x1f, 0xa5, 0xf7, 0x4f, 0xe1, 0x8a, - 0x40, 0x6d, 0x18, 0xb8, 0xf2, 0x5a, 0x6b, 0x36, 0x8e, 0x05, 0x84, 0x4d, 0x82, 0x11, 0xee, 0x6a, - 0xda, 0x62, 0x28, 0xa4, 0x92, 0x48, 0x31, 0x65, 0x48, 0x58, 0xbf, 0x34, 0xd2, 0xf5, 0x52, 0x04, - 0x2b, 0x03, 0x1b, 0xe3, 0x7f, 0x01, 0x9b, 0xca, 0xab, 0x81, 0x8d, 0x75, 0x6d, 0xa4, 0x5f, 0x24, - 0x81, 0x91, 0xaf, 0x77, 0x45, 0xe1, 0x1c, 0x9e, 0xef, 0xb2, 0x39, 0x06, 0x7c, 0xd5, 0x96, 0x13, - 0x8d, 0xf0, 0x75, 0x34, 0x73, 0x1e, 0xe1, 0x1b, 0x48, 0x93, 0x13, 0xf2, 0x06, 0xc2, 0x4f, 0x70, - 0xa1, 0x22, 0x71, 0xad, 0xa7, 0xca, 0xdc, 0x33, 0x41, 0xb4, 0x25, 0x2f, 0x93, 0x4c, 0xcd, 0x1c, - 0x76, 0xbd, 0x0e, 0xa6, 0x38, 0x68, 0x1c, 0xd2, 0x21, 0xc3, 0xc0, 0x32, 0xed, 0x94, 0x60, 0x9d, - 0x01, 0xb9, 0x19, 0xd0, 0xe4, 0x03, 0xa8, 0x71, 0x3a, 0x12, 0xf6, 0x16, 0x26, 0x6b, 0xf5, 0x64, - 0x65, 0xde, 0xfb, 0xf8, 0xfc, 0x8c, 0x7a, 0x51, 0x7f, 0x53, 0x98, 0xea, 0x1f, 0x2f, 0x76, 0x5a, - 0x42, 0x66, 0x3f, 0x98, 0x7a, 0x9c, 0x4d, 0x43, 0x7e, 0x65, 0xa3, 0x8e, 0xf5, 0x4f, 0x43, 0x24, - 0xfa, 0x5c, 0xa0, 0x97, 0x1a, 0x4e, 0x7b, 0x73, 0x25, 0x83, 0xc9, 0x5f, 0xcd, 0x98, 0xdf, 0x04, - 0x18, 0xd1, 0xd8, 0xf9, 0x8c, 0xfa, 0x9c, 0xb9, 0xca, 0xa2, 0xe6, 0x88, 0xc6, 0x3f, 0x43, 0x82, - 0x28, 0x60, 0x04, 0x7b, 0x16, 0x33, 0x17, 0x4d, 0x5b, 0xb5, 0x1b, 0x23, 0x1a, 0x3f, 0x8d, 0x99, - 0x9b, 0xdc, 0xab, 0xf1, 0xea, 0xf7, 0xca, 0xdb, 0xb1, 0x59, 0xb4, 0xe3, 0xbf, 0x32, 0x3e, 0x9c, - 0x62, 0xe0, 0xff, 0xff, 0xbd, 0xff, 0x6e, 0x08, 0xe8, 0xcf, 0x67, 0x59, 0x72, 0x0a, 0xaf, 0x25, - 0x71, 0xe4, 0xcc, 0x30, 0xbe, 0xb4, 0x2f, 0xbd, 0x3c, 0xfc, 0xd6, 0x2f, 0xf3, 0xe4, 0x98, 0xfc, - 0x18, 0xb6, 0x0a, 0x59, 0x20, 0x59, 0xb0, 0xf2, 0xd2, 0x64, 0x70, 0x3b, 0x9f, 0x0c, 0xf4, 0x7a, - 0xda, 0x12, 0xd5, 0xaf, 0xe1, 0xd9, 0xdf, 0x12, 0x75, 0x50, 0x16, 0x1b, 0xca, 0xbe, 0xa5, 0xf5, - 0x0b, 0x03, 0xda, 0x85, 0xc3, 0x90, 0x03, 0x00, 0x99, 0x5a, 0x63, 0xef, 0xb9, 0xae, 0xc9, 0xd7, - 0xd5, 0xc1, 0xd1, 0x64, 0x8f, 0xbd, 0xe7, 0xcc, 0x36, 0x07, 0x7a, 0x48, 0x3e, 0x82, 0x36, 0x53, - 0x95, 0x99, 0xce, 0x7d, 0x95, 0x1c, 0x48, 0xe9, 0xba, 0x4d, 0xdd, 0xb6, 0xc5, 0x72, 0x73, 0xeb, - 0x18, 0xcc, 0x64, 0x5d, 0xf2, 0x0d, 0x30, 0xa7, 0x74, 0xae, 0xea, 0x65, 0x59, 0x69, 0x35, 0xa7, - 0x74, 0x8e, 0xa5, 0x32, 0xd9, 0x82, 0x86, 0x60, 0x8e, 0xa8, 0xdc, 0xa1, 0x6a, 0xd7, 0xa7, 0x74, - 0xfe, 0x43, 0x1a, 0x5b, 0x7b, 0xd0, 0xca, 0x6f, 0xa2, 0x45, 0x35, 0x76, 0x49, 0xd1, 0xe3, 0x11, - 0xb3, 0x1e, 0x43, 0x2b, 0x5f, 0x92, 0x8a, 0x3c, 0x16, 0x05, 0x33, 0xdf, 0x45, 0xc1, 0x65, 0x5b, - 0x4e, 
0x44, 0x3f, 0x7a, 0x19, 0xc8, 0x4f, 0x97, 0xad, 0x41, 0xcf, 0x03, 0xce, 0x32, 0x85, 0xac, - 0x94, 0xb1, 0xfe, 0x50, 0x83, 0xba, 0xac, 0x8f, 0xc9, 0x9b, 0x99, 0x96, 0x04, 0xc1, 0xaf, 0xbf, - 0x72, 0xfd, 0x62, 0xa7, 0x81, 0x38, 0x71, 0xfa, 0x30, 0xed, 0x4f, 0xd2, 0x14, 0x58, 0xc9, 0xa5, - 0x40, 0xdd, 0x0c, 0x55, 0x5f, 0xb9, 0x19, 0xda, 0x82, 0x86, 0x3f, 0x9b, 0x3a, 0x7c, 0x1e, 0x63, - 0x24, 0x56, 0xed, 0xba, 0x3f, 0x9b, 0x3e, 0x99, 0xc7, 0xc2, 0xa6, 0x3c, 0xe0, 0x74, 0x82, 0x2c, - 0x19, 0x8a, 0x4d, 0x24, 0x08, 0xe6, 0x11, 0xac, 0x65, 0xe0, 0xd4, 0x73, 0x55, 0xad, 0xd6, 0xca, - 0x7e, 0xf1, 0xd3, 0x87, 0xea, 0xba, 0x2b, 0x09, 0xbc, 0x9e, 0xba, 0x64, 0x37, 0x5f, 0xfb, 0x23, - 0x0a, 0x4b, 0x28, 0xc8, 0x94, 0xf7, 0x02, 0x83, 0xc5, 0x01, 0x84, 0xbb, 0x49, 0x91, 0x26, 0x8a, - 0x34, 0x05, 0x01, 0x99, 0x6f, 0x41, 0x3b, 0x05, 0x32, 0x29, 0x62, 0xca, 0x55, 0x52, 0x32, 0x0a, - 0xbe, 0x0b, 0x1b, 0x3e, 0x9b, 0x73, 0xa7, 0x28, 0x0d, 0x28, 0x4d, 0x04, 0xef, 0x3c, 0xaf, 0xf1, - 0x6d, 0x68, 0xa5, 0x01, 0x89, 0xb2, 0x2b, 0xb2, 0x03, 0x4b, 0xa8, 0x28, 0x76, 0x07, 0x9a, 0x49, - 0x19, 0xb1, 0x8a, 0x02, 0x0d, 0x2a, 0xab, 0x87, 0xa4, 0x30, 0x89, 0x58, 0x3c, 0x9b, 0x70, 0xb5, - 0xc8, 0x1a, 0xca, 0x60, 0x61, 0x62, 0x4b, 0x3a, 0xca, 0xbe, 0x01, 0x6b, 0x49, 0x1c, 0xa0, 0x5c, - 0x0b, 0xe5, 0x56, 0x35, 0x11, 0x85, 0xf6, 0x60, 0x3d, 0x8c, 0x82, 0x30, 0x88, 0x59, 0xe4, 0x50, - 0xd7, 0x8d, 0x58, 0x1c, 0x77, 0xda, 0x72, 0x3d, 0x4d, 0x3f, 0x96, 0x64, 0xeb, 0xe7, 0xd0, 0x50, - 0xd6, 0x2f, 0xed, 0xd3, 0xbe, 0x0f, 0xab, 0x21, 0x8d, 0xc4, 0x99, 0xb2, 0xdd, 0x9a, 0xae, 0x96, - 0xcf, 0x68, 0x24, 0xda, 0xf3, 0x5c, 0xd3, 0xb6, 0x82, 0xf2, 0x92, 0x64, 0xdd, 0x83, 0xb5, 0x9c, - 0x8c, 0x08, 0x03, 0x74, 0x0a, 0x1d, 0x06, 0x38, 0x49, 0x76, 0xae, 0xa4, 0x3b, 0x5b, 0xf7, 0xc1, - 0x4c, 0x0c, 0x2d, 0x8a, 0x3a, 0x7d, 0x0f, 0x43, 0xd9, 0x4e, 0x4e, 0xb1, 0x11, 0x0d, 0x3e, 0x63, - 0x91, 0x2a, 0xe4, 0xe4, 0xc4, 0x7a, 0x0a, 0xed, 0x42, 0x3e, 0x25, 0xfb, 0xd0, 0x08, 0x67, 0x03, - 0x47, 0x3f, 0x20, 0xa4, 0x2d, 0xe7, 0xd9, 0x6c, 0xf0, 0x31, 0xbb, 0xd2, 0x2d, 0x67, 0x88, 0xb3, - 0x74, 0xd9, 0x4a, 0x76, 0xd9, 0x09, 0x34, 0x75, 0x68, 0x92, 0xef, 0x82, 0x99, 0xf8, 0x48, 0x21, - 0x81, 0x25, 0x5b, 0xab, 0x45, 0x53, 0x41, 0xf1, 0xa9, 0x63, 0x6f, 0xe4, 0x33, 0xd7, 0x49, 0xe3, - 0x01, 0xf7, 0x68, 0xda, 0x6d, 0xc9, 0xf8, 0x44, 0x3b, 0xbf, 0xf5, 0x2e, 0xd4, 0xe5, 0xd9, 0x84, - 0x7d, 0xc4, 0xca, 0xba, 0xce, 0x15, 0xe3, 0xd2, 0x4c, 0xfb, 0x47, 0x03, 0x9a, 0x3a, 0x45, 0x95, - 0x2a, 0xe5, 0x0e, 0x5d, 0xf9, 0xaa, 0x87, 0x5e, 0xf4, 0x08, 0xa0, 0xb3, 0x48, 0xed, 0x95, 0xb3, - 0xc8, 0x3e, 0x10, 0x99, 0x2c, 0x2e, 0x03, 0xee, 0xf9, 0x23, 0x47, 0xda, 0x5a, 0x66, 0x8d, 0x75, - 0xe4, 0x9c, 0x23, 0xe3, 0x4c, 0xd0, 0x0f, 0x3f, 0x5f, 0x86, 0xf6, 0x71, 0xff, 0xc1, 0xe9, 0x71, - 0x18, 0x4e, 0xbc, 0x21, 0xc5, 0xe2, 0xfa, 0x00, 0x6a, 0xd8, 0x3e, 0x94, 0x3c, 0x5c, 0x76, 0xcb, - 0xfa, 0x58, 0x72, 0x08, 0xcb, 0xd8, 0x45, 0x90, 0xb2, 0xf7, 0xcb, 0x6e, 0x69, 0x3b, 0x2b, 0x36, - 0x91, 0x7d, 0xc6, 0xcd, 0x67, 0xcc, 0x6e, 0x59, 0x4f, 0x4b, 0x3e, 0x02, 0x33, 0xad, 0xff, 0x17, - 0x3d, 0x66, 0x76, 0x17, 0x76, 0xb7, 0x42, 0x3f, 0xad, 0x95, 0x16, 0xbd, 0xc9, 0x75, 0x17, 0xb6, - 0x81, 0xe4, 0x08, 0x1a, 0xba, 0xc2, 0x2c, 0x7f, 0x6e, 0xec, 0x2e, 0xe8, 0x3c, 0x85, 0x79, 0x64, - 0x49, 0x5f, 0xf6, 0x26, 0xda, 0x2d, 0x6d, 0x8f, 0xc9, 0xfb, 0x50, 0x57, 0xb0, 0x5f, 0xfa, 0xe4, - 0xd8, 0x2d, 0xef, 0x1f, 0xc5, 0x25, 0xd3, 0xa6, 0x66, 0xd1, 0xbb, 0x6d, 0x77, 0x61, 0x1f, 0x4f, - 0x8e, 0x01, 0x32, 0x95, 0xf9, 0xc2, 0x07, 0xd9, 0xee, 0xe2, 0xfe, 0x9c, 0xdc, 0x87, 0x66, 0xfa, - 0xe6, 0x52, 0xfe, 0xc4, 0xda, 
0x5d, 0xd4, 0x32, 0xf7, 0x5f, 0xff, 0xf7, 0x5f, 0xb6, 0x8d, 0x5f, - 0x5f, 0x6f, 0x1b, 0xbf, 0xb9, 0xde, 0x36, 0xbe, 0xbc, 0xde, 0x36, 0x7e, 0x7f, 0xbd, 0x6d, 0xfc, - 0xf9, 0x7a, 0xdb, 0xf8, 0xed, 0x5f, 0xb7, 0x8d, 0x41, 0x1d, 0xdd, 0xff, 0xbd, 0xff, 0x04, 0x00, - 0x00, 0xff, 0xff, 0x2c, 0x0a, 0x65, 0x88, 0x52, 0x18, 0x00, 0x00, + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_07d64ea985a686e2) +} + +var fileDescriptor_types_07d64ea985a686e2 = []byte{ + // 2133 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x93, 0x1b, 0x47, + 0x15, 0xdf, 0xd1, 0x6a, 0x25, 0xcd, 0xdb, 0x5d, 0x49, 0x6e, 0x7f, 0xc9, 0x22, 0xac, 0x5d, 0x13, + 0x48, 0xbc, 0xc4, 0xd1, 0x06, 0x87, 0x50, 0xeb, 0x38, 0xa4, 0x6a, 0x65, 0x1b, 0x76, 0x2b, 0x01, + 0x96, 0xf1, 0x07, 0x17, 0xaa, 0xa6, 0x5a, 0x9a, 0xb6, 0x34, 0x65, 0x69, 0x66, 0x32, 0xd3, 0xda, + 0x68, 0x7d, 0xcc, 0x39, 0x87, 0x1c, 0xa8, 0xe2, 0x5f, 0xe0, 0x4f, 0xe0, 0xc8, 0x89, 0xca, 0x91, + 0x03, 0x67, 0x03, 0x4b, 0x71, 0x80, 0x2b, 0x45, 0x15, 0x47, 0xea, 0xbd, 0xee, 0xf9, 0xdc, 0x91, + 0x89, 0x03, 0x27, 0x2e, 0xd2, 0xf4, 0xfb, 0xe8, 0x8f, 0xd7, 0xef, 0xbd, 0xdf, 0x7b, 0x0d, 0x57, + 0xf8, 0x68, 0xec, 0xed, 0xc9, 0xd3, 0x50, 0xc4, 0xea, 0x77, 0x10, 0x46, 0x81, 0x0c, 0xd8, 0x06, + 0x0d, 0xfa, 0x6f, 0x4f, 0x3c, 0x39, 0x5d, 0x8c, 0x06, 0xe3, 0x60, 0xbe, 0x37, 0x09, 0x26, 0xc1, + 0x1e, 0x71, 0x47, 0x8b, 0xa7, 0x34, 0xa2, 0x01, 0x7d, 0x29, 0xad, 0xfe, 0xf5, 0x49, 0x10, 0x4c, + 0x66, 0x22, 0x93, 0x92, 0xde, 0x5c, 0xc4, 0x92, 0xcf, 0x43, 0x2d, 0xb0, 0x9f, 0x9b, 0x4f, 0x0a, + 0xdf, 0x15, 0xd1, 0xdc, 0xf3, 0x65, 0xfe, 0x73, 0xe6, 0x8d, 0xe2, 0xbd, 0x71, 0x30, 0x9f, 0x07, + 0x7e, 0x7e, 0x43, 0xfd, 0xbb, 0xff, 0x51, 0x73, 0x1c, 0x9d, 0x86, 0x32, 0xd8, 0x9b, 0x8b, 0xe8, + 0xd9, 0x4c, 0xe8, 0x3f, 0xa5, 0x6c, 0xfd, 0xae, 0x0e, 0x4d, 0x5b, 0x7c, 0xb2, 0x10, 0xb1, 0x64, + 0x37, 0xa1, 0x2e, 0xc6, 0xd3, 0xa0, 0x57, 0xbb, 0x61, 0xdc, 0xdc, 0xbc, 0xcd, 0x06, 0x6a, 0x11, + 0xcd, 0x7d, 0x30, 0x9e, 0x06, 0x87, 0x6b, 0x36, 0x49, 0xb0, 0xb7, 0x60, 0xe3, 0xe9, 0x6c, 0x11, + 0x4f, 0x7b, 0xeb, 0x24, 0x7a, 0xb1, 0x28, 0xfa, 0x43, 0x64, 0x1d, 0xae, 0xd9, 0x4a, 0x06, 0xa7, + 0xf5, 0xfc, 0xa7, 0x41, 0xaf, 0x5e, 0x35, 0xed, 0x91, 0xff, 0x94, 0xa6, 0x45, 0x09, 0xb6, 0x0f, + 0x10, 0x0b, 0xe9, 0x04, 0xa1, 0xf4, 0x02, 0xbf, 0xb7, 0x41, 0xf2, 0x57, 0x8b, 0xf2, 0x0f, 0x85, + 0xfc, 0x29, 0xb1, 0x0f, 0xd7, 0x6c, 0x33, 0x4e, 0x06, 0xa8, 0xe9, 0xf9, 0x9e, 0x74, 0xc6, 0x53, + 0xee, 0xf9, 0xbd, 0x46, 0x95, 0xe6, 0x91, 0xef, 0xc9, 0x7b, 0xc8, 0x46, 0x4d, 0x2f, 0x19, 0xe0, + 0x51, 0x3e, 0x59, 0x88, 0xe8, 0xb4, 0xd7, 0xac, 0x3a, 0xca, 0xcf, 0x90, 0x85, 0x47, 0x21, 0x19, + 0x76, 0x17, 0x36, 0x47, 0x62, 0xe2, 0xf9, 0xce, 0x68, 0x16, 0x8c, 0x9f, 0xf5, 0x5a, 0xa4, 0xd2, + 0x2b, 0xaa, 0x0c, 0x51, 0x60, 0x88, 0xfc, 0xc3, 0x35, 0x1b, 0x46, 0xe9, 0x88, 0xdd, 0x86, 0xd6, + 0x78, 0x2a, 0xc6, 0xcf, 0x1c, 0xb9, 0xec, 0x99, 0xa4, 0x79, 0xb9, 0xa8, 0x79, 0x0f, 0xb9, 0x8f, + 0x96, 0x87, 0x6b, 0x76, 0x73, 0xac, 0x3e, 0xd9, 0x7b, 0x60, 0x0a, 0xdf, 0xd5, 0xcb, 0x6d, 0x92, + 0xd2, 0x95, 0xd2, 0xbd, 0xf8, 0x6e, 0xb2, 0x58, 0x4b, 0xe8, 0x6f, 0x36, 0x80, 0x06, 0x3a, 0x8a, + 0x27, 0x7b, 0x5b, 0xa4, 0x73, 0xa9, 0xb4, 0x10, 0xf1, 0x0e, 0xd7, 0x6c, 0x2d, 0x85, 0xe6, 0x73, + 0xc5, 0xcc, 0x3b, 0x11, 0x11, 0x6e, 0xee, 0x62, 0x95, 0xf9, 0xee, 0x2b, 0x3e, 0x6d, 0xcf, 0x74, + 0x93, 0xc1, 0xb0, 0x09, 0x1b, 0x27, 0x7c, 0xb6, 0x10, 0xd6, 0x9b, 0xb0, 0x99, 0xf3, 0x14, 0xd6, + 0x83, 0xe6, 0x5c, 0xc4, 0x31, 0x9f, 0x88, 0x9e, 0x71, 0xc3, 0xb8, 0x69, 0xda, 
0xc9, 0xd0, 0x6a, + 0xc3, 0x56, 0xde, 0x4f, 0x72, 0x8a, 0xe8, 0x0b, 0xa8, 0x78, 0x22, 0xa2, 0x18, 0x1d, 0x40, 0x2b, + 0xea, 0xa1, 0xf5, 0x3e, 0x74, 0xcb, 0x4e, 0xc0, 0xba, 0xb0, 0xfe, 0x4c, 0x9c, 0x6a, 0x49, 0xfc, + 0x64, 0x97, 0xf4, 0x86, 0xc8, 0x8b, 0x4d, 0x5b, 0xef, 0xee, 0x8b, 0x5a, 0xaa, 0x9c, 0xfa, 0x01, + 0xdb, 0x87, 0x3a, 0x46, 0x21, 0x69, 0x6f, 0xde, 0xee, 0x0f, 0x54, 0x88, 0x0e, 0x92, 0x10, 0x1d, + 0x3c, 0x4a, 0x42, 0x74, 0xd8, 0xfa, 0xf2, 0xc5, 0xf5, 0xb5, 0x2f, 0xfe, 0x78, 0xdd, 0xb0, 0x49, + 0x83, 0x5d, 0xc3, 0xab, 0xe4, 0x9e, 0xef, 0x78, 0xae, 0x5e, 0xa7, 0x49, 0xe3, 0x23, 0x97, 0x1d, + 0x40, 0x77, 0x1c, 0xf8, 0xb1, 0xf0, 0xe3, 0x45, 0xec, 0x84, 0x3c, 0xe2, 0xf3, 0x58, 0x47, 0x49, + 0x72, 0x71, 0xf7, 0x12, 0xf6, 0x31, 0x71, 0xed, 0xce, 0xb8, 0x48, 0x60, 0x1f, 0x00, 0x9c, 0xf0, + 0x99, 0xe7, 0x72, 0x19, 0x44, 0x71, 0xaf, 0x7e, 0x63, 0x3d, 0xa7, 0xfc, 0x24, 0x61, 0x3c, 0x0e, + 0x5d, 0x2e, 0xc5, 0xb0, 0x8e, 0x3b, 0xb3, 0x73, 0xf2, 0xec, 0x0d, 0xe8, 0xf0, 0x30, 0x74, 0x62, + 0xc9, 0xa5, 0x70, 0x46, 0xa7, 0x52, 0xc4, 0x14, 0x49, 0x5b, 0xf6, 0x36, 0x0f, 0xc3, 0x87, 0x48, + 0x1d, 0x22, 0xd1, 0x72, 0xd3, 0x7b, 0x20, 0x27, 0x67, 0x0c, 0xea, 0x2e, 0x97, 0x9c, 0xac, 0xb1, + 0x65, 0xd3, 0x37, 0xd2, 0x42, 0x2e, 0xa7, 0xfa, 0x8c, 0xf4, 0xcd, 0xae, 0x40, 0x63, 0x2a, 0xbc, + 0xc9, 0x54, 0xd2, 0xb1, 0xd6, 0x6d, 0x3d, 0x42, 0xc3, 0x87, 0x51, 0x70, 0x22, 0x28, 0xce, 0x5b, + 0xb6, 0x1a, 0x58, 0x7f, 0x35, 0xe0, 0xc2, 0xb9, 0xc0, 0xc0, 0x79, 0xa7, 0x3c, 0x9e, 0x26, 0x6b, + 0xe1, 0x37, 0x7b, 0x0b, 0xe7, 0xe5, 0xae, 0x88, 0x74, 0xfe, 0xd9, 0xd6, 0x27, 0x3e, 0x24, 0xa2, + 0x3e, 0xa8, 0x16, 0x61, 0x0f, 0xa0, 0x3b, 0xe3, 0xb1, 0x74, 0x94, 0xff, 0x3a, 0x94, 0x5f, 0xd6, + 0x0b, 0x31, 0xf5, 0x31, 0x4f, 0xfc, 0x1c, 0xdd, 0x4a, 0xab, 0xb7, 0x67, 0x05, 0x2a, 0x3b, 0x84, + 0x4b, 0xa3, 0xd3, 0xe7, 0xdc, 0x97, 0x9e, 0x2f, 0x9c, 0x73, 0x36, 0xef, 0xe8, 0xa9, 0x1e, 0x9c, + 0x78, 0xae, 0xf0, 0xc7, 0x89, 0xb1, 0x2f, 0xa6, 0x2a, 0xe9, 0x65, 0xc4, 0xd6, 0x0d, 0x68, 0x17, + 0xa3, 0x98, 0xb5, 0xa1, 0x26, 0x97, 0xfa, 0x84, 0x35, 0xb9, 0xb4, 0xac, 0xd4, 0x03, 0xd3, 0x50, + 0x3a, 0x27, 0xb3, 0x0b, 0x9d, 0x52, 0x58, 0xe7, 0xcc, 0x6d, 0xe4, 0xcd, 0x6d, 0x75, 0x60, 0xbb, + 0x10, 0xcd, 0xd6, 0xe7, 0x1b, 0xd0, 0xb2, 0x45, 0x1c, 0xa2, 0x33, 0xb1, 0x7d, 0x30, 0xc5, 0x72, + 0x2c, 0x54, 0x22, 0x35, 0x4a, 0x69, 0x4a, 0xc9, 0x3c, 0x48, 0xf8, 0x18, 0xd0, 0xa9, 0x30, 0xdb, + 0x2d, 0x80, 0xc0, 0xc5, 0xb2, 0x52, 0x1e, 0x05, 0x6e, 0x15, 0x51, 0xe0, 0x52, 0x49, 0xb6, 0x04, + 0x03, 0xbb, 0x05, 0x18, 0x28, 0x4f, 0x5c, 0xc0, 0x81, 0x3b, 0x15, 0x38, 0x50, 0xde, 0xfe, 0x0a, + 0x20, 0xb8, 0x53, 0x01, 0x04, 0xbd, 0x73, 0x6b, 0x55, 0x22, 0xc1, 0xad, 0x22, 0x12, 0x94, 0x8f, + 0x53, 0x82, 0x82, 0x0f, 0xaa, 0xa0, 0xe0, 0x5a, 0x49, 0x67, 0x25, 0x16, 0xbc, 0x7b, 0x0e, 0x0b, + 0xae, 0x94, 0x54, 0x2b, 0xc0, 0xe0, 0x4e, 0x21, 0x4b, 0x43, 0xe5, 0xd9, 0xaa, 0xd3, 0x34, 0xfb, + 0xfe, 0x79, 0x1c, 0xb9, 0x5a, 0xbe, 0xda, 0x2a, 0x20, 0xd9, 0x2b, 0x01, 0xc9, 0xe5, 0xf2, 0x2e, + 0x4b, 0x48, 0x92, 0xe1, 0xc1, 0x2e, 0xc6, 0x7d, 0xc9, 0xd3, 0x30, 0x47, 0x88, 0x28, 0x0a, 0x22, + 0x9d, 0xb0, 0xd5, 0xc0, 0xba, 0x89, 0x99, 0x28, 0xf3, 0xaf, 0x97, 0x60, 0x07, 0x39, 0x7d, 0xce, + 0xbb, 0xac, 0x5f, 0x19, 0x99, 0x2e, 0x45, 0x74, 0x3e, 0x8b, 0x99, 0x3a, 0x8b, 0xe5, 0x20, 0xa5, + 0x56, 0x80, 0x14, 0xf6, 0x1d, 0xb8, 0x40, 0x69, 0x84, 0xec, 0xe2, 0x14, 0xd2, 0x5a, 0x07, 0x19, + 0xca, 0x20, 0x2a, 0xbf, 0xbd, 0x0d, 0x17, 0x73, 0xb2, 0x98, 0x62, 0x29, 0x85, 0xd5, 0x29, 0x78, + 0xbb, 0xa9, 0xf4, 0x41, 0x18, 0x1e, 0xf2, 0x78, 0x6a, 0xfd, 0x38, 0x3b, 0x7f, 0x06, 0x57, 0x0c, + 0xea, 
0xe3, 0xc0, 0x55, 0xc7, 0xda, 0xb6, 0xe9, 0x1b, 0x21, 0x6c, 0x16, 0x4c, 0x68, 0x55, 0xd3, + 0xc6, 0x4f, 0x94, 0x4a, 0x23, 0xc5, 0x54, 0x21, 0x61, 0xfd, 0xd2, 0xc8, 0xe6, 0xcb, 0x10, 0xac, + 0x0a, 0x6c, 0x8c, 0xff, 0x06, 0x6c, 0x6a, 0xaf, 0x06, 0x36, 0xd6, 0x99, 0x91, 0xdd, 0x48, 0x0a, + 0x23, 0x5f, 0xef, 0x88, 0xe8, 0x1c, 0x9e, 0xef, 0x8a, 0x25, 0x05, 0xfc, 0xba, 0xad, 0x06, 0x09, + 0xc2, 0x37, 0xc8, 0xcc, 0x45, 0x84, 0x6f, 0x12, 0x4d, 0x0d, 0xd8, 0xeb, 0x04, 0x3f, 0xc1, 0x53, + 0x1d, 0x89, 0xdb, 0x03, 0x5d, 0xe6, 0x1e, 0x23, 0xd1, 0x56, 0xbc, 0x5c, 0x32, 0x35, 0x0b, 0xd8, + 0xf5, 0x1a, 0x98, 0xb8, 0xd1, 0x38, 0xe4, 0x63, 0x41, 0x81, 0x65, 0xda, 0x19, 0xc1, 0x3a, 0x06, + 0x76, 0x3e, 0xa0, 0xd9, 0xfb, 0x50, 0x97, 0x7c, 0x82, 0xf6, 0x46, 0x93, 0xb5, 0x07, 0xaa, 0x32, + 0x1f, 0x7c, 0xf4, 0xe4, 0x98, 0x7b, 0xd1, 0xf0, 0x0a, 0x9a, 0xea, 0xef, 0x2f, 0xae, 0xb7, 0x51, + 0xe6, 0x56, 0x30, 0xf7, 0xa4, 0x98, 0x87, 0xf2, 0xd4, 0x26, 0x1d, 0xeb, 0x1f, 0x06, 0x26, 0xfa, + 0x42, 0xa0, 0x57, 0x1a, 0x2e, 0xf1, 0xe6, 0x5a, 0x0e, 0x93, 0xbf, 0x9a, 0x31, 0xbf, 0x09, 0x30, + 0xe1, 0xb1, 0xf3, 0x29, 0xf7, 0xa5, 0x70, 0xb5, 0x45, 0xcd, 0x09, 0x8f, 0x7f, 0x4e, 0x04, 0x2c, + 0x60, 0x90, 0xbd, 0x88, 0x85, 0x4b, 0xa6, 0x5d, 0xb7, 0x9b, 0x13, 0x1e, 0x3f, 0x8e, 0x85, 0x9b, + 0x9e, 0xab, 0xf9, 0xea, 0xe7, 0x2a, 0xda, 0xb1, 0x55, 0xb6, 0xe3, 0x3f, 0x73, 0x3e, 0x9c, 0x61, + 0xe0, 0xff, 0xff, 0xb9, 0xff, 0x66, 0x20, 0xf4, 0x17, 0xb3, 0x2c, 0x3b, 0x82, 0x0b, 0x69, 0x1c, + 0x39, 0x0b, 0x8a, 0xaf, 0xc4, 0x97, 0x5e, 0x1e, 0x7e, 0xdd, 0x93, 0x22, 0x39, 0x66, 0x3f, 0x81, + 0xab, 0xa5, 0x2c, 0x90, 0x4e, 0x58, 0x7b, 0x69, 0x32, 0xb8, 0x5c, 0x4c, 0x06, 0xc9, 0x7c, 0x89, + 0x25, 0xd6, 0xbf, 0x86, 0x67, 0x7f, 0x0b, 0xeb, 0xa0, 0x3c, 0x36, 0x54, 0xdd, 0xa5, 0xf5, 0x99, + 0x01, 0x9d, 0xd2, 0x66, 0xd8, 0x1e, 0x80, 0x4a, 0xad, 0xb1, 0xf7, 0x3c, 0xa9, 0xc9, 0xbb, 0x7a, + 0xe3, 0x64, 0xb2, 0x87, 0xde, 0x73, 0x61, 0x9b, 0xa3, 0xe4, 0x93, 0x7d, 0x08, 0x1d, 0xa1, 0x2b, + 0xb3, 0x24, 0xf7, 0xd5, 0x0a, 0x20, 0x95, 0xd4, 0x6d, 0xfa, 0xb4, 0x6d, 0x51, 0x18, 0x5b, 0x07, + 0x60, 0xa6, 0xf3, 0xb2, 0x6f, 0x80, 0x39, 0xe7, 0x4b, 0x5d, 0x2f, 0xab, 0x4a, 0xab, 0x35, 0xe7, + 0x4b, 0x2a, 0x95, 0xd9, 0x55, 0x68, 0x22, 0x73, 0xc2, 0xd5, 0x0a, 0xeb, 0x76, 0x63, 0xce, 0x97, + 0x3f, 0xe2, 0xb1, 0xb5, 0x0b, 0xed, 0xe2, 0x22, 0x89, 0x68, 0x82, 0x5d, 0x4a, 0xf4, 0x60, 0x22, + 0xac, 0x87, 0xd0, 0x2e, 0x96, 0xa4, 0x98, 0xc7, 0xa2, 0x60, 0xe1, 0xbb, 0x24, 0xb8, 0x61, 0xab, + 0x01, 0xf6, 0xa3, 0x27, 0x81, 0xba, 0xba, 0x7c, 0x0d, 0xfa, 0x24, 0x90, 0x22, 0x57, 0xc8, 0x2a, + 0x19, 0xeb, 0xb3, 0x0d, 0x68, 0xa8, 0xfa, 0x98, 0x0d, 0x8a, 0x7d, 0x13, 0xde, 0x9b, 0xd6, 0x54, + 0x54, 0xad, 0x98, 0x42, 0xdf, 0x1b, 0xe5, 0x16, 0x66, 0xb8, 0x79, 0xf6, 0xe2, 0x7a, 0x93, 0x70, + 0xe5, 0xe8, 0x7e, 0xd6, 0xcf, 0xac, 0x2a, 0xf7, 0x93, 0xe6, 0xa9, 0xfe, 0xca, 0xcd, 0xd3, 0x55, + 0x68, 0xfa, 0x8b, 0xb9, 0x23, 0x97, 0xb1, 0x8e, 0xcf, 0x86, 0xbf, 0x98, 0x3f, 0x5a, 0xc6, 0x78, + 0x07, 0x32, 0x90, 0x7c, 0x46, 0x2c, 0x15, 0x9d, 0x2d, 0x22, 0x20, 0x73, 0x1f, 0xb6, 0x73, 0xf0, + 0xeb, 0xb9, 0xba, 0x4a, 0x6b, 0xe7, 0x3d, 0xe4, 0xe8, 0xbe, 0x3e, 0xe5, 0x66, 0x0a, 0xc7, 0x47, + 0x2e, 0xbb, 0x59, 0xec, 0x15, 0x08, 0xb5, 0x5b, 0xe4, 0x8c, 0xb9, 0x76, 0x00, 0x31, 0x1b, 0x37, + 0x80, 0xee, 0xa9, 0x44, 0x4c, 0x12, 0x69, 0x21, 0x81, 0x98, 0x6f, 0x42, 0x27, 0x03, 0x3e, 0x25, + 0x02, 0x6a, 0x96, 0x8c, 0x4c, 0x82, 0xef, 0xc0, 0x25, 0x5f, 0x2c, 0xa5, 0x53, 0x96, 0xde, 0x24, + 0x69, 0x86, 0xbc, 0x27, 0x45, 0x8d, 0x6f, 0x43, 0x3b, 0x0b, 0x60, 0x92, 0xdd, 0x52, 0x1d, 0x5b, + 0x4a, 0x25, 0xb1, 0x6b, 0xd0, 
0x4a, 0xcb, 0x8e, 0x6d, 0x12, 0x68, 0x72, 0x55, 0x6d, 0xa4, 0x85, + 0x4c, 0x24, 0xe2, 0xc5, 0x4c, 0xea, 0x49, 0xda, 0x24, 0x43, 0x85, 0x8c, 0xad, 0xe8, 0x24, 0xfb, + 0x3a, 0x6c, 0xa7, 0x71, 0x43, 0x72, 0x1d, 0x92, 0xdb, 0x4a, 0x88, 0x24, 0xb4, 0x0b, 0xdd, 0x30, + 0x0a, 0xc2, 0x20, 0x16, 0x91, 0xc3, 0x5d, 0x37, 0x12, 0x71, 0xdc, 0xeb, 0xaa, 0xf9, 0x12, 0xfa, + 0x81, 0x22, 0x5b, 0xdf, 0x85, 0xa6, 0xf6, 0x31, 0x74, 0x69, 0xb2, 0x3a, 0xb9, 0x60, 0xdd, 0x56, + 0x03, 0xcc, 0xdc, 0x07, 0x61, 0x48, 0x5e, 0x56, 0xb7, 0xf1, 0xd3, 0xfa, 0x05, 0x34, 0xf5, 0x85, + 0x55, 0xb6, 0x82, 0x3f, 0x80, 0xad, 0x90, 0x47, 0x78, 0x8c, 0x7c, 0x43, 0x98, 0x14, 0xe4, 0xc7, + 0x3c, 0x92, 0x0f, 0x85, 0x2c, 0xf4, 0x85, 0x9b, 0x24, 0xaf, 0x48, 0xd6, 0x1d, 0xd8, 0x2e, 0xc8, + 0xe0, 0xb6, 0xc8, 0x8f, 0x92, 0x48, 0xa3, 0x41, 0xba, 0x72, 0x2d, 0x5b, 0xd9, 0xba, 0x0b, 0x66, + 0x7a, 0x37, 0x58, 0x37, 0x26, 0x47, 0x37, 0xb4, 0xb9, 0xd5, 0x90, 0x7a, 0xdd, 0xe0, 0x53, 0x11, + 0xe9, 0x98, 0x50, 0x03, 0xeb, 0x31, 0x74, 0x4a, 0x29, 0x9b, 0xdd, 0x82, 0x66, 0xb8, 0x18, 0x39, + 0xc9, 0x1b, 0x45, 0xd6, 0xd5, 0x1e, 0x2f, 0x46, 0x1f, 0x89, 0xd3, 0xa4, 0xab, 0x0d, 0x69, 0x94, + 0x4d, 0x5b, 0xcb, 0x4f, 0x3b, 0x83, 0x56, 0x12, 0xfd, 0xec, 0x7b, 0x60, 0xa6, 0x6e, 0x55, 0xca, + 0x91, 0xe9, 0xd2, 0x7a, 0xd2, 0x4c, 0x10, 0xbd, 0x23, 0xf6, 0x26, 0xbe, 0x70, 0x9d, 0x2c, 0x84, + 0x68, 0x8d, 0x96, 0xdd, 0x51, 0x8c, 0x8f, 0x93, 0x78, 0xb1, 0xde, 0x81, 0x86, 0xda, 0x1b, 0xda, + 0x07, 0x67, 0x4e, 0x4a, 0x69, 0xfc, 0xae, 0x4c, 0xe6, 0x7f, 0x30, 0xa0, 0x95, 0x64, 0xc1, 0x4a, + 0xa5, 0xc2, 0xa6, 0x6b, 0x5f, 0x75, 0xd3, 0xff, 0xfb, 0xc4, 0x73, 0x0b, 0x98, 0xca, 0x2f, 0x27, + 0x81, 0xf4, 0xfc, 0x89, 0xa3, 0x6c, 0xad, 0x72, 0x50, 0x97, 0x38, 0x4f, 0x88, 0x71, 0x8c, 0xf4, + 0xdb, 0x9f, 0x6f, 0x40, 0xe7, 0x60, 0x78, 0xef, 0xe8, 0x20, 0x0c, 0x67, 0xde, 0x98, 0x53, 0xfd, + 0xbe, 0x07, 0x75, 0xea, 0x50, 0x2a, 0xde, 0x46, 0xfb, 0x55, 0xad, 0x32, 0xbb, 0x0d, 0x1b, 0xd4, + 0xa8, 0xb0, 0xaa, 0x27, 0xd2, 0x7e, 0x65, 0xc7, 0x8c, 0x8b, 0xa8, 0x56, 0xe6, 0xfc, 0x4b, 0x69, + 0xbf, 0xaa, 0x6d, 0x66, 0x1f, 0x82, 0x99, 0xb5, 0x18, 0xab, 0xde, 0x4b, 0xfb, 0x2b, 0x1b, 0x68, + 0xd4, 0xcf, 0xca, 0xb1, 0x55, 0xcf, 0x7e, 0xfd, 0x95, 0x9d, 0x26, 0xdb, 0x87, 0x66, 0x52, 0xc4, + 0x56, 0xbf, 0x68, 0xf6, 0x57, 0x34, 0xb7, 0x68, 0x1e, 0xd5, 0x35, 0x54, 0x3d, 0xbb, 0xf6, 0x2b, + 0x3b, 0x70, 0xf6, 0x1e, 0x34, 0x74, 0x65, 0x51, 0xf9, 0xaa, 0xd9, 0xaf, 0x6e, 0x51, 0xf1, 0x90, + 0x59, 0xdf, 0xb4, 0xea, 0x69, 0xb8, 0xbf, 0xf2, 0xa9, 0x80, 0x1d, 0x00, 0xe4, 0x8a, 0xff, 0x95, + 0x6f, 0xbe, 0xfd, 0xd5, 0x4f, 0x00, 0xec, 0x2e, 0xb4, 0xb2, 0x67, 0x9d, 0xea, 0x57, 0xdc, 0xfe, + 0xaa, 0xae, 0x7c, 0xf8, 0xda, 0xbf, 0xfe, 0xbc, 0x63, 0xfc, 0xfa, 0x6c, 0xc7, 0xf8, 0xcd, 0xd9, + 0x8e, 0xf1, 0xe5, 0xd9, 0x8e, 0xf1, 0xfb, 0xb3, 0x1d, 0xe3, 0x4f, 0x67, 0x3b, 0xc6, 0x6f, 0xff, + 0xb2, 0x63, 0x8c, 0x1a, 0xe4, 0xfe, 0xef, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x43, 0x2c, + 0xa9, 0xb5, 0x18, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index 39c96e0e3..517369b13 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -231,31 +231,38 @@ message LastCommitInfo { message Header { // basic block info - string chain_id = 1 [(gogoproto.customname)="ChainID"]; - int64 height = 2; - google.protobuf.Timestamp time = 3 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true]; - int64 num_txs = 4; - int64 total_txs = 5; + Version version = 1 [(gogoproto.nullable)=false]; + string chain_id = 2 [(gogoproto.customname)="ChainID"]; + int64 height = 3; + google.protobuf.Timestamp 
time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true]; + int64 num_txs = 5; + int64 total_txs = 6; // prev block info - BlockID last_block_id = 6 [(gogoproto.nullable)=false]; + BlockID last_block_id = 7 [(gogoproto.nullable)=false]; // hashes of block data - bytes last_commit_hash = 7; // commit from validators from the last block - bytes data_hash = 8; // transactions + bytes last_commit_hash = 8; // commit from validators from the last block + bytes data_hash = 9; // transactions // hashes from the app output from the prev block - bytes validators_hash = 9; // validators for the current block - bytes next_validators_hash = 10; // validators for the next block - bytes consensus_hash = 11; // consensus params for current block - bytes app_hash = 12; // state after txs from the previous block - bytes last_results_hash = 13;// root hash of all results from the txs from the previous block + bytes validators_hash = 10; // validators for the current block + bytes next_validators_hash = 11; // validators for the next block + bytes consensus_hash = 12; // consensus params for current block + bytes app_hash = 13; // state after txs from the previous block + bytes last_results_hash = 14;// root hash of all results from the txs from the previous block // consensus info - bytes evidence_hash = 14; // evidence included in the block - bytes proposer_address = 15; // original proposer of the block + bytes evidence_hash = 15; // evidence included in the block + bytes proposer_address = 16; // original proposer of the block } +message Version { + uint64 Block = 1; + uint64 App = 2; +} + + message BlockID { bytes hash = 1; PartSetHeader parts_header = 2 [(gogoproto.nullable)=false]; diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index 0ae0fea0d..53c5cd94a 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -1703,6 +1703,62 @@ func TestHeaderMarshalTo(t *testing.T) { } } +func TestVersionProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Version{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestVersionMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Version{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func 
TestBlockIDProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2635,6 +2691,24 @@ func TestHeaderJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestVersionJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Version{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestBlockIDJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3601,6 +3675,34 @@ func TestHeaderProtoCompactText(t *testing.T) { } } +func TestVersionProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &Version{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestVersionProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &Version{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestBlockIDProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4457,6 +4559,28 @@ func TestHeaderSize(t *testing.T) { } } +func TestVersionSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestBlockIDSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index 15e246249..54b7c899d 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -338,6 +338,7 @@ Commit are included in the header of the next block. ### Header - **Fields**: + - `Version (Version)`: Version of the blockchain and the application - `ChainID (string)`: ID of the blockchain - `Height (int64)`: Height of the block in the chain - `Time (google.protobuf.Timestamp)`: Time of the block. It is the proposer's @@ -363,6 +364,15 @@ Commit are included in the header of the next block. 
- Provides the proposer of the current block, for use in proposer-based reward mechanisms. +### Version + +- **Fields**: + - `Block (uint64)`: Protocol version of the blockchain data structures. + - `App (uint64)`: Protocol version of the application. +- **Usage**: + - Block version should be static in the life of a blockchain. + - App version may be updated over time by the application. + ### Validator - **Fields**: diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index 029b64fac..c5291ed45 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -8,6 +8,7 @@ The Tendermint blockchains consists of a short list of basic data types: - `Block` - `Header` +- `Version` - `BlockID` - `Time` - `Data` (for transactions) @@ -38,6 +39,7 @@ the data in the current block, the previous block, and the results returned by t ```go type Header struct { // basic block info + Version Version ChainID string Height int64 Time Time @@ -65,6 +67,19 @@ type Header struct { Further details on each of these fields is described below. +## Version + +The `Version` contains the protocol version for the blockchain and the +application as two `uint64` values: + +```go +type Version struct { + Block uint64 + App uint64 +} +``` + + ## BlockID The `BlockID` contains two distinct Merkle roots of the block. @@ -200,6 +215,15 @@ See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockc A Header is valid if its corresponding fields are valid. +### Version + +``` +block.Version.Block == state.Version.Block +block.Version.App == state.Version.App +``` + +The block version must match the state version. + ### ChainID ``` diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md index e904bb339..a0badd718 100644 --- a/docs/spec/blockchain/state.md +++ b/docs/spec/blockchain/state.md @@ -15,6 +15,7 @@ validation. ```go type State struct { + Version Version LastResults []Result AppHash []byte diff --git a/node/node.go b/node/node.go index 9939f1c65..12e0b8e67 100644 --- a/node/node.go +++ b/node/node.go @@ -208,6 +208,15 @@ func NewNode(config *cfg.Config, // reload the state (it may have been updated by the handshake) state = sm.LoadState(stateDB) + if state.Version.Consensus.Block != version.BlockProtocol { + return nil, fmt.Errorf( + "Block version of the software does not match that of the state.\n"+ + "Got version.BlockProtocol=%v, state.Version.Consensus.Block=%v", + version.BlockProtocol, + state.Version.Consensus.Block, + ) + } + // If an address is provided, listen on the socket for a // connection from an external signing process. if config.PrivValidatorListenAddr != "" { diff --git a/state/execution.go b/state/execution.go index 611efa516..68298a8d2 100644 --- a/state/execution.go +++ b/state/execution.go @@ -398,9 +398,13 @@ func updateState( lastHeightParamsChanged = header.Height + 1 } + // TODO: allow app to upgrade version + nextVersion := state.Version + // NOTE: the AppHash has not been populated. // It will be filled on state.Save. 
return State{ + Version: nextVersion, ChainID: state.ChainID, LastBlockHeight: header.Height, LastBlockTotalTx: state.LastBlockTotalTx + header.NumTxs, diff --git a/state/state.go b/state/state.go index 23c0d632c..aedb2b001 100644 --- a/state/state.go +++ b/state/state.go @@ -8,6 +8,7 @@ import ( "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" ) // database keys @@ -17,6 +18,25 @@ var ( //----------------------------------------------------------------------------- +// Version is for versioning the State. +// It holds the Block and App version needed for making blocks, +// and the software version to support upgrades to the format of +// the State as stored on disk. +type Version struct { + Consensus version.Consensus + Software string +} + +var initStateVersion = Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: 0, + }, + Software: version.TMCoreSemVer, +} + +//----------------------------------------------------------------------------- + // State is a short description of the latest committed block of the Tendermint consensus. // It keeps all information necessary to validate new blocks, // including the last validator set and the consensus params. @@ -25,6 +45,8 @@ var ( // Instead, use state.Copy() or state.NextState(...). // NOTE: not goroutine-safe. type State struct { + Version Version + // immutable ChainID string @@ -59,6 +81,7 @@ type State struct { // Copy makes a copy of the State for mutating. func (state State) Copy() State { return State{ + Version: state.Version, ChainID: state.ChainID, LastBlockHeight: state.LastBlockHeight, @@ -114,6 +137,7 @@ func (state State) MakeBlock( block := types.MakeBlock(height, txs, commit, evidence) // Fill rest of header with state data. + block.Version = state.Version.Consensus block.ChainID = state.ChainID // Set time @@ -217,7 +241,7 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { } return State{ - + Version: initStateVersion, ChainID: genDoc.ChainID, LastBlockHeight: 0, diff --git a/state/state_test.go b/state/state_test.go index 2c777307a..b1f24d301 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -319,9 +319,11 @@ func TestStateMakeBlock(t *testing.T) { defer tearDown(t) proposerAddress := state.Validators.GetProposer().Address + stateVersion := state.Version.Consensus block := makeBlock(state, 2) - // test we set proposer address + // test we set some fields + assert.Equal(t, stateVersion, block.Version) assert.Equal(t, proposerAddress, block.ProposerAddress) } diff --git a/state/validation.go b/state/validation.go index a308870e5..ff1791e2b 100644 --- a/state/validation.go +++ b/state/validation.go @@ -20,6 +20,13 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } // Validate basic info. + if block.Version != state.Version.Consensus { + return fmt.Errorf( + "Wrong Block.Header.Version. Expected %v, got %v", + state.Version.Consensus, + block.Version, + ) + } if block.ChainID != state.ChainID { return fmt.Errorf( "Wrong Block.Header.ChainID. 
Expected %v, got %v", diff --git a/state/validation_test.go b/state/validation_test.go index 3c58c7130..f89fbdea9 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" @@ -26,13 +27,20 @@ func TestValidateBlockHeader(t *testing.T) { err := blockExec.ValidateBlock(state, block) require.NoError(t, err) + // some bad values wrongHash := tmhash.Sum([]byte("this hash is wrong")) + wrongVersion1 := state.Version.Consensus + wrongVersion1.Block += 1 + wrongVersion2 := state.Version.Consensus + wrongVersion2.App += 1 // Manipulation of any header field causes failure. testCases := []struct { name string malleateBlock func(block *types.Block) }{ + {"Version wrong1", func(block *types.Block) { block.Version = wrongVersion1 }}, + {"Version wrong2", func(block *types.Block) { block.Version = wrongVersion2 }}, {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, {"Height wrong", func(block *types.Block) { block.Height += 10 }}, {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 3600 * 24) }}, diff --git a/types/block.go b/types/block.go index 45a5b8c37..06ad55fcc 100644 --- a/types/block.go +++ b/types/block.go @@ -10,11 +10,12 @@ import ( "github.com/tendermint/tendermint/crypto/merkle" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/version" ) const ( // MaxHeaderBytes is a maximum header size (including amino overhead). - MaxHeaderBytes int64 = 511 + MaxHeaderBytes int64 = 534 // MaxAminoOverheadForBlock - maximum amino overhead to encode a block (up to // MaxBlockSizeBytes in size) not including it's parts except Data. @@ -27,7 +28,6 @@ const ( ) // Block defines the atomic unit of a Tendermint blockchain. 
-// TODO: add Version byte type Block struct { mtx sync.Mutex Header `json:"header"` @@ -258,16 +258,16 @@ func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { //----------------------------------------------------------------------------- // Header defines the structure of a Tendermint block header -// TODO: limit header size // NOTE: changes to the Header should be duplicated in the abci Header // and in /docs/spec/blockchain/blockchain.md type Header struct { // basic block info - ChainID string `json:"chain_id"` - Height int64 `json:"height"` - Time time.Time `json:"time"` - NumTxs int64 `json:"num_txs"` - TotalTxs int64 `json:"total_txs"` + Version version.Consensus `json:"version"` + ChainID string `json:"chain_id"` + Height int64 `json:"height"` + Time time.Time `json:"time"` + NumTxs int64 `json:"num_txs"` + TotalTxs int64 `json:"total_txs"` // prev block info LastBlockID BlockID `json:"last_block_id"` @@ -297,6 +297,7 @@ func (h *Header) Hash() cmn.HexBytes { return nil } return merkle.SimpleHashFromMap(map[string][]byte{ + "Version": cdcEncode(h.Version), "ChainID": cdcEncode(h.ChainID), "Height": cdcEncode(h.Height), "Time": cdcEncode(h.Time), @@ -321,6 +322,7 @@ func (h *Header) StringIndented(indent string) string { return "nil-Header" } return fmt.Sprintf(`Header{ +%s Version: %v %s ChainID: %v %s Height: %v %s Time: %v @@ -337,6 +339,7 @@ func (h *Header) StringIndented(indent string) string { %s Evidence: %v %s Proposer: %v %s}#%v`, + indent, h.Version, indent, h.ChainID, indent, h.Height, indent, h.Time, @@ -538,6 +541,7 @@ func (sh SignedHeader) ValidateBasic(chainID string) error { if sh.Commit == nil { return errors.New("SignedHeader missing commit (precommit votes).") } + // Check ChainID. if sh.ChainID != chainID { return fmt.Errorf("Header belongs to another chain '%s' not '%s'", diff --git a/types/block_test.go b/types/block_test.go index 7abd79d79..d268e411e 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/version" ) func TestMain(m *testing.M) { @@ -242,6 +243,7 @@ func TestMaxHeaderBytes(t *testing.T) { } h := Header{ + Version: version.Consensus{math.MaxInt64, math.MaxInt64}, ChainID: maxChainID, Height: math.MaxInt64, Time: time.Now().UTC(), @@ -286,9 +288,9 @@ func TestBlockMaxDataBytes(t *testing.T) { }{ 0: {-10, 1, 0, true, 0}, 1: {10, 1, 0, true, 0}, - 2: {721, 1, 0, true, 0}, - 3: {722, 1, 0, false, 0}, - 4: {723, 1, 0, false, 1}, + 2: {744, 1, 0, true, 0}, + 3: {745, 1, 0, false, 0}, + 4: {746, 1, 0, false, 1}, } for i, tc := range testCases { @@ -314,9 +316,9 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { }{ 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, - 2: {801, 1, true, 0}, - 3: {802, 1, false, 0}, - 4: {803, 1, false, 1}, + 2: {826, 1, true, 0}, + 3: {827, 1, false, 0}, + 4: {828, 1, false, 1}, } for i, tc := range testCases { diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go index ab1c66cfb..446b39197 100644 --- a/types/proto3/block.pb.go +++ b/types/proto3/block.pb.go @@ -1,7 +1,19 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: types/proto3/block.proto +// source: block.proto -//nolint +/* +Package proto3 is a generated protocol buffer package. 
+ +It is generated from these files: + block.proto + +It has these top-level messages: + PartSetHeader + BlockID + Header + Version + Timestamp +*/ package proto3 import proto "github.com/golang/protobuf/proto" @@ -20,36 +32,14 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type PartSetHeader struct { - Total int32 `protobuf:"zigzag32,1,opt,name=Total,proto3" json:"Total,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Total int32 `protobuf:"zigzag32,1,opt,name=Total" json:"Total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` } -func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } -func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } -func (*PartSetHeader) ProtoMessage() {} -func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_block_c8c1dcbe91697ccd, []int{0} -} -func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PartSetHeader.Unmarshal(m, b) -} -func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) -} -func (dst *PartSetHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartSetHeader.Merge(dst, src) -} -func (m *PartSetHeader) XXX_Size() int { - return xxx_messageInfo_PartSetHeader.Size(m) -} -func (m *PartSetHeader) XXX_DiscardUnknown() { - xxx_messageInfo_PartSetHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *PartSetHeader) GetTotal() int32 { if m != nil { @@ -66,36 +56,14 @@ func (m *PartSetHeader) GetHash() []byte { } type BlockID struct { - Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` - PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader,proto3" json:"PartsHeader,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlockID) Reset() { *m = BlockID{} } -func (m *BlockID) String() string { return proto.CompactTextString(m) } -func (*BlockID) ProtoMessage() {} -func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_block_c8c1dcbe91697ccd, []int{1} -} -func (m *BlockID) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BlockID.Unmarshal(m, b) -} -func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) -} -func (dst *BlockID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockID.Merge(dst, src) -} -func (m *BlockID) XXX_Size() int { - return xxx_messageInfo_BlockID.Size(m) -} -func (m *BlockID) XXX_DiscardUnknown() { - xxx_messageInfo_BlockID.DiscardUnknown(m) + Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` + PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` } -var xxx_messageInfo_BlockID proto.InternalMessageInfo +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func 
(*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *BlockID) GetHash() []byte { if m != nil { @@ -113,52 +81,39 @@ func (m *BlockID) GetPartsHeader() *PartSetHeader { type Header struct { // basic block info - ChainID string `protobuf:"bytes,1,opt,name=ChainID,proto3" json:"ChainID,omitempty"` - Height int64 `protobuf:"zigzag64,2,opt,name=Height,proto3" json:"Height,omitempty"` - Time *Timestamp `protobuf:"bytes,3,opt,name=Time,proto3" json:"Time,omitempty"` - NumTxs int64 `protobuf:"zigzag64,4,opt,name=NumTxs,proto3" json:"NumTxs,omitempty"` - TotalTxs int64 `protobuf:"zigzag64,5,opt,name=TotalTxs,proto3" json:"TotalTxs,omitempty"` + Version *Version `protobuf:"bytes,1,opt,name=Version" json:"Version,omitempty"` + ChainID string `protobuf:"bytes,2,opt,name=ChainID" json:"ChainID,omitempty"` + Height int64 `protobuf:"zigzag64,3,opt,name=Height" json:"Height,omitempty"` + Time *Timestamp `protobuf:"bytes,4,opt,name=Time" json:"Time,omitempty"` + NumTxs int64 `protobuf:"zigzag64,5,opt,name=NumTxs" json:"NumTxs,omitempty"` + TotalTxs int64 `protobuf:"zigzag64,6,opt,name=TotalTxs" json:"TotalTxs,omitempty"` // prev block info - LastBlockID *BlockID `protobuf:"bytes,6,opt,name=LastBlockID,proto3" json:"LastBlockID,omitempty"` + LastBlockID *BlockID `protobuf:"bytes,7,opt,name=LastBlockID" json:"LastBlockID,omitempty"` // hashes of block data - LastCommitHash []byte `protobuf:"bytes,7,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` - DataHash []byte `protobuf:"bytes,8,opt,name=DataHash,proto3" json:"DataHash,omitempty"` + LastCommitHash []byte `protobuf:"bytes,8,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` + DataHash []byte `protobuf:"bytes,9,opt,name=DataHash,proto3" json:"DataHash,omitempty"` // hashes from the app output from the prev block - ValidatorsHash []byte `protobuf:"bytes,9,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` - ConsensusHash []byte `protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` - AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` - LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` + ValidatorsHash []byte `protobuf:"bytes,10,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` + NextValidatorsHash []byte `protobuf:"bytes,11,opt,name=NextValidatorsHash,proto3" json:"NextValidatorsHash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,12,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` + AppHash []byte `protobuf:"bytes,13,opt,name=AppHash,proto3" json:"AppHash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,14,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` // consensus info - EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` - ProposerAddress []byte `protobuf:"bytes,14,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + EvidenceHash []byte `protobuf:"bytes,15,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,16,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` } -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) 
Descriptor() ([]byte, []int) { - return fileDescriptor_block_c8c1dcbe91697ccd, []int{2} -} -func (m *Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Header.Unmarshal(m, b) -} -func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Header.Marshal(b, m, deterministic) -} -func (dst *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(dst, src) -} -func (m *Header) XXX_Size() int { - return xxx_messageInfo_Header.Size(m) -} -func (m *Header) XXX_DiscardUnknown() { - xxx_messageInfo_Header.DiscardUnknown(m) -} +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -var xxx_messageInfo_Header proto.InternalMessageInfo +func (m *Header) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} func (m *Header) GetChainID() string { if m != nil { @@ -223,6 +178,13 @@ func (m *Header) GetValidatorsHash() []byte { return nil } +func (m *Header) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + func (m *Header) GetConsensusHash() []byte { if m != nil { return m.ConsensusHash @@ -258,41 +220,43 @@ func (m *Header) GetProposerAddress() []byte { return nil } +type Version struct { + Block uint64 `protobuf:"varint,1,opt,name=Block" json:"Block,omitempty"` + App uint64 `protobuf:"varint,2,opt,name=App" json:"App,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Version) GetBlock() uint64 { + if m != nil { + return m.Block + } + return 0 +} + +func (m *Version) GetApp() uint64 { + if m != nil { + return m.App + } + return 0 +} + // Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type // protobuf/timestamp.proto in the sense that there seconds and nanos are varint encoded. See: // https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 // Also nanos do not get skipped if they are zero in amino. 
type Timestamp struct { - Seconds int64 `protobuf:"fixed64,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"fixed32,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_block_c8c1dcbe91697ccd, []int{3} -} -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (dst *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(dst, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) + Seconds int64 `protobuf:"fixed64,1,opt,name=seconds" json:"seconds,omitempty"` + Nanos int32 `protobuf:"fixed32,2,opt,name=nanos" json:"nanos,omitempty"` } -var xxx_messageInfo_Timestamp proto.InternalMessageInfo +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -312,36 +276,40 @@ func init() { proto.RegisterType((*PartSetHeader)(nil), "proto3.PartSetHeader") proto.RegisterType((*BlockID)(nil), "proto3.BlockID") proto.RegisterType((*Header)(nil), "proto3.Header") + proto.RegisterType((*Version)(nil), "proto3.Version") proto.RegisterType((*Timestamp)(nil), "proto3.Timestamp") } -func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_block_c8c1dcbe91697ccd) } - -var fileDescriptor_block_c8c1dcbe91697ccd = []byte{ - // 395 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x52, 0x4b, 0x8b, 0xdb, 0x30, - 0x10, 0xc6, 0xcd, 0x7b, 0x9c, 0x47, 0x23, 0xda, 0x22, 0x7a, 0x0a, 0xa6, 0x2d, 0x39, 0x25, 0xb4, - 0x39, 0x94, 0xd2, 0x53, 0x9a, 0x14, 0x12, 0x28, 0x25, 0x68, 0x43, 0xee, 0x4a, 0x2c, 0x36, 0x66, - 0x6d, 0xcb, 0x78, 0x94, 0x65, 0xf7, 0x3f, 0xef, 0x8f, 0x58, 0x34, 0xb2, 0xbd, 0x71, 0x6e, 0xfe, - 0x1e, 0xfa, 0x3e, 0x79, 0x46, 0xc0, 0xcd, 0x73, 0xa6, 0x70, 0x9e, 0xe5, 0xda, 0xe8, 0xc5, 0xfc, - 0x18, 0xeb, 0xd3, 0xc3, 0x8c, 0x00, 0x6b, 0x3b, 0x2e, 0xf8, 0x05, 0x83, 0x9d, 0xcc, 0xcd, 0x9d, - 0x32, 0x1b, 0x25, 0x43, 0x95, 0xb3, 0x0f, 0xd0, 0xda, 0x6b, 0x23, 0x63, 0xee, 0x4d, 0xbc, 0xe9, - 0x58, 0x38, 0xc0, 0x18, 0x34, 0x37, 0x12, 0xcf, 0xfc, 0xdd, 0xc4, 0x9b, 0xf6, 0x05, 0x7d, 0x07, - 0x07, 0xe8, 0xfc, 0xb1, 0x89, 0xdb, 0x75, 0x25, 0x7b, 0x6f, 0x32, 0xfb, 0x09, 0xbe, 0x4d, 0x46, - 0x97, 0x4b, 0x27, 0xfd, 0x1f, 0x1f, 0x5d, 0xfd, 0x62, 0x56, 0x2b, 0x15, 0xd7, 0xce, 0xe0, 0xa5, - 0x01, 0xed, 0xe2, 0x32, 0x1c, 0x3a, 0xab, 0xb3, 0x8c, 0xd2, 0xed, 0x9a, 0xa2, 0x7b, 0xa2, 0x84, - 0xec, 0x93, 0xf5, 0x44, 0xf7, 0x67, 0x43, 0xc1, 0x4c, 0x14, 0x88, 0x7d, 0x85, 0xe6, 0x3e, 0x4a, - 0x14, 0x6f, 0x50, 0xdd, 0xb8, 0xac, 0xb3, 0x1c, 0x1a, 0x99, 0x64, 0x82, 0x64, 0x7b, 0xfc, 0xff, - 0x25, 0xd9, 0x3f, 0x21, 0x6f, 0xba, 0xe3, 0x0e, 0xb1, 0xcf, 0xd0, 0xa5, 0x1f, 0xb6, 0x4a, 0x8b, - 0x94, 0x0a, 0xb3, 
0xef, 0xe0, 0xff, 0x93, 0x68, 0x8a, 0x7f, 0xe6, 0x6d, 0x6a, 0x18, 0x95, 0x0d, - 0x05, 0x2d, 0xae, 0x3d, 0xec, 0x1b, 0x0c, 0x2d, 0x5c, 0xe9, 0x24, 0x89, 0x0c, 0x4d, 0xa8, 0x43, - 0x13, 0xba, 0x61, 0x6d, 0xed, 0x5a, 0x1a, 0x49, 0x8e, 0x2e, 0x39, 0x2a, 0x6c, 0x33, 0x0e, 0x32, - 0x8e, 0x42, 0x69, 0x74, 0x8e, 0xe4, 0xe8, 0xb9, 0x8c, 0x3a, 0xcb, 0xbe, 0xc0, 0x60, 0xa5, 0x53, - 0x54, 0x29, 0x5e, 0x9c, 0x0d, 0xc8, 0x56, 0x27, 0xed, 0x44, 0x97, 0x59, 0x46, 0xba, 0x4f, 0x7a, - 0x09, 0xd9, 0x14, 0x46, 0xf6, 0x56, 0x42, 0xe1, 0x25, 0x36, 0x2e, 0xa1, 0x4f, 0x8e, 0x5b, 0x9a, - 0x05, 0xd0, 0xff, 0xfb, 0x18, 0x85, 0x2a, 0x3d, 0x29, 0xb2, 0x0d, 0xc8, 0x56, 0xe3, 0x6c, 0xda, - 0x2e, 0xd7, 0x99, 0x46, 0x95, 0x2f, 0xc3, 0x30, 0x57, 0x88, 0x7c, 0xe8, 0xd2, 0x6e, 0xe8, 0xe0, - 0x37, 0xf4, 0xaa, 0xed, 0xd8, 0xeb, 0xa1, 0x3a, 0xe9, 0x34, 0x44, 0x5a, 0xf8, 0x7b, 0x51, 0x42, - 0xfb, 0x2e, 0x53, 0x99, 0x6a, 0xa4, 0x7d, 0x8f, 0x84, 0x03, 0xc7, 0xe2, 0x19, 0xbf, 0x06, 0x00, - 0x00, 0xff, 0xff, 0xde, 0x29, 0x34, 0x75, 0xe9, 0x02, 0x00, 0x00, +func init() { proto.RegisterFile("block.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 443 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xcd, 0x6a, 0xdb, 0x40, + 0x10, 0x46, 0x8d, 0x62, 0xc7, 0x23, 0x3b, 0x76, 0x86, 0xb6, 0x88, 0x9e, 0x8c, 0x68, 0x8b, 0x7b, + 0x31, 0x24, 0x39, 0x94, 0xd2, 0x93, 0x6b, 0x17, 0x12, 0x28, 0x21, 0x6c, 0x8d, 0xef, 0x1b, 0x6b, + 0xa9, 0x45, 0x2d, 0xad, 0xd0, 0xac, 0x4b, 0xde, 0xb0, 0xaf, 0x55, 0x66, 0x56, 0x52, 0x2d, 0x93, + 0x93, 0xf7, 0xfb, 0x99, 0x6f, 0x76, 0xc7, 0x23, 0x88, 0x9e, 0xf6, 0x76, 0xfb, 0x7b, 0x5e, 0x56, + 0xd6, 0x59, 0xec, 0xc9, 0xcf, 0x6d, 0xf2, 0x05, 0x46, 0x8f, 0xba, 0x72, 0x3f, 0x8d, 0xbb, 0x33, + 0x3a, 0x35, 0x15, 0xbe, 0x86, 0xf3, 0xb5, 0x75, 0x7a, 0x1f, 0x07, 0xd3, 0x60, 0x76, 0xa5, 0x3c, + 0x40, 0x84, 0xf0, 0x4e, 0xd3, 0x2e, 0x7e, 0x35, 0x0d, 0x66, 0x43, 0x25, 0xe7, 0x64, 0x03, 0xfd, + 0x6f, 0x9c, 0x78, 0xbf, 0x6a, 0xe5, 0xe0, 0xbf, 0x8c, 0x9f, 0x21, 0xe2, 0x64, 0xf2, 0xb9, 0x52, + 0x19, 0xdd, 0xbc, 0xf1, 0xed, 0x6f, 0xe7, 0x9d, 0xa6, 0xea, 0xd8, 0x99, 0xfc, 0x0d, 0xa1, 0x57, + 0x5f, 0xe6, 0x13, 0xf4, 0x37, 0xa6, 0xa2, 0xcc, 0x16, 0x12, 0x1d, 0xdd, 0x8c, 0x9b, 0xfa, 0x9a, + 0x56, 0x8d, 0x8e, 0x31, 0xf4, 0x97, 0x3b, 0x9d, 0x15, 0xf7, 0x2b, 0x69, 0x35, 0x50, 0x0d, 0xc4, + 0xb7, 0x1c, 0x97, 0xfd, 0xda, 0xb9, 0xf8, 0x6c, 0x1a, 0xcc, 0x50, 0xd5, 0x08, 0x3f, 0x40, 0xb8, + 0xce, 0x72, 0x13, 0x87, 0x92, 0x7c, 0xd5, 0x24, 0x33, 0x47, 0x4e, 0xe7, 0xa5, 0x12, 0x99, 0xcb, + 0x1f, 0x0e, 0xf9, 0xfa, 0x99, 0xe2, 0x73, 0x5f, 0xee, 0x11, 0xbe, 0x83, 0x0b, 0x99, 0x0d, 0x2b, + 0x3d, 0x51, 0x5a, 0x8c, 0xd7, 0x10, 0xfd, 0xd0, 0xe4, 0xea, 0xf1, 0xc4, 0xfd, 0xee, 0xdd, 0x6b, + 0x5a, 0x1d, 0x7b, 0xf0, 0x23, 0x5c, 0x32, 0x5c, 0xda, 0x3c, 0xcf, 0x9c, 0x0c, 0xf3, 0x42, 0x86, + 0x79, 0xc2, 0x72, 0xdb, 0x95, 0x76, 0x5a, 0x1c, 0x03, 0x71, 0xb4, 0x98, 0x33, 0x36, 0x7a, 0x9f, + 0xa5, 0xda, 0xd9, 0x8a, 0xc4, 0x01, 0x3e, 0xa3, 0xcb, 0xe2, 0x1c, 0xf0, 0xc1, 0x3c, 0xbb, 0x13, + 0x6f, 0x24, 0xde, 0x17, 0x14, 0x7c, 0x0f, 0xa3, 0xa5, 0x2d, 0xc8, 0x14, 0x74, 0xf0, 0xd6, 0xa1, + 0x58, 0xbb, 0x24, 0xff, 0x03, 0x8b, 0xb2, 0x14, 0x7d, 0x24, 0x7a, 0x03, 0x71, 0x06, 0x63, 0x7e, + 0x85, 0x32, 0x74, 0xd8, 0x3b, 0x9f, 0x70, 0x29, 0x8e, 0x53, 0x1a, 0x13, 0x18, 0x7e, 0xff, 0x93, + 0xa5, 0xa6, 0xd8, 0x1a, 0xb1, 0x8d, 0xc5, 0xd6, 0xe1, 0x38, 0xed, 0xb1, 0xb2, 0xa5, 0x25, 0x53, + 0x2d, 0xd2, 0xb4, 0x32, 0x44, 0xf1, 0xc4, 0xa7, 0x9d, 0xd0, 0xc9, 0x75, 0xbb, 0x3e, 0xbc, 0xd6, + 0x32, 0x69, 0xd9, 0xa3, 
0x50, 0x79, 0x80, 0x13, 0x38, 0x5b, 0x94, 0xa5, 0x2c, 0x4c, 0xa8, 0xf8, + 0x98, 0x7c, 0x85, 0x41, 0xbb, 0x00, 0xfc, 0x22, 0x32, 0x5b, 0x5b, 0xa4, 0x24, 0x65, 0x13, 0xd5, + 0x40, 0x8e, 0x2b, 0x74, 0x61, 0x49, 0x4a, 0xc7, 0xca, 0x83, 0xa7, 0xfa, 0xa3, 0xfa, 0x17, 0x00, + 0x00, 0xff, 0xff, 0xd5, 0x8b, 0x28, 0x26, 0x6a, 0x03, 0x00, 0x00, } diff --git a/types/proto3/block.proto b/types/proto3/block.proto index 835d6b74b..dd64a9e98 100644 --- a/types/proto3/block.proto +++ b/types/proto3/block.proto @@ -15,29 +15,35 @@ message BlockID { message Header { // basic block info - string ChainID = 1; - sint64 Height = 2; - Timestamp Time = 3; - sint64 NumTxs = 4; - sint64 TotalTxs = 5; + Version Version = 1; + string ChainID = 2; + sint64 Height = 3; + Timestamp Time = 4; + sint64 NumTxs = 5; + sint64 TotalTxs = 6; // prev block info - BlockID LastBlockID = 6; + BlockID LastBlockID = 7; // hashes of block data - bytes LastCommitHash = 7; // commit from validators from the last block - bytes DataHash = 8; // transactions + bytes LastCommitHash = 8; // commit from validators from the last block + bytes DataHash = 9; // transactions // hashes from the app output from the prev block - bytes ValidatorsHash = 9; // validators for the current block - bytes NextValidatorsHash = 10; // validators for the next block - bytes ConsensusHash = 11; // consensus params for current block - bytes AppHash = 12; // state after txs from the previous block - bytes LastResultsHash = 13; // root hash of all results from the txs from the previous block + bytes ValidatorsHash = 10; // validators for the current block + bytes NextValidatorsHash = 11; // validators for the next block + bytes ConsensusHash = 12; // consensus params for current block + bytes AppHash = 13; // state after txs from the previous block + bytes LastResultsHash = 14; // root hash of all results from the txs from the previous block // consensus info - bytes EvidenceHash = 14; // evidence included in the block - bytes ProposerAddress = 15; // original proposer of the block + bytes EvidenceHash = 15; // evidence included in the block + bytes ProposerAddress = 16; // original proposer of the block +} + +message Version { + uint64 Block = 1; + uint64 App = 2; } // Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type From 14c1baeb246055743c857fe8866eac0f17e325ea Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 Oct 2018 10:29:59 -0400 Subject: [PATCH 084/113] ADR-016: Add protocol Version to NodeInfo (#2654) * p2p: add protocol Version to NodeInfo * update node pkg. 
remove extraneous version files * update changelog and docs * fix test * p2p: Version -> ProtocolVersion; more ValidateBasic and tests --- CHANGELOG_PENDING.md | 5 ++ benchmarks/codec_test.go | 15 ++--- consensus/version.go | 11 ---- docs/spec/p2p/peer.md | 16 ++--- node/node.go | 16 ++--- p2p/node_info.go | 131 +++++++++++++++++++++------------------ p2p/node_info_test.go | 123 ++++++++++++++++++++++++++++++++++++ p2p/peer_test.go | 13 ++-- p2p/test_util.go | 17 +++-- p2p/version.go | 3 - rpc/core/status.go | 9 +-- rpc/core/version.go | 5 -- rpc/lib/version.go | 7 --- 13 files changed, 244 insertions(+), 127 deletions(-) delete mode 100644 consensus/version.go create mode 100644 p2p/node_info_test.go delete mode 100644 p2p/version.go delete mode 100644 rpc/core/version.go delete mode 100644 rpc/lib/version.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 5fcbbb7b1..f4858d953 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -14,6 +14,8 @@ BREAKING CHANGES: * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) * [state] \#2644 Add Version field to State, breaking the format of State as encoded on disk. + * [rpc] \#2654 Remove all `node_info.other.*_version` fields in `/status` and + `/net_info` * Apps * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just @@ -43,6 +45,9 @@ BREAKING CHANGES: * [state] \#2644 Require block.Version to match state.Version * P2P Protocol + * [p2p] \#2654 Add `ProtocolVersion` struct with protocol versions to top of + DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake + FEATURES: - [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go index 71d7a83b2..2be1db156 100644 --- a/benchmarks/codec_test.go +++ b/benchmarks/codec_test.go @@ -14,14 +14,15 @@ import ( func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo { return p2p.DefaultNodeInfo{ - ID_: id, - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", + ProtocolVersion: p2p.InitProtocolVersion, + ID_: id, + Moniker: "SOMENAME", + Network: "SOMENAME", + ListenAddr: "SOMEADDR", + Version: "SOMEVER", Other: p2p.DefaultNodeInfoOther{ - AminoVersion: "SOMESTRING", - P2PVersion: "OTHERSTRING", + TxIndex: "on", + RPCAddress: "0.0.0.0:26657", }, } } diff --git a/consensus/version.go b/consensus/version.go deleted file mode 100644 index c04d2ac7d..000000000 --- a/consensus/version.go +++ /dev/null @@ -1,11 +0,0 @@ -package consensus - -import "fmt" - -// kind of arbitrary -var Spec = "1" // async -var Major = "0" // -var Minor = "2" // replay refactor -var Revision = "2" // validation -> commit - -var Version = fmt.Sprintf("v%s/%s.%s.%s", Spec, Major, Minor, Revision) diff --git a/docs/spec/p2p/peer.md b/docs/spec/p2p/peer.md index a1ff25d8b..f5c2e7bf2 100644 --- a/docs/spec/p2p/peer.md +++ b/docs/spec/p2p/peer.md @@ -75,22 +75,25 @@ The Tendermint Version Handshake allows the peers to exchange their NodeInfo: ```golang type NodeInfo struct { + Version p2p.Version ID p2p.ID ListenAddr string Network string - Version string + SoftwareVersion string Channels []int8 Moniker string Other NodeInfoOther } +type Version struct { + P2P uint64 + Block uint64 + App uint64 +} + type NodeInfoOther struct { - AminoVersion string - P2PVersion string - ConsensusVersion string - RPCVersion string 
TxIndex string RPCAddress string } @@ -99,8 +102,7 @@ type NodeInfoOther struct { The connection is disconnected if: - `peer.NodeInfo.ID` is not equal `peerConn.ID` -- `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision -- `peer.NodeInfo.Version` Major is not the same as ours +- `peer.NodeInfo.Version.Block` does not match ours - `peer.NodeInfo.Network` is not the same as ours - `peer.Channels` does not intersect with our known Channels. - `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be diff --git a/node/node.go b/node/node.go index 12e0b8e67..97de24736 100644 --- a/node/node.go +++ b/node/node.go @@ -32,7 +32,6 @@ import ( rpccore "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" grpccore "github.com/tendermint/tendermint/rpc/grpc" - "github.com/tendermint/tendermint/rpc/lib" "github.com/tendermint/tendermint/rpc/lib/server" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" @@ -771,9 +770,10 @@ func makeNodeInfo( txIndexerStatus = "off" } nodeInfo := p2p.DefaultNodeInfo{ - ID_: nodeID, - Network: chainID, - Version: version.Version, + ProtocolVersion: p2p.InitProtocolVersion, + ID_: nodeID, + Network: chainID, + Version: version.TMCoreSemVer, Channels: []byte{ bc.BlockchainChannel, cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, @@ -782,12 +782,8 @@ func makeNodeInfo( }, Moniker: config.Moniker, Other: p2p.DefaultNodeInfoOther{ - AminoVersion: amino.Version, - P2PVersion: p2p.Version, - ConsensusVersion: cs.Version, - RPCVersion: fmt.Sprintf("%v/%v", rpc.Version, rpccore.Version), - TxIndex: txIndexerStatus, - RPCAddress: config.RPC.ListenAddress, + TxIndex: txIndexerStatus, + RPCAddress: config.RPC.ListenAddress, }, } diff --git a/p2p/node_info.go b/p2p/node_info.go index a468443d1..5874dc857 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -3,9 +3,9 @@ package p2p import ( "fmt" "reflect" - "strings" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/version" ) const ( @@ -18,8 +18,10 @@ func MaxNodeInfoSize() int { return maxNodeInfoSize } +//------------------------------------------------------------- + // NodeInfo exposes basic info of a node -// and determines if we're compatible +// and determines if we're compatible. type NodeInfo interface { nodeInfoAddress nodeInfoTransport @@ -31,16 +33,38 @@ type nodeInfoAddress interface { NetAddress() *NetAddress } -// nodeInfoTransport is validates a nodeInfo and checks +// nodeInfoTransport validates a nodeInfo and checks // our compatibility with it. It's for use in the handshake. type nodeInfoTransport interface { ValidateBasic() error CompatibleWith(other NodeInfo) error } +//------------------------------------------------------------- + +// ProtocolVersion contains the protocol versions for the software. +type ProtocolVersion struct { + P2P version.Protocol `json:"p2p"` + Block version.Protocol `json:"block"` + App version.Protocol `json:"app"` +} + +var InitProtocolVersion = ProtocolVersion{ + P2P: version.P2PProtocol, + Block: version.BlockProtocol, + App: 0, +} + +//------------------------------------------------------------- + +// Assert DefaultNodeInfo satisfies NodeInfo +var _ NodeInfo = DefaultNodeInfo{} + // DefaultNodeInfo is the basic node information exchanged // between two peers during the Tendermint P2P handshake. 
type DefaultNodeInfo struct { + ProtocolVersion ProtocolVersion `json:"protocol_version"` + // Authenticate // TODO: replace with NetAddress ID_ ID `json:"id"` // authenticated identifier @@ -59,12 +83,8 @@ type DefaultNodeInfo struct { // DefaultNodeInfoOther is the misc. applcation specific data type DefaultNodeInfoOther struct { - AminoVersion string `json:"amino_version"` - P2PVersion string `json:"p2p_version"` - ConsensusVersion string `json:"consensus_version"` - RPCVersion string `json:"rpc_version"` - TxIndex string `json:"tx_index"` - RPCAddress string `json:"rpc_address"` + TxIndex string `json:"tx_index"` + RPCAddress string `json:"rpc_address"` } // ID returns the node's peer ID. @@ -86,35 +106,28 @@ func (info DefaultNodeInfo) ID() ID { // url-encoding), and we just need to be careful with how we handle that in our // clients. (e.g. off by default). func (info DefaultNodeInfo) ValidateBasic() error { - if len(info.Channels) > maxNumChannels { - return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels) - } - // Sanitize ASCII text fields. - if !cmn.IsASCIIText(info.Moniker) || cmn.ASCIITrim(info.Moniker) == "" { - return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker) - } + // ID is already validated. - // Sanitize versions - // XXX: Should we be more strict about version and address formats? - other := info.Other - versions := []string{ - other.AminoVersion, - other.P2PVersion, - other.ConsensusVersion, - other.RPCVersion} - for i, v := range versions { - if cmn.ASCIITrim(v) != "" && !cmn.IsASCIIText(v) { - return fmt.Errorf("info.Other[%d]=%v must be valid non-empty ASCII text without tabs", i, v) - } - } - if cmn.ASCIITrim(other.TxIndex) != "" && (other.TxIndex != "on" && other.TxIndex != "off") { - return fmt.Errorf("info.Other.TxIndex should be either 'on' or 'off', got '%v'", other.TxIndex) + // Validate ListenAddr. + _, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr)) + if err != nil { + return err } - if cmn.ASCIITrim(other.RPCAddress) != "" && !cmn.IsASCIIText(other.RPCAddress) { - return fmt.Errorf("info.Other.RPCAddress=%v must be valid non-empty ASCII text without tabs", other.RPCAddress) + + // Network is validated in CompatibleWith. + + // Validate Version + if len(info.Version) > 0 && + (!cmn.IsASCIIText(info.Version) || cmn.ASCIITrim(info.Version) == "") { + + return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %v", info.Version) } + // Validate Channels - ensure max and check for duplicates. + if len(info.Channels) > maxNumChannels { + return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels) + } channels := make(map[byte]struct{}) for _, ch := range info.Channels { _, ok := channels[ch] @@ -124,13 +137,30 @@ func (info DefaultNodeInfo) ValidateBasic() error { channels[ch] = struct{}{} } - // ensure ListenAddr is good - _, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr)) - return err + // Validate Moniker. + if !cmn.IsASCIIText(info.Moniker) || cmn.ASCIITrim(info.Moniker) == "" { + return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker) + } + + // Validate Other. 
+ other := info.Other + txIndex := other.TxIndex + switch txIndex { + case "", "on", "off": + default: + return fmt.Errorf("info.Other.TxIndex should be either 'on' or 'off', got '%v'", txIndex) + } + // XXX: Should we be more strict about address formats? + rpcAddr := other.RPCAddress + if len(rpcAddr) > 0 && (!cmn.IsASCIIText(rpcAddr) || cmn.ASCIITrim(rpcAddr) == "") { + return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr) + } + + return nil } // CompatibleWith checks if two DefaultNodeInfo are compatible with eachother. -// CONTRACT: two nodes are compatible if the major version matches and network match +// CONTRACT: two nodes are compatible if the Block version and network match // and they have at least one channel in common. func (info DefaultNodeInfo) CompatibleWith(other_ NodeInfo) error { other, ok := other_.(DefaultNodeInfo) @@ -138,22 +168,9 @@ func (info DefaultNodeInfo) CompatibleWith(other_ NodeInfo) error { return fmt.Errorf("wrong NodeInfo type. Expected DefaultNodeInfo, got %v", reflect.TypeOf(other_)) } - iMajor, _, _, iErr := splitVersion(info.Version) - oMajor, _, _, oErr := splitVersion(other.Version) - - // if our own version number is not formatted right, we messed up - if iErr != nil { - return iErr - } - - // version number must be formatted correctly ("x.x.x") - if oErr != nil { - return oErr - } - - // major version must match - if iMajor != oMajor { - return fmt.Errorf("Peer is on a different major version. Got %v, expected %v", oMajor, iMajor) + if info.ProtocolVersion.Block != other.ProtocolVersion.Block { + return fmt.Errorf("Peer is on a different Block version. Got %v, expected %v", + other.ProtocolVersion.Block, info.ProtocolVersion.Block) } // nodes must be on the same network @@ -201,11 +218,3 @@ func (info DefaultNodeInfo) NetAddress() *NetAddress { } return netAddr } - -func splitVersion(version string) (string, string, string, error) { - spl := strings.Split(version, ".") - if len(spl) != 3 { - return "", "", "", fmt.Errorf("Invalid version format %v", version) - } - return spl[0], spl[1], spl[2], nil -} diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go new file mode 100644 index 000000000..c9a72dbc2 --- /dev/null +++ b/p2p/node_info_test.go @@ -0,0 +1,123 @@ +package p2p + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/ed25519" +) + +func TestNodeInfoValidate(t *testing.T) { + + // empty fails + ni := DefaultNodeInfo{} + assert.Error(t, ni.ValidateBasic()) + + channels := make([]byte, maxNumChannels) + for i := 0; i < maxNumChannels; i++ { + channels[i] = byte(i) + } + dupChannels := make([]byte, 5) + copy(dupChannels[:], channels[:5]) + dupChannels = append(dupChannels, testCh) + + nonAscii := "¢§µ" + emptyTab := fmt.Sprintf("\t") + emptySpace := fmt.Sprintf(" ") + + testCases := []struct { + testName string + malleateNodeInfo func(*DefaultNodeInfo) + expectErr bool + }{ + {"Too Many Channels", func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, true}, + {"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true}, + {"Good Channels", func(ni *DefaultNodeInfo) { ni.Channels = ni.Channels[:5] }, false}, + + {"Invalid NetAddress", func(ni *DefaultNodeInfo) { ni.ListenAddr = "not-an-address" }, true}, + {"Good NetAddress", func(ni *DefaultNodeInfo) { ni.ListenAddr = "0.0.0.0:26656" }, false}, + + {"Non-ASCII Version", func(ni *DefaultNodeInfo) { ni.Version = nonAscii }, true}, + 
{"Empty tab Version", func(ni *DefaultNodeInfo) { ni.Version = emptyTab }, true}, + {"Empty space Version", func(ni *DefaultNodeInfo) { ni.Version = emptySpace }, true}, + {"Empty Version", func(ni *DefaultNodeInfo) { ni.Version = "" }, false}, + + {"Non-ASCII Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = nonAscii }, true}, + {"Empty tab Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = emptyTab }, true}, + {"Empty space Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = emptySpace }, true}, + {"Empty Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = "" }, true}, + {"Good Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = "hey its me" }, false}, + + {"Non-ASCII TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = nonAscii }, true}, + {"Empty tab TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = emptyTab }, true}, + {"Empty space TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = emptySpace }, true}, + {"Empty TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = "" }, false}, + {"Off TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = "off" }, false}, + + {"Non-ASCII RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = nonAscii }, true}, + {"Empty tab RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = emptyTab }, true}, + {"Empty space RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = emptySpace }, true}, + {"Empty RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = "" }, false}, + {"Good RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = "0.0.0.0:26657" }, false}, + } + + nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} + name := "testing" + + // test case passes + ni = testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) + ni.Channels = channels + assert.NoError(t, ni.ValidateBasic()) + + for _, tc := range testCases { + ni := testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) + ni.Channels = channels + tc.malleateNodeInfo(&ni) + err := ni.ValidateBasic() + if tc.expectErr { + assert.Error(t, err, tc.testName) + } else { + assert.NoError(t, err, tc.testName) + } + } + +} + +func TestNodeInfoCompatible(t *testing.T) { + + nodeKey1 := NodeKey{PrivKey: ed25519.GenPrivKey()} + nodeKey2 := NodeKey{PrivKey: ed25519.GenPrivKey()} + name := "testing" + + var newTestChannel byte = 0x2 + + // test NodeInfo is compatible + ni1 := testNodeInfo(nodeKey1.ID(), name).(DefaultNodeInfo) + ni2 := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) + assert.NoError(t, ni1.CompatibleWith(ni2)) + + // add another channel; still compatible + ni2.Channels = []byte{newTestChannel, testCh} + assert.NoError(t, ni1.CompatibleWith(ni2)) + + // wrong NodeInfo type is not compatible + _, netAddr := CreateRoutableAddr() + ni3 := mockNodeInfo{netAddr} + assert.Error(t, ni1.CompatibleWith(ni3)) + + testCases := []struct { + testName string + malleateNodeInfo func(*DefaultNodeInfo) + }{ + {"Wrong block version", func(ni *DefaultNodeInfo) { ni.ProtocolVersion.Block += 1 }}, + {"Wrong network", func(ni *DefaultNodeInfo) { ni.Network += "-wrong" }}, + {"No common channels", func(ni *DefaultNodeInfo) { ni.Channels = []byte{newTestChannel} }}, + } + + for _, tc := range testCases { + ni := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) + tc.malleateNodeInfo(&ni) + assert.Error(t, ni1.CompatibleWith(ni)) + } +} diff --git a/p2p/peer_test.go b/p2p/peer_test.go index fecf7f1cc..9c330ee52 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -207,11 +207,12 @@ func (rp *remotePeer) accept(l net.Listener) { func (rp *remotePeer) 
nodeInfo(l net.Listener) NodeInfo { return DefaultNodeInfo{ - ID_: rp.Addr().ID, - Moniker: "remote_peer", - Network: "testing", - Version: "123.123.123", - ListenAddr: l.Addr().String(), - Channels: rp.channels, + ProtocolVersion: InitProtocolVersion, + ID_: rp.Addr().ID, + ListenAddr: l.Addr().String(), + Network: "testing", + Version: "1.2.3-rc0-deadbeef", + Channels: rp.channels, + Moniker: "remote_peer", } } diff --git a/p2p/test_util.go b/p2p/test_util.go index 2859dc645..4d43175bb 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -247,11 +247,16 @@ func testNodeInfo(id ID, name string) NodeInfo { func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo { return DefaultNodeInfo{ - ID_: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), - Moniker: name, - Network: network, - Version: "123.123.123", - Channels: []byte{testCh}, + ProtocolVersion: InitProtocolVersion, + ID_: id, + ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), + Network: network, + Version: "1.2.3-rc0-deadbeef", + Channels: []byte{testCh}, + Moniker: name, + Other: DefaultNodeInfoOther{ + TxIndex: "on", + RPCAddress: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), + }, } } diff --git a/p2p/version.go b/p2p/version.go deleted file mode 100644 index 9a4c7bbaf..000000000 --- a/p2p/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package p2p - -const Version = "0.5.0" diff --git a/rpc/core/status.go b/rpc/core/status.go index c26b06b8a..793e1ade7 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -31,6 +31,11 @@ import ( // "id": "", // "result": { // "node_info": { +// "protocol_version": { +// "p2p": "4", +// "block": "7", +// "app": "0" +// }, // "id": "53729852020041b956e86685e24394e0bee4373f", // "listen_addr": "10.0.2.15:26656", // "network": "test-chain-Y1OHx6", @@ -38,10 +43,6 @@ import ( // "channels": "4020212223303800", // "moniker": "ubuntu-xenial", // "other": { -// "amino_version": "0.12.0", -// "p2p_version": "0.5.0", -// "consensus_version": "v1/0.2.2", -// "rpc_version": "0.7.0/3", // "tx_index": "on", // "rpc_addr": "tcp://0.0.0.0:26657" // } diff --git a/rpc/core/version.go b/rpc/core/version.go deleted file mode 100644 index e283de479..000000000 --- a/rpc/core/version.go +++ /dev/null @@ -1,5 +0,0 @@ -package core - -// a single integer is sufficient here - -const Version = "3" // rpc routes for profiling, setting config diff --git a/rpc/lib/version.go b/rpc/lib/version.go deleted file mode 100644 index 8828f260b..000000000 --- a/rpc/lib/version.go +++ /dev/null @@ -1,7 +0,0 @@ -package rpc - -const Maj = "0" -const Min = "7" -const Fix = "0" - -const Version = Maj + "." + Min + "." + Fix From 055d7adffbb58b676841eb48d436ad3ead0e391d Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 18 Oct 2018 20:03:45 +0400 Subject: [PATCH 085/113] add tm-abci python ABCI server (fork of py-abci) (#2658) It utilises async IO -> greater performance. 
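Taken together, the NodeInfo changes above replace the old string-based version handshake with the `ProtocolVersion` struct. A minimal, self-contained sketch of the resulting compatibility rule (block protocol version and network must match, with at least one shared channel) follows; the type and method names mirror `p2p/node_info.go` from PATCH 084, while the standalone packaging and sample values are illustrative assumptions, not part of any patch.

```go
package main

import "fmt"

// ProtocolVersion mirrors the struct added to p2p/node_info.go in PATCH 084.
type ProtocolVersion struct {
	P2P   uint64
	Block uint64
	App   uint64
}

// nodeInfo is a pared-down stand-in for p2p.DefaultNodeInfo; only the fields
// used by the compatibility check are kept.
type nodeInfo struct {
	ProtocolVersion ProtocolVersion
	Network         string
	Channels        []byte
}

// compatibleWith follows the logic of DefaultNodeInfo.CompatibleWith:
// block protocol versions and networks must match, and the peers must share
// at least one channel.
func (info nodeInfo) compatibleWith(other nodeInfo) error {
	if info.ProtocolVersion.Block != other.ProtocolVersion.Block {
		return fmt.Errorf("peer is on a different Block version: got %v, expected %v",
			other.ProtocolVersion.Block, info.ProtocolVersion.Block)
	}
	if info.Network != other.Network {
		return fmt.Errorf("peer is on a different network: got %v, expected %v",
			other.Network, info.Network)
	}
	for _, ch := range info.Channels {
		for _, och := range other.Channels {
			if ch == och {
				return nil // at least one common channel
			}
		}
	}
	return fmt.Errorf("peer has no common channels")
}

func main() {
	us := nodeInfo{ProtocolVersion{P2P: 4, Block: 7}, "test-chain", []byte{0x01}}
	them := nodeInfo{ProtocolVersion{P2P: 4, Block: 6}, "test-chain", []byte{0x01}}
	// Block versions differ, so the handshake would reject this peer.
	fmt.Println(us.compatibleWith(them))
}
```

The real implementation additionally rejects any peer whose NodeInfo is not a DefaultNodeInfo, a case exercised by the mockNodeInfo test in p2p/node_info_test.go above.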
--- docs/app-dev/ecosystem.json | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/app-dev/ecosystem.json b/docs/app-dev/ecosystem.json index 67aca2efb..1c2bc2b25 100644 --- a/docs/app-dev/ecosystem.json +++ b/docs/app-dev/ecosystem.json @@ -163,6 +163,12 @@ "language": "Python", "author": "Dave Bryson" }, + { + "name": "tm-abci", + "url": "https://github.com/SoftblocksCo/tm-abci", + "language": "Python", + "author": "Softblocks" + }, { "name": "Spearmint", "url": "https://github.com/dennismckinnon/spearmint", From ed4ce5ff6cc455114d749bd2121096c81098a84f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 Oct 2018 16:51:17 -0400 Subject: [PATCH 086/113] ADR-016: Update ABCI Info method for versions (#2662) * abci: update RequestInfo for versions * abci: update ResponseInfo for versions * compile fix * fix test * software_version -> version * comment fix * update spec * add test * comments and fix test --- CHANGELOG_PENDING.md | 3 + abci/example/kvstore/kvstore.go | 9 +- abci/types/types.pb.go | 484 ++++++++++++++++++++------------ abci/types/types.proto | 9 +- consensus/replay.go | 14 +- consensus/replay_test.go | 12 +- consensus/wal_generator.go | 4 +- docs/spec/abci/abci.md | 8 +- node/node.go | 20 +- node/node_test.go | 22 ++ p2p/node_info.go | 17 +- proxy/app_conn_test.go | 2 +- proxy/version.go | 15 + rpc/client/mock/abci.go | 4 +- rpc/core/abci.go | 4 +- state/state.go | 4 + version/version.go | 6 + 17 files changed, 425 insertions(+), 212 deletions(-) create mode 100644 proxy/version.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index f4858d953..99c389974 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -21,6 +21,8 @@ BREAKING CHANGES: * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just arbitrary bytes * [abci] \#2644 Add Version to Header and shift all fields by one + * [abci] \#2662 Bump the field numbers for some `ResponseInfo` fields to make room for + `AppVersion` * Go API * [node] Remove node.RunForever @@ -52,6 +54,7 @@ BREAKING CHANGES: FEATURES: - [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together - [abci] \#2557 Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` +- [abci] \#2662 Add `BlockVersion` and `P2PVersion` to `RequestInfo` IMPROVEMENTS: - Additional Metrics diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 9523bf746..955baefb4 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -10,11 +10,14 @@ import ( "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/version" ) var ( stateKey = []byte("stateKey") kvPairPrefixKey = []byte("kvPairKey:") + + ProtocolVersion version.Protocol = 0x1 ) type State struct { @@ -65,7 +68,11 @@ func NewKVStoreApplication() *KVStoreApplication { } func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { - return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size)} + return types.ResponseInfo{ + Data: fmt.Sprintf("{\"size\":%v}", app.state.Size), + Version: version.ABCIVersion, + AppVersion: ProtocolVersion.Uint64(), + } } // tx is either "key=value" or just arbitrary bytes diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 81fb74b42..6a70bb979 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -61,7 +61,7 @@ func (m *Request) 
Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{0} + return fileDescriptor_types_4449c1011851ea19, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{1} + return fileDescriptor_types_4449c1011851ea19, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{2} + return fileDescriptor_types_4449c1011851ea19, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -560,6 +560,8 @@ var xxx_messageInfo_RequestFlush proto.InternalMessageInfo type RequestInfo struct { Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` + P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -569,7 +571,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{3} + return fileDescriptor_types_4449c1011851ea19, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -605,6 +607,20 @@ func (m *RequestInfo) GetVersion() string { return "" } +func (m *RequestInfo) GetBlockVersion() uint64 { + if m != nil { + return m.BlockVersion + } + return 0 +} + +func (m *RequestInfo) GetP2PVersion() uint64 { + if m != nil { + return m.P2PVersion + } + return 0 +} + // nondeterministic type RequestSetOption struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -618,7 +634,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{4} + return fileDescriptor_types_4449c1011851ea19, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -676,7 +692,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{5} + return fileDescriptor_types_4449c1011851ea19, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -754,7 +770,7 
@@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{6} + return fileDescriptor_types_4449c1011851ea19, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -826,7 +842,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{7} + return fileDescriptor_types_4449c1011851ea19, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -894,7 +910,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{8} + return fileDescriptor_types_4449c1011851ea19, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -941,7 +957,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{9} + return fileDescriptor_types_4449c1011851ea19, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -988,7 +1004,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{10} + return fileDescriptor_types_4449c1011851ea19, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1034,7 +1050,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{11} + return fileDescriptor_types_4449c1011851ea19, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1087,7 +1103,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{12} + return fileDescriptor_types_4449c1011851ea19, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1540,7 +1556,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{13} + return fileDescriptor_types_4449c1011851ea19, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -1587,7 +1603,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{14} + return fileDescriptor_types_4449c1011851ea19, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1633,7 +1649,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{15} + return fileDescriptor_types_4449c1011851ea19, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1665,8 +1681,9 @@ var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo type ResponseInfo struct { Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockAppHash []byte `protobuf:"bytes,4,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1676,7 +1693,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{16} + return fileDescriptor_types_4449c1011851ea19, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1719,6 +1736,13 @@ func (m *ResponseInfo) GetVersion() string { return "" } +func (m *ResponseInfo) GetAppVersion() uint64 { + if m != nil { + return m.AppVersion + } + return 0 +} + func (m *ResponseInfo) GetLastBlockHeight() int64 { if m != nil { return m.LastBlockHeight @@ -1748,7 +1772,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{17} + return fileDescriptor_types_4449c1011851ea19, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1810,7 +1834,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{18} + return fileDescriptor_types_4449c1011851ea19, []int{18} } func (m 
*ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1873,7 +1897,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{19} + return fileDescriptor_types_4449c1011851ea19, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1976,7 +2000,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{20} + return fileDescriptor_types_4449c1011851ea19, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2030,7 +2054,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{21} + return fileDescriptor_types_4449c1011851ea19, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2133,7 +2157,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{22} + return fileDescriptor_types_4449c1011851ea19, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2231,7 +2255,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{23} + return fileDescriptor_types_4449c1011851ea19, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2293,7 +2317,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{24} + return fileDescriptor_types_4449c1011851ea19, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2343,7 +2367,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{25} + return fileDescriptor_types_4449c1011851ea19, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2401,7 +2425,7 @@ func (m *BlockSize) Reset() { *m = BlockSize{} } func (m *BlockSize) String() string { return proto.CompactTextString(m) } func (*BlockSize) ProtoMessage() {} func (*BlockSize) Descriptor() ([]byte, []int) { - return 
fileDescriptor_types_07d64ea985a686e2, []int{26} + return fileDescriptor_types_4449c1011851ea19, []int{26} } func (m *BlockSize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2457,7 +2481,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{27} + return fileDescriptor_types_4449c1011851ea19, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2505,7 +2529,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{28} + return fileDescriptor_types_4449c1011851ea19, []int{28} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2579,7 +2603,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{29} + return fileDescriptor_types_4449c1011851ea19, []int{29} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2732,7 +2756,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{30} + return fileDescriptor_types_4449c1011851ea19, []int{30} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2787,7 +2811,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{31} + return fileDescriptor_types_4449c1011851ea19, []int{31} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2842,7 +2866,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{32} + return fileDescriptor_types_4449c1011851ea19, []int{32} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2899,7 +2923,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{33} + return fileDescriptor_types_4449c1011851ea19, []int{33} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2955,7 +2979,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{34} + return fileDescriptor_types_4449c1011851ea19, []int{34} 
} func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3011,7 +3035,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{35} + return fileDescriptor_types_4449c1011851ea19, []int{35} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3066,7 +3090,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{36} + return fileDescriptor_types_4449c1011851ea19, []int{36} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3124,7 +3148,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_07d64ea985a686e2, []int{37} + return fileDescriptor_types_4449c1011851ea19, []int{37} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,6 +3660,12 @@ func (this *RequestInfo) Equal(that interface{}) bool { if this.Version != that1.Version { return false } + if this.BlockVersion != that1.BlockVersion { + return false + } + if this.P2PVersion != that1.P2PVersion { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4321,6 +4351,9 @@ func (this *ResponseInfo) Equal(that interface{}) bool { if this.Version != that1.Version { return false } + if this.AppVersion != that1.AppVersion { + return false + } if this.LastBlockHeight != that1.LastBlockHeight { return false } @@ -5758,6 +5791,16 @@ func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) } + if m.BlockVersion != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) + } + if m.P2PVersion != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6362,13 +6405,18 @@ func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) } - if m.LastBlockHeight != 0 { + if m.AppVersion != 0 { dAtA[i] = 0x18 i++ + i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) + } + if m.LastBlockHeight != 0 { + dAtA[i] = 0x20 + i++ i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) } if len(m.LastBlockAppHash) > 0 { - dAtA[i] = 0x22 + dAtA[i] = 0x2a i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) i += copy(dAtA[i:], m.LastBlockAppHash) @@ -7459,8 +7507,10 @@ func NewPopulatedRequestFlush(r randyTypes, easy bool) *RequestFlush { func NewPopulatedRequestInfo(r randyTypes, easy bool) *RequestInfo { this := &RequestInfo{} this.Version = string(randStringTypes(r)) + this.BlockVersion = uint64(uint64(r.Uint32())) + this.P2PVersion = uint64(uint64(r.Uint32())) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) } return this } @@ -7717,6 +7767,7 @@ func NewPopulatedResponseInfo(r randyTypes, easy bool) *ResponseInfo { this := 
&ResponseInfo{} this.Data = string(randStringTypes(r)) this.Version = string(randStringTypes(r)) + this.AppVersion = uint64(uint64(r.Uint32())) this.LastBlockHeight = int64(r.Int63()) if r.Intn(2) == 0 { this.LastBlockHeight *= -1 @@ -7727,7 +7778,7 @@ func NewPopulatedResponseInfo(r randyTypes, easy bool) *ResponseInfo { this.LastBlockAppHash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 5) + this.XXX_unrecognized = randUnrecognizedTypes(r, 6) } return this } @@ -8382,6 +8433,12 @@ func (m *RequestInfo) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.BlockVersion != 0 { + n += 1 + sovTypes(uint64(m.BlockVersion)) + } + if m.P2PVersion != 0 { + n += 1 + sovTypes(uint64(m.P2PVersion)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8693,6 +8750,9 @@ func (m *ResponseInfo) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.AppVersion != 0 { + n += 1 + sovTypes(uint64(m.AppVersion)) + } if m.LastBlockHeight != 0 { n += 1 + sovTypes(uint64(m.LastBlockHeight)) } @@ -9781,6 +9841,44 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + } + m.BlockVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + } + m.P2PVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.P2PVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11459,6 +11557,25 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + m.AppVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) } @@ -11477,7 +11594,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { break } } - case 4: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastBlockAppHash", wireType) } @@ -14768,145 +14885,148 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_07d64ea985a686e2) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4449c1011851ea19) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_07d64ea985a686e2) + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4449c1011851ea19) } -var fileDescriptor_types_07d64ea985a686e2 = []byte{ - // 2133 bytes of a gzipped FileDescriptorProto +var 
fileDescriptor_types_4449c1011851ea19 = []byte{ + // 2177 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x93, 0x1b, 0x47, - 0x15, 0xdf, 0xd1, 0x6a, 0x25, 0xcd, 0xdb, 0x5d, 0x49, 0x6e, 0x7f, 0xc9, 0x22, 0xac, 0x5d, 0x13, - 0x48, 0xbc, 0xc4, 0xd1, 0x06, 0x87, 0x50, 0xeb, 0x38, 0xa4, 0x6a, 0x65, 0x1b, 0x76, 0x2b, 0x01, - 0x96, 0xf1, 0x07, 0x17, 0xaa, 0xa6, 0x5a, 0x9a, 0xb6, 0x34, 0x65, 0x69, 0x66, 0x32, 0xd3, 0xda, - 0x68, 0x7d, 0xcc, 0x39, 0x87, 0x1c, 0xa8, 0xe2, 0x5f, 0xe0, 0x4f, 0xe0, 0xc8, 0x89, 0xca, 0x91, - 0x03, 0x67, 0x03, 0x4b, 0x71, 0x80, 0x2b, 0x45, 0x15, 0x47, 0xea, 0xbd, 0xee, 0xf9, 0xdc, 0x91, - 0x89, 0x03, 0x27, 0x2e, 0xd2, 0xf4, 0xfb, 0xe8, 0x8f, 0xd7, 0xef, 0xbd, 0xdf, 0x7b, 0x0d, 0x57, - 0xf8, 0x68, 0xec, 0xed, 0xc9, 0xd3, 0x50, 0xc4, 0xea, 0x77, 0x10, 0x46, 0x81, 0x0c, 0xd8, 0x06, - 0x0d, 0xfa, 0x6f, 0x4f, 0x3c, 0x39, 0x5d, 0x8c, 0x06, 0xe3, 0x60, 0xbe, 0x37, 0x09, 0x26, 0xc1, - 0x1e, 0x71, 0x47, 0x8b, 0xa7, 0x34, 0xa2, 0x01, 0x7d, 0x29, 0xad, 0xfe, 0xf5, 0x49, 0x10, 0x4c, - 0x66, 0x22, 0x93, 0x92, 0xde, 0x5c, 0xc4, 0x92, 0xcf, 0x43, 0x2d, 0xb0, 0x9f, 0x9b, 0x4f, 0x0a, - 0xdf, 0x15, 0xd1, 0xdc, 0xf3, 0x65, 0xfe, 0x73, 0xe6, 0x8d, 0xe2, 0xbd, 0x71, 0x30, 0x9f, 0x07, - 0x7e, 0x7e, 0x43, 0xfd, 0xbb, 0xff, 0x51, 0x73, 0x1c, 0x9d, 0x86, 0x32, 0xd8, 0x9b, 0x8b, 0xe8, - 0xd9, 0x4c, 0xe8, 0x3f, 0xa5, 0x6c, 0xfd, 0xae, 0x0e, 0x4d, 0x5b, 0x7c, 0xb2, 0x10, 0xb1, 0x64, - 0x37, 0xa1, 0x2e, 0xc6, 0xd3, 0xa0, 0x57, 0xbb, 0x61, 0xdc, 0xdc, 0xbc, 0xcd, 0x06, 0x6a, 0x11, - 0xcd, 0x7d, 0x30, 0x9e, 0x06, 0x87, 0x6b, 0x36, 0x49, 0xb0, 0xb7, 0x60, 0xe3, 0xe9, 0x6c, 0x11, - 0x4f, 0x7b, 0xeb, 0x24, 0x7a, 0xb1, 0x28, 0xfa, 0x43, 0x64, 0x1d, 0xae, 0xd9, 0x4a, 0x06, 0xa7, - 0xf5, 0xfc, 0xa7, 0x41, 0xaf, 0x5e, 0x35, 0xed, 0x91, 0xff, 0x94, 0xa6, 0x45, 0x09, 0xb6, 0x0f, - 0x10, 0x0b, 0xe9, 0x04, 0xa1, 0xf4, 0x02, 0xbf, 0xb7, 0x41, 0xf2, 0x57, 0x8b, 0xf2, 0x0f, 0x85, - 0xfc, 0x29, 0xb1, 0x0f, 0xd7, 0x6c, 0x33, 0x4e, 0x06, 0xa8, 0xe9, 0xf9, 0x9e, 0x74, 0xc6, 0x53, - 0xee, 0xf9, 0xbd, 0x46, 0x95, 0xe6, 0x91, 0xef, 0xc9, 0x7b, 0xc8, 0x46, 0x4d, 0x2f, 0x19, 0xe0, - 0x51, 0x3e, 0x59, 0x88, 0xe8, 0xb4, 0xd7, 0xac, 0x3a, 0xca, 0xcf, 0x90, 0x85, 0x47, 0x21, 0x19, - 0x76, 0x17, 0x36, 0x47, 0x62, 0xe2, 0xf9, 0xce, 0x68, 0x16, 0x8c, 0x9f, 0xf5, 0x5a, 0xa4, 0xd2, - 0x2b, 0xaa, 0x0c, 0x51, 0x60, 0x88, 0xfc, 0xc3, 0x35, 0x1b, 0x46, 0xe9, 0x88, 0xdd, 0x86, 0xd6, - 0x78, 0x2a, 0xc6, 0xcf, 0x1c, 0xb9, 0xec, 0x99, 0xa4, 0x79, 0xb9, 0xa8, 0x79, 0x0f, 0xb9, 0x8f, - 0x96, 0x87, 0x6b, 0x76, 0x73, 0xac, 0x3e, 0xd9, 0x7b, 0x60, 0x0a, 0xdf, 0xd5, 0xcb, 0x6d, 0x92, - 0xd2, 0x95, 0xd2, 0xbd, 0xf8, 0x6e, 0xb2, 0x58, 0x4b, 0xe8, 0x6f, 0x36, 0x80, 0x06, 0x3a, 0x8a, - 0x27, 0x7b, 0x5b, 0xa4, 0x73, 0xa9, 0xb4, 0x10, 0xf1, 0x0e, 0xd7, 0x6c, 0x2d, 0x85, 0xe6, 0x73, - 0xc5, 0xcc, 0x3b, 0x11, 0x11, 0x6e, 0xee, 0x62, 0x95, 0xf9, 0xee, 0x2b, 0x3e, 0x6d, 0xcf, 0x74, - 0x93, 0xc1, 0xb0, 0x09, 0x1b, 0x27, 0x7c, 0xb6, 0x10, 0xd6, 0x9b, 0xb0, 0x99, 0xf3, 0x14, 0xd6, - 0x83, 0xe6, 0x5c, 0xc4, 0x31, 0x9f, 0x88, 0x9e, 0x71, 0xc3, 0xb8, 0x69, 0xda, 0xc9, 0xd0, 0x6a, - 0xc3, 0x56, 0xde, 0x4f, 0x72, 0x8a, 0xe8, 0x0b, 0xa8, 0x78, 0x22, 0xa2, 0x18, 0x1d, 0x40, 0x2b, - 0xea, 0xa1, 0xf5, 0x3e, 0x74, 0xcb, 0x4e, 0xc0, 0xba, 0xb0, 0xfe, 0x4c, 0x9c, 0x6a, 0x49, 0xfc, - 0x64, 0x97, 0xf4, 0x86, 0xc8, 0x8b, 0x4d, 0x5b, 0xef, 0xee, 0x8b, 0x5a, 0xaa, 0x9c, 0xfa, 0x01, - 0xdb, 0x87, 0x3a, 0x46, 0x21, 0x69, 0x6f, 0xde, 0xee, 0x0f, 0x54, 0x88, 0x0e, 0x92, 0x10, 0x1d, - 0x3c, 0x4a, 0x42, 0x74, 
0xd8, 0xfa, 0xf2, 0xc5, 0xf5, 0xb5, 0x2f, 0xfe, 0x78, 0xdd, 0xb0, 0x49, - 0x83, 0x5d, 0xc3, 0xab, 0xe4, 0x9e, 0xef, 0x78, 0xae, 0x5e, 0xa7, 0x49, 0xe3, 0x23, 0x97, 0x1d, - 0x40, 0x77, 0x1c, 0xf8, 0xb1, 0xf0, 0xe3, 0x45, 0xec, 0x84, 0x3c, 0xe2, 0xf3, 0x58, 0x47, 0x49, - 0x72, 0x71, 0xf7, 0x12, 0xf6, 0x31, 0x71, 0xed, 0xce, 0xb8, 0x48, 0x60, 0x1f, 0x00, 0x9c, 0xf0, - 0x99, 0xe7, 0x72, 0x19, 0x44, 0x71, 0xaf, 0x7e, 0x63, 0x3d, 0xa7, 0xfc, 0x24, 0x61, 0x3c, 0x0e, - 0x5d, 0x2e, 0xc5, 0xb0, 0x8e, 0x3b, 0xb3, 0x73, 0xf2, 0xec, 0x0d, 0xe8, 0xf0, 0x30, 0x74, 0x62, - 0xc9, 0xa5, 0x70, 0x46, 0xa7, 0x52, 0xc4, 0x14, 0x49, 0x5b, 0xf6, 0x36, 0x0f, 0xc3, 0x87, 0x48, - 0x1d, 0x22, 0xd1, 0x72, 0xd3, 0x7b, 0x20, 0x27, 0x67, 0x0c, 0xea, 0x2e, 0x97, 0x9c, 0xac, 0xb1, - 0x65, 0xd3, 0x37, 0xd2, 0x42, 0x2e, 0xa7, 0xfa, 0x8c, 0xf4, 0xcd, 0xae, 0x40, 0x63, 0x2a, 0xbc, - 0xc9, 0x54, 0xd2, 0xb1, 0xd6, 0x6d, 0x3d, 0x42, 0xc3, 0x87, 0x51, 0x70, 0x22, 0x28, 0xce, 0x5b, - 0xb6, 0x1a, 0x58, 0x7f, 0x35, 0xe0, 0xc2, 0xb9, 0xc0, 0xc0, 0x79, 0xa7, 0x3c, 0x9e, 0x26, 0x6b, - 0xe1, 0x37, 0x7b, 0x0b, 0xe7, 0xe5, 0xae, 0x88, 0x74, 0xfe, 0xd9, 0xd6, 0x27, 0x3e, 0x24, 0xa2, - 0x3e, 0xa8, 0x16, 0x61, 0x0f, 0xa0, 0x3b, 0xe3, 0xb1, 0x74, 0x94, 0xff, 0x3a, 0x94, 0x5f, 0xd6, - 0x0b, 0x31, 0xf5, 0x31, 0x4f, 0xfc, 0x1c, 0xdd, 0x4a, 0xab, 0xb7, 0x67, 0x05, 0x2a, 0x3b, 0x84, - 0x4b, 0xa3, 0xd3, 0xe7, 0xdc, 0x97, 0x9e, 0x2f, 0x9c, 0x73, 0x36, 0xef, 0xe8, 0xa9, 0x1e, 0x9c, - 0x78, 0xae, 0xf0, 0xc7, 0x89, 0xb1, 0x2f, 0xa6, 0x2a, 0xe9, 0x65, 0xc4, 0xd6, 0x0d, 0x68, 0x17, - 0xa3, 0x98, 0xb5, 0xa1, 0x26, 0x97, 0xfa, 0x84, 0x35, 0xb9, 0xb4, 0xac, 0xd4, 0x03, 0xd3, 0x50, - 0x3a, 0x27, 0xb3, 0x0b, 0x9d, 0x52, 0x58, 0xe7, 0xcc, 0x6d, 0xe4, 0xcd, 0x6d, 0x75, 0x60, 0xbb, - 0x10, 0xcd, 0xd6, 0xe7, 0x1b, 0xd0, 0xb2, 0x45, 0x1c, 0xa2, 0x33, 0xb1, 0x7d, 0x30, 0xc5, 0x72, - 0x2c, 0x54, 0x22, 0x35, 0x4a, 0x69, 0x4a, 0xc9, 0x3c, 0x48, 0xf8, 0x18, 0xd0, 0xa9, 0x30, 0xdb, - 0x2d, 0x80, 0xc0, 0xc5, 0xb2, 0x52, 0x1e, 0x05, 0x6e, 0x15, 0x51, 0xe0, 0x52, 0x49, 0xb6, 0x04, - 0x03, 0xbb, 0x05, 0x18, 0x28, 0x4f, 0x5c, 0xc0, 0x81, 0x3b, 0x15, 0x38, 0x50, 0xde, 0xfe, 0x0a, - 0x20, 0xb8, 0x53, 0x01, 0x04, 0xbd, 0x73, 0x6b, 0x55, 0x22, 0xc1, 0xad, 0x22, 0x12, 0x94, 0x8f, - 0x53, 0x82, 0x82, 0x0f, 0xaa, 0xa0, 0xe0, 0x5a, 0x49, 0x67, 0x25, 0x16, 0xbc, 0x7b, 0x0e, 0x0b, - 0xae, 0x94, 0x54, 0x2b, 0xc0, 0xe0, 0x4e, 0x21, 0x4b, 0x43, 0xe5, 0xd9, 0xaa, 0xd3, 0x34, 0xfb, - 0xfe, 0x79, 0x1c, 0xb9, 0x5a, 0xbe, 0xda, 0x2a, 0x20, 0xd9, 0x2b, 0x01, 0xc9, 0xe5, 0xf2, 0x2e, - 0x4b, 0x48, 0x92, 0xe1, 0xc1, 0x2e, 0xc6, 0x7d, 0xc9, 0xd3, 0x30, 0x47, 0x88, 0x28, 0x0a, 0x22, - 0x9d, 0xb0, 0xd5, 0xc0, 0xba, 0x89, 0x99, 0x28, 0xf3, 0xaf, 0x97, 0x60, 0x07, 0x39, 0x7d, 0xce, - 0xbb, 0xac, 0x5f, 0x19, 0x99, 0x2e, 0x45, 0x74, 0x3e, 0x8b, 0x99, 0x3a, 0x8b, 0xe5, 0x20, 0xa5, - 0x56, 0x80, 0x14, 0xf6, 0x1d, 0xb8, 0x40, 0x69, 0x84, 0xec, 0xe2, 0x14, 0xd2, 0x5a, 0x07, 0x19, - 0xca, 0x20, 0x2a, 0xbf, 0xbd, 0x0d, 0x17, 0x73, 0xb2, 0x98, 0x62, 0x29, 0x85, 0xd5, 0x29, 0x78, - 0xbb, 0xa9, 0xf4, 0x41, 0x18, 0x1e, 0xf2, 0x78, 0x6a, 0xfd, 0x38, 0x3b, 0x7f, 0x06, 0x57, 0x0c, - 0xea, 0xe3, 0xc0, 0x55, 0xc7, 0xda, 0xb6, 0xe9, 0x1b, 0x21, 0x6c, 0x16, 0x4c, 0x68, 0x55, 0xd3, - 0xc6, 0x4f, 0x94, 0x4a, 0x23, 0xc5, 0x54, 0x21, 0x61, 0xfd, 0xd2, 0xc8, 0xe6, 0xcb, 0x10, 0xac, - 0x0a, 0x6c, 0x8c, 0xff, 0x06, 0x6c, 0x6a, 0xaf, 0x06, 0x36, 0xd6, 0x99, 0x91, 0xdd, 0x48, 0x0a, - 0x23, 0x5f, 0xef, 0x88, 0xe8, 0x1c, 0x9e, 0xef, 0x8a, 0x25, 0x05, 0xfc, 0xba, 0xad, 0x06, 0x09, - 0xc2, 0x37, 0xc8, 0xcc, 0x45, 0x84, 0x6f, 0x12, 
0x4d, 0x0d, 0xd8, 0xeb, 0x04, 0x3f, 0xc1, 0x53, - 0x1d, 0x89, 0xdb, 0x03, 0x5d, 0xe6, 0x1e, 0x23, 0xd1, 0x56, 0xbc, 0x5c, 0x32, 0x35, 0x0b, 0xd8, - 0xf5, 0x1a, 0x98, 0xb8, 0xd1, 0x38, 0xe4, 0x63, 0x41, 0x81, 0x65, 0xda, 0x19, 0xc1, 0x3a, 0x06, - 0x76, 0x3e, 0xa0, 0xd9, 0xfb, 0x50, 0x97, 0x7c, 0x82, 0xf6, 0x46, 0x93, 0xb5, 0x07, 0xaa, 0x32, - 0x1f, 0x7c, 0xf4, 0xe4, 0x98, 0x7b, 0xd1, 0xf0, 0x0a, 0x9a, 0xea, 0xef, 0x2f, 0xae, 0xb7, 0x51, - 0xe6, 0x56, 0x30, 0xf7, 0xa4, 0x98, 0x87, 0xf2, 0xd4, 0x26, 0x1d, 0xeb, 0x1f, 0x06, 0x26, 0xfa, - 0x42, 0xa0, 0x57, 0x1a, 0x2e, 0xf1, 0xe6, 0x5a, 0x0e, 0x93, 0xbf, 0x9a, 0x31, 0xbf, 0x09, 0x30, - 0xe1, 0xb1, 0xf3, 0x29, 0xf7, 0xa5, 0x70, 0xb5, 0x45, 0xcd, 0x09, 0x8f, 0x7f, 0x4e, 0x04, 0x2c, - 0x60, 0x90, 0xbd, 0x88, 0x85, 0x4b, 0xa6, 0x5d, 0xb7, 0x9b, 0x13, 0x1e, 0x3f, 0x8e, 0x85, 0x9b, - 0x9e, 0xab, 0xf9, 0xea, 0xe7, 0x2a, 0xda, 0xb1, 0x55, 0xb6, 0xe3, 0x3f, 0x73, 0x3e, 0x9c, 0x61, - 0xe0, 0xff, 0xff, 0xb9, 0xff, 0x66, 0x20, 0xf4, 0x17, 0xb3, 0x2c, 0x3b, 0x82, 0x0b, 0x69, 0x1c, - 0x39, 0x0b, 0x8a, 0xaf, 0xc4, 0x97, 0x5e, 0x1e, 0x7e, 0xdd, 0x93, 0x22, 0x39, 0x66, 0x3f, 0x81, - 0xab, 0xa5, 0x2c, 0x90, 0x4e, 0x58, 0x7b, 0x69, 0x32, 0xb8, 0x5c, 0x4c, 0x06, 0xc9, 0x7c, 0x89, - 0x25, 0xd6, 0xbf, 0x86, 0x67, 0x7f, 0x0b, 0xeb, 0xa0, 0x3c, 0x36, 0x54, 0xdd, 0xa5, 0xf5, 0x99, - 0x01, 0x9d, 0xd2, 0x66, 0xd8, 0x1e, 0x80, 0x4a, 0xad, 0xb1, 0xf7, 0x3c, 0xa9, 0xc9, 0xbb, 0x7a, - 0xe3, 0x64, 0xb2, 0x87, 0xde, 0x73, 0x61, 0x9b, 0xa3, 0xe4, 0x93, 0x7d, 0x08, 0x1d, 0xa1, 0x2b, - 0xb3, 0x24, 0xf7, 0xd5, 0x0a, 0x20, 0x95, 0xd4, 0x6d, 0xfa, 0xb4, 0x6d, 0x51, 0x18, 0x5b, 0x07, - 0x60, 0xa6, 0xf3, 0xb2, 0x6f, 0x80, 0x39, 0xe7, 0x4b, 0x5d, 0x2f, 0xab, 0x4a, 0xab, 0x35, 0xe7, - 0x4b, 0x2a, 0x95, 0xd9, 0x55, 0x68, 0x22, 0x73, 0xc2, 0xd5, 0x0a, 0xeb, 0x76, 0x63, 0xce, 0x97, - 0x3f, 0xe2, 0xb1, 0xb5, 0x0b, 0xed, 0xe2, 0x22, 0x89, 0x68, 0x82, 0x5d, 0x4a, 0xf4, 0x60, 0x22, - 0xac, 0x87, 0xd0, 0x2e, 0x96, 0xa4, 0x98, 0xc7, 0xa2, 0x60, 0xe1, 0xbb, 0x24, 0xb8, 0x61, 0xab, - 0x01, 0xf6, 0xa3, 0x27, 0x81, 0xba, 0xba, 0x7c, 0x0d, 0xfa, 0x24, 0x90, 0x22, 0x57, 0xc8, 0x2a, - 0x19, 0xeb, 0xb3, 0x0d, 0x68, 0xa8, 0xfa, 0x98, 0x0d, 0x8a, 0x7d, 0x13, 0xde, 0x9b, 0xd6, 0x54, - 0x54, 0xad, 0x98, 0x42, 0xdf, 0x1b, 0xe5, 0x16, 0x66, 0xb8, 0x79, 0xf6, 0xe2, 0x7a, 0x93, 0x70, - 0xe5, 0xe8, 0x7e, 0xd6, 0xcf, 0xac, 0x2a, 0xf7, 0x93, 0xe6, 0xa9, 0xfe, 0xca, 0xcd, 0xd3, 0x55, - 0x68, 0xfa, 0x8b, 0xb9, 0x23, 0x97, 0xb1, 0x8e, 0xcf, 0x86, 0xbf, 0x98, 0x3f, 0x5a, 0xc6, 0x78, - 0x07, 0x32, 0x90, 0x7c, 0x46, 0x2c, 0x15, 0x9d, 0x2d, 0x22, 0x20, 0x73, 0x1f, 0xb6, 0x73, 0xf0, - 0xeb, 0xb9, 0xba, 0x4a, 0x6b, 0xe7, 0x3d, 0xe4, 0xe8, 0xbe, 0x3e, 0xe5, 0x66, 0x0a, 0xc7, 0x47, - 0x2e, 0xbb, 0x59, 0xec, 0x15, 0x08, 0xb5, 0x5b, 0xe4, 0x8c, 0xb9, 0x76, 0x00, 0x31, 0x1b, 0x37, - 0x80, 0xee, 0xa9, 0x44, 0x4c, 0x12, 0x69, 0x21, 0x81, 0x98, 0x6f, 0x42, 0x27, 0x03, 0x3e, 0x25, - 0x02, 0x6a, 0x96, 0x8c, 0x4c, 0x82, 0xef, 0xc0, 0x25, 0x5f, 0x2c, 0xa5, 0x53, 0x96, 0xde, 0x24, - 0x69, 0x86, 0xbc, 0x27, 0x45, 0x8d, 0x6f, 0x43, 0x3b, 0x0b, 0x60, 0x92, 0xdd, 0x52, 0x1d, 0x5b, - 0x4a, 0x25, 0xb1, 0x6b, 0xd0, 0x4a, 0xcb, 0x8e, 0x6d, 0x12, 0x68, 0x72, 0x55, 0x6d, 0xa4, 0x85, - 0x4c, 0x24, 0xe2, 0xc5, 0x4c, 0xea, 0x49, 0xda, 0x24, 0x43, 0x85, 0x8c, 0xad, 0xe8, 0x24, 0xfb, - 0x3a, 0x6c, 0xa7, 0x71, 0x43, 0x72, 0x1d, 0x92, 0xdb, 0x4a, 0x88, 0x24, 0xb4, 0x0b, 0xdd, 0x30, - 0x0a, 0xc2, 0x20, 0x16, 0x91, 0xc3, 0x5d, 0x37, 0x12, 0x71, 0xdc, 0xeb, 0xaa, 0xf9, 0x12, 0xfa, - 0x81, 0x22, 0x5b, 0xdf, 0x85, 0xa6, 0xf6, 0x31, 0x74, 0x69, 0xb2, 0x3a, 
0xb9, 0x60, 0xdd, 0x56, - 0x03, 0xcc, 0xdc, 0x07, 0x61, 0x48, 0x5e, 0x56, 0xb7, 0xf1, 0xd3, 0xfa, 0x05, 0x34, 0xf5, 0x85, - 0x55, 0xb6, 0x82, 0x3f, 0x80, 0xad, 0x90, 0x47, 0x78, 0x8c, 0x7c, 0x43, 0x98, 0x14, 0xe4, 0xc7, - 0x3c, 0x92, 0x0f, 0x85, 0x2c, 0xf4, 0x85, 0x9b, 0x24, 0xaf, 0x48, 0xd6, 0x1d, 0xd8, 0x2e, 0xc8, - 0xe0, 0xb6, 0xc8, 0x8f, 0x92, 0x48, 0xa3, 0x41, 0xba, 0x72, 0x2d, 0x5b, 0xd9, 0xba, 0x0b, 0x66, - 0x7a, 0x37, 0x58, 0x37, 0x26, 0x47, 0x37, 0xb4, 0xb9, 0xd5, 0x90, 0x7a, 0xdd, 0xe0, 0x53, 0x11, - 0xe9, 0x98, 0x50, 0x03, 0xeb, 0x31, 0x74, 0x4a, 0x29, 0x9b, 0xdd, 0x82, 0x66, 0xb8, 0x18, 0x39, - 0xc9, 0x1b, 0x45, 0xd6, 0xd5, 0x1e, 0x2f, 0x46, 0x1f, 0x89, 0xd3, 0xa4, 0xab, 0x0d, 0x69, 0x94, - 0x4d, 0x5b, 0xcb, 0x4f, 0x3b, 0x83, 0x56, 0x12, 0xfd, 0xec, 0x7b, 0x60, 0xa6, 0x6e, 0x55, 0xca, - 0x91, 0xe9, 0xd2, 0x7a, 0xd2, 0x4c, 0x10, 0xbd, 0x23, 0xf6, 0x26, 0xbe, 0x70, 0x9d, 0x2c, 0x84, - 0x68, 0x8d, 0x96, 0xdd, 0x51, 0x8c, 0x8f, 0x93, 0x78, 0xb1, 0xde, 0x81, 0x86, 0xda, 0x1b, 0xda, - 0x07, 0x67, 0x4e, 0x4a, 0x69, 0xfc, 0xae, 0x4c, 0xe6, 0x7f, 0x30, 0xa0, 0x95, 0x64, 0xc1, 0x4a, - 0xa5, 0xc2, 0xa6, 0x6b, 0x5f, 0x75, 0xd3, 0xff, 0xfb, 0xc4, 0x73, 0x0b, 0x98, 0xca, 0x2f, 0x27, - 0x81, 0xf4, 0xfc, 0x89, 0xa3, 0x6c, 0xad, 0x72, 0x50, 0x97, 0x38, 0x4f, 0x88, 0x71, 0x8c, 0xf4, - 0xdb, 0x9f, 0x6f, 0x40, 0xe7, 0x60, 0x78, 0xef, 0xe8, 0x20, 0x0c, 0x67, 0xde, 0x98, 0x53, 0xfd, - 0xbe, 0x07, 0x75, 0xea, 0x50, 0x2a, 0xde, 0x46, 0xfb, 0x55, 0xad, 0x32, 0xbb, 0x0d, 0x1b, 0xd4, - 0xa8, 0xb0, 0xaa, 0x27, 0xd2, 0x7e, 0x65, 0xc7, 0x8c, 0x8b, 0xa8, 0x56, 0xe6, 0xfc, 0x4b, 0x69, - 0xbf, 0xaa, 0x6d, 0x66, 0x1f, 0x82, 0x99, 0xb5, 0x18, 0xab, 0xde, 0x4b, 0xfb, 0x2b, 0x1b, 0x68, - 0xd4, 0xcf, 0xca, 0xb1, 0x55, 0xcf, 0x7e, 0xfd, 0x95, 0x9d, 0x26, 0xdb, 0x87, 0x66, 0x52, 0xc4, - 0x56, 0xbf, 0x68, 0xf6, 0x57, 0x34, 0xb7, 0x68, 0x1e, 0xd5, 0x35, 0x54, 0x3d, 0xbb, 0xf6, 0x2b, - 0x3b, 0x70, 0xf6, 0x1e, 0x34, 0x74, 0x65, 0x51, 0xf9, 0xaa, 0xd9, 0xaf, 0x6e, 0x51, 0xf1, 0x90, - 0x59, 0xdf, 0xb4, 0xea, 0x69, 0xb8, 0xbf, 0xf2, 0xa9, 0x80, 0x1d, 0x00, 0xe4, 0x8a, 0xff, 0x95, - 0x6f, 0xbe, 0xfd, 0xd5, 0x4f, 0x00, 0xec, 0x2e, 0xb4, 0xb2, 0x67, 0x9d, 0xea, 0x57, 0xdc, 0xfe, - 0xaa, 0xae, 0x7c, 0xf8, 0xda, 0xbf, 0xfe, 0xbc, 0x63, 0xfc, 0xfa, 0x6c, 0xc7, 0xf8, 0xcd, 0xd9, - 0x8e, 0xf1, 0xe5, 0xd9, 0x8e, 0xf1, 0xfb, 0xb3, 0x1d, 0xe3, 0x4f, 0x67, 0x3b, 0xc6, 0x6f, 0xff, - 0xb2, 0x63, 0x8c, 0x1a, 0xe4, 0xfe, 0xef, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x43, 0x2c, - 0xa9, 0xb5, 0x18, 0x00, 0x00, + 0x15, 0xdf, 0xd1, 0x6a, 0x25, 0xcd, 0xd3, 0xea, 0x23, 0xed, 0xb5, 0x2d, 0x8b, 0xb0, 0xeb, 0x1a, + 0x43, 0xe2, 0x25, 0x8e, 0x36, 0x6c, 0x08, 0xb5, 0x8e, 0x43, 0xaa, 0x56, 0xb6, 0x61, 0xb7, 0x12, + 0x60, 0x19, 0xdb, 0xcb, 0x85, 0xaa, 0xa9, 0x96, 0xa6, 0x2d, 0x4d, 0x59, 0x9a, 0x99, 0xcc, 0xb4, + 0x36, 0x5a, 0x1f, 0x73, 0xce, 0x21, 0x07, 0xfe, 0x08, 0xfe, 0x84, 0x1c, 0x39, 0x51, 0x39, 0x72, + 0xe0, 0x6c, 0x60, 0x29, 0x0e, 0x70, 0xa5, 0xa8, 0xe2, 0x48, 0xf5, 0xeb, 0xee, 0xf9, 0xda, 0x91, + 0x89, 0x03, 0x27, 0x2e, 0x52, 0xf7, 0xfb, 0xe8, 0x8f, 0x37, 0xef, 0xbd, 0xdf, 0x7b, 0x0d, 0xd7, + 0xe8, 0x68, 0xec, 0xed, 0xf1, 0xf3, 0x90, 0xc5, 0xf2, 0x77, 0x10, 0x46, 0x01, 0x0f, 0xc8, 0x06, + 0x4e, 0xfa, 0x6f, 0x4f, 0x3c, 0x3e, 0x5d, 0x8c, 0x06, 0xe3, 0x60, 0xbe, 0x37, 0x09, 0x26, 0xc1, + 0x1e, 0x72, 0x47, 0x8b, 0xa7, 0x38, 0xc3, 0x09, 0x8e, 0xa4, 0x56, 0x7f, 0x67, 0x12, 0x04, 0x93, + 0x19, 0x4b, 0xa5, 0xb8, 0x37, 0x67, 0x31, 0xa7, 0xf3, 0x50, 0x09, 0x1c, 0x64, 0xd6, 0xe3, 0xcc, + 0x77, 0x59, 0x34, 0xf7, 0x7c, 0x9e, 0x1d, 0xce, 0xbc, 0x51, 0xbc, 
0x37, 0x0e, 0xe6, 0xf3, 0xc0, + 0xcf, 0x1e, 0xa8, 0x7f, 0xef, 0x3f, 0x6a, 0x8e, 0xa3, 0xf3, 0x90, 0x07, 0x7b, 0x73, 0x16, 0x3d, + 0x9b, 0x31, 0xf5, 0x27, 0x95, 0xad, 0xdf, 0x55, 0xa1, 0x6e, 0xb3, 0x4f, 0x16, 0x2c, 0xe6, 0xe4, + 0x36, 0x54, 0xd9, 0x78, 0x1a, 0xf4, 0x2a, 0x37, 0x8d, 0xdb, 0xcd, 0x7d, 0x32, 0x90, 0x9b, 0x28, + 0xee, 0xc3, 0xf1, 0x34, 0x38, 0x5a, 0xb3, 0x51, 0x82, 0xbc, 0x05, 0x1b, 0x4f, 0x67, 0x8b, 0x78, + 0xda, 0x5b, 0x47, 0xd1, 0x2b, 0x79, 0xd1, 0x1f, 0x0b, 0xd6, 0xd1, 0x9a, 0x2d, 0x65, 0xc4, 0xb2, + 0x9e, 0xff, 0x34, 0xe8, 0x55, 0xcb, 0x96, 0x3d, 0xf6, 0x9f, 0xe2, 0xb2, 0x42, 0x82, 0x1c, 0x00, + 0xc4, 0x8c, 0x3b, 0x41, 0xc8, 0xbd, 0xc0, 0xef, 0x6d, 0xa0, 0xfc, 0xf5, 0xbc, 0xfc, 0x23, 0xc6, + 0x7f, 0x8e, 0xec, 0xa3, 0x35, 0xdb, 0x8c, 0xf5, 0x44, 0x68, 0x7a, 0xbe, 0xc7, 0x9d, 0xf1, 0x94, + 0x7a, 0x7e, 0xaf, 0x56, 0xa6, 0x79, 0xec, 0x7b, 0xfc, 0xbe, 0x60, 0x0b, 0x4d, 0x4f, 0x4f, 0xc4, + 0x55, 0x3e, 0x59, 0xb0, 0xe8, 0xbc, 0x57, 0x2f, 0xbb, 0xca, 0x2f, 0x04, 0x4b, 0x5c, 0x05, 0x65, + 0xc8, 0x3d, 0x68, 0x8e, 0xd8, 0xc4, 0xf3, 0x9d, 0xd1, 0x2c, 0x18, 0x3f, 0xeb, 0x35, 0x50, 0xa5, + 0x97, 0x57, 0x19, 0x0a, 0x81, 0xa1, 0xe0, 0x1f, 0xad, 0xd9, 0x30, 0x4a, 0x66, 0x64, 0x1f, 0x1a, + 0xe3, 0x29, 0x1b, 0x3f, 0x73, 0xf8, 0xb2, 0x67, 0xa2, 0xe6, 0xd5, 0xbc, 0xe6, 0x7d, 0xc1, 0x7d, + 0xbc, 0x3c, 0x5a, 0xb3, 0xeb, 0x63, 0x39, 0x24, 0xef, 0x81, 0xc9, 0x7c, 0x57, 0x6d, 0xd7, 0x44, + 0xa5, 0x6b, 0x85, 0xef, 0xe2, 0xbb, 0x7a, 0xb3, 0x06, 0x53, 0x63, 0x32, 0x80, 0x9a, 0x70, 0x14, + 0x8f, 0xf7, 0x36, 0x51, 0x67, 0xab, 0xb0, 0x11, 0xf2, 0x8e, 0xd6, 0x6c, 0x25, 0x25, 0xcc, 0xe7, + 0xb2, 0x99, 0x77, 0xc6, 0x22, 0x71, 0xb8, 0x2b, 0x65, 0xe6, 0x7b, 0x20, 0xf9, 0x78, 0x3c, 0xd3, + 0xd5, 0x93, 0x61, 0x1d, 0x36, 0xce, 0xe8, 0x6c, 0xc1, 0xac, 0x37, 0xa1, 0x99, 0xf1, 0x14, 0xd2, + 0x83, 0xfa, 0x9c, 0xc5, 0x31, 0x9d, 0xb0, 0x9e, 0x71, 0xd3, 0xb8, 0x6d, 0xda, 0x7a, 0x6a, 0xb5, + 0x61, 0x33, 0xeb, 0x27, 0xd6, 0x3c, 0x51, 0x14, 0xbe, 0x20, 0x14, 0xcf, 0x58, 0x14, 0x0b, 0x07, + 0x50, 0x8a, 0x6a, 0x4a, 0x6e, 0x41, 0x0b, 0xed, 0xe0, 0x68, 0xbe, 0xf0, 0xd3, 0xaa, 0xbd, 0x89, + 0xc4, 0x53, 0x25, 0xb4, 0x03, 0xcd, 0x70, 0x3f, 0x4c, 0x44, 0xd6, 0x51, 0x04, 0xc2, 0xfd, 0x50, + 0x09, 0x58, 0xef, 0x43, 0xb7, 0xe8, 0x4a, 0xa4, 0x0b, 0xeb, 0xcf, 0xd8, 0xb9, 0xda, 0x4f, 0x0c, + 0xc9, 0x96, 0xba, 0x16, 0xee, 0x61, 0xda, 0xea, 0x8e, 0x5f, 0x54, 0x12, 0xe5, 0xc4, 0x9b, 0xc8, + 0x01, 0x54, 0x45, 0x2c, 0xa3, 0x76, 0x73, 0xbf, 0x3f, 0x90, 0x81, 0x3e, 0xd0, 0x81, 0x3e, 0x78, + 0xac, 0x03, 0x7d, 0xd8, 0xf8, 0xea, 0xc5, 0xce, 0xda, 0x17, 0x7f, 0xdc, 0x31, 0x6c, 0xd4, 0x20, + 0x37, 0x84, 0x43, 0x50, 0xcf, 0x77, 0x3c, 0x57, 0xed, 0x53, 0xc7, 0xf9, 0xb1, 0x4b, 0x0e, 0xa1, + 0x3b, 0x0e, 0xfc, 0x98, 0xf9, 0xf1, 0x22, 0x76, 0x42, 0x1a, 0xd1, 0x79, 0xac, 0x62, 0x4d, 0x7f, + 0xfe, 0xfb, 0x9a, 0x7d, 0x82, 0x5c, 0xbb, 0x33, 0xce, 0x13, 0xc8, 0x07, 0x00, 0x67, 0x74, 0xe6, + 0xb9, 0x94, 0x07, 0x51, 0xdc, 0xab, 0xde, 0x5c, 0xcf, 0x28, 0x9f, 0x6a, 0xc6, 0x93, 0xd0, 0xa5, + 0x9c, 0x0d, 0xab, 0xe2, 0x64, 0x76, 0x46, 0x9e, 0xbc, 0x01, 0x1d, 0x1a, 0x86, 0x4e, 0xcc, 0x29, + 0x67, 0xce, 0xe8, 0x9c, 0xb3, 0x18, 0xe3, 0x71, 0xd3, 0x6e, 0xd1, 0x30, 0x7c, 0x24, 0xa8, 0x43, + 0x41, 0xb4, 0xdc, 0xe4, 0x6b, 0x62, 0xa8, 0x10, 0x02, 0x55, 0x97, 0x72, 0x8a, 0xd6, 0xd8, 0xb4, + 0x71, 0x2c, 0x68, 0x21, 0xe5, 0x53, 0x75, 0x47, 0x1c, 0x93, 0x6b, 0x50, 0x9b, 0x32, 0x6f, 0x32, + 0xe5, 0x78, 0xad, 0x75, 0x5b, 0xcd, 0x84, 0xe1, 0xc3, 0x28, 0x38, 0x63, 0x98, 0x2d, 0x1a, 0xb6, + 0x9c, 0x58, 0x7f, 0x35, 0xe0, 0xb5, 0x4b, 0xe1, 0x25, 0xd6, 0x9d, 0xd2, 0x78, 0xaa, 0xf7, 
0x12, + 0x63, 0xf2, 0x96, 0x58, 0x97, 0xba, 0x2c, 0x52, 0x59, 0xac, 0xa5, 0x6e, 0x7c, 0x84, 0x44, 0x75, + 0x51, 0x25, 0x42, 0x1e, 0x42, 0x77, 0x46, 0x63, 0xee, 0xc8, 0x28, 0x70, 0x30, 0x4b, 0xad, 0xe7, + 0x22, 0xf3, 0x63, 0xaa, 0xa3, 0x45, 0x38, 0xa7, 0x52, 0x6f, 0xcf, 0x72, 0x54, 0x72, 0x04, 0x5b, + 0xa3, 0xf3, 0xe7, 0xd4, 0xe7, 0x9e, 0xcf, 0x9c, 0x4b, 0x36, 0xef, 0xa8, 0xa5, 0x1e, 0x9e, 0x79, + 0x2e, 0xf3, 0xc7, 0xda, 0xd8, 0x57, 0x12, 0x95, 0xe4, 0x63, 0xc4, 0xd6, 0x4d, 0x68, 0xe7, 0x73, + 0x01, 0x69, 0x43, 0x85, 0x2f, 0xd5, 0x0d, 0x2b, 0x7c, 0x69, 0x59, 0x89, 0x07, 0x26, 0x01, 0x79, + 0x49, 0x66, 0x17, 0x3a, 0x85, 0xe4, 0x90, 0x31, 0xb7, 0x91, 0x35, 0xb7, 0xd5, 0x81, 0x56, 0x2e, + 0x27, 0x58, 0x9f, 0x6f, 0x40, 0xc3, 0x66, 0x71, 0x28, 0x9c, 0x89, 0x1c, 0x80, 0xc9, 0x96, 0x63, + 0x26, 0xd3, 0xb1, 0x51, 0x48, 0x76, 0x52, 0xe6, 0xa1, 0xe6, 0x8b, 0xb4, 0x90, 0x08, 0x93, 0xdd, + 0x1c, 0x94, 0x5c, 0x29, 0x2a, 0x65, 0xb1, 0xe4, 0x4e, 0x1e, 0x4b, 0xb6, 0x0a, 0xb2, 0x05, 0x30, + 0xd9, 0xcd, 0x81, 0x49, 0x71, 0xe1, 0x1c, 0x9a, 0xdc, 0x2d, 0x41, 0x93, 0xe2, 0xf1, 0x57, 0xc0, + 0xc9, 0xdd, 0x12, 0x38, 0xe9, 0x5d, 0xda, 0xab, 0x14, 0x4f, 0xee, 0xe4, 0xf1, 0xa4, 0x78, 0x9d, + 0x02, 0xa0, 0x7c, 0x50, 0x06, 0x28, 0x37, 0x0a, 0x3a, 0x2b, 0x11, 0xe5, 0xdd, 0x4b, 0x88, 0x72, + 0xad, 0xa0, 0x5a, 0x02, 0x29, 0x77, 0x73, 0xb9, 0x1e, 0x4a, 0xef, 0x56, 0x9e, 0xec, 0xc9, 0x0f, + 0x2f, 0xa3, 0xd1, 0xf5, 0xe2, 0xa7, 0x2d, 0x83, 0xa3, 0xbd, 0x02, 0x1c, 0x5d, 0x2d, 0x9e, 0xb2, + 0x80, 0x47, 0x29, 0xaa, 0xec, 0x8a, 0xb8, 0x2f, 0x78, 0x9a, 0xc8, 0x11, 0x2c, 0x8a, 0x82, 0x48, + 0x25, 0x6c, 0x39, 0xb1, 0x6e, 0x8b, 0x4c, 0x94, 0xfa, 0xd7, 0x4b, 0x10, 0x08, 0x9d, 0x3e, 0xe3, + 0x5d, 0xd6, 0x97, 0x46, 0xaa, 0x8b, 0x11, 0x9d, 0xcd, 0x62, 0xa6, 0xca, 0x62, 0x19, 0x60, 0xaa, + 0xe4, 0x81, 0x69, 0x07, 0x9a, 0x22, 0x57, 0x16, 0x30, 0x87, 0x86, 0x1a, 0x73, 0xc8, 0xf7, 0xe0, + 0x35, 0xcc, 0x33, 0x12, 0xbe, 0x54, 0x20, 0x56, 0x31, 0x10, 0x3b, 0x82, 0x21, 0x2d, 0x26, 0x13, + 0xe0, 0xdb, 0x70, 0x25, 0x23, 0x2b, 0xd6, 0xc5, 0x1c, 0x27, 0x93, 0x6f, 0x37, 0x91, 0x3e, 0x0c, + 0xc3, 0x23, 0x1a, 0x4f, 0xad, 0x9f, 0xa6, 0x06, 0x4a, 0xf1, 0x8c, 0x40, 0x75, 0x1c, 0xb8, 0xf2, + 0xde, 0x2d, 0x1b, 0xc7, 0x02, 0xe3, 0x66, 0xc1, 0x04, 0x0f, 0x67, 0xda, 0x62, 0x28, 0xa4, 0x92, + 0x50, 0x32, 0x65, 0xcc, 0x58, 0xbf, 0x36, 0xd2, 0xf5, 0x52, 0x88, 0x2b, 0x43, 0x23, 0xe3, 0xbf, + 0x41, 0xa3, 0xca, 0xab, 0xa1, 0x91, 0x75, 0x61, 0xa4, 0x9f, 0x2c, 0xc1, 0x99, 0x6f, 0x76, 0x45, + 0xe1, 0x3d, 0x9e, 0xef, 0xb2, 0x25, 0x9a, 0x74, 0xdd, 0x96, 0x13, 0x5d, 0x02, 0xd4, 0xd0, 0xcc, + 0xf9, 0x12, 0xa0, 0x8e, 0x34, 0x39, 0x21, 0xb7, 0x10, 0x9f, 0x82, 0xa7, 0x2a, 0x54, 0x5b, 0x03, + 0x55, 0x4d, 0x9f, 0x08, 0xa2, 0x2d, 0x79, 0x99, 0x6c, 0x6b, 0xe6, 0xc0, 0xed, 0x75, 0x30, 0xc5, + 0x41, 0xe3, 0x90, 0x8e, 0x19, 0x46, 0x9e, 0x69, 0xa7, 0x04, 0xeb, 0x04, 0xc8, 0xe5, 0x88, 0x27, + 0xef, 0x43, 0x95, 0xd3, 0x89, 0xb0, 0xb7, 0x30, 0x59, 0x7b, 0x20, 0x1b, 0x80, 0xc1, 0x47, 0xa7, + 0x27, 0xd4, 0x8b, 0x86, 0xd7, 0x84, 0xa9, 0xfe, 0xfe, 0x62, 0xa7, 0x2d, 0x64, 0xee, 0x04, 0x73, + 0x8f, 0xb3, 0x79, 0xc8, 0xcf, 0x6d, 0xd4, 0xb1, 0xfe, 0x61, 0x08, 0x24, 0xc8, 0x65, 0x82, 0x52, + 0xc3, 0x69, 0x77, 0xaf, 0x64, 0x40, 0xfb, 0xeb, 0x19, 0xf3, 0xdb, 0x00, 0x13, 0x1a, 0x3b, 0x9f, + 0x52, 0x9f, 0x33, 0x57, 0x59, 0xd4, 0x9c, 0xd0, 0xf8, 0x97, 0x48, 0x10, 0x15, 0x8e, 0x60, 0x2f, + 0x62, 0xe6, 0xa2, 0x69, 0xd7, 0xed, 0xfa, 0x84, 0xc6, 0x4f, 0x62, 0xe6, 0x26, 0xf7, 0xaa, 0xbf, + 0xfa, 0xbd, 0xf2, 0x76, 0x6c, 0x14, 0xed, 0xf8, 0xcf, 0x8c, 0x0f, 0xa7, 0x20, 0xf9, 0xff, 0x7f, + 0xef, 0xbf, 0x19, 
0xa2, 0x36, 0xc8, 0xa7, 0x61, 0x72, 0x0c, 0xaf, 0x25, 0x71, 0xe4, 0x2c, 0x30, + 0xbe, 0xb4, 0x2f, 0xbd, 0x3c, 0xfc, 0xba, 0x67, 0x79, 0x72, 0x4c, 0x7e, 0x06, 0xd7, 0x0b, 0x59, + 0x20, 0x59, 0xb0, 0xf2, 0xd2, 0x64, 0x70, 0x35, 0x9f, 0x0c, 0xf4, 0x7a, 0xda, 0x12, 0xeb, 0xdf, + 0xc0, 0xb3, 0xbf, 0x23, 0x0a, 0xa5, 0x2c, 0x78, 0x94, 0x7d, 0x4b, 0xeb, 0x33, 0x03, 0x3a, 0x85, + 0xc3, 0x90, 0x3d, 0x00, 0x99, 0x5a, 0x63, 0xef, 0xb9, 0x2e, 0xda, 0xbb, 0xea, 0xe0, 0x68, 0xb2, + 0x47, 0xde, 0x73, 0x66, 0x9b, 0x23, 0x3d, 0x24, 0x1f, 0x42, 0x87, 0xa9, 0xd2, 0x4d, 0xe7, 0xbe, + 0x4a, 0x0e, 0xc5, 0x74, 0x61, 0xa7, 0x6e, 0xdb, 0x66, 0xb9, 0xb9, 0x75, 0x08, 0x66, 0xb2, 0x2e, + 0xf9, 0x16, 0x98, 0x73, 0xba, 0x54, 0x05, 0xb5, 0x2c, 0xc5, 0x1a, 0x73, 0xba, 0xc4, 0x5a, 0x9a, + 0x5c, 0x87, 0xba, 0x60, 0x4e, 0xa8, 0xdc, 0x61, 0xdd, 0xae, 0xcd, 0xe9, 0xf2, 0x27, 0x34, 0xb6, + 0x76, 0xa1, 0x9d, 0xdf, 0x44, 0x8b, 0x6a, 0x70, 0x93, 0xa2, 0x87, 0x13, 0x66, 0x3d, 0x82, 0x76, + 0xbe, 0x66, 0x15, 0x79, 0x2c, 0x0a, 0x16, 0xbe, 0x8b, 0x82, 0x1b, 0xb6, 0x9c, 0x88, 0xb6, 0xf7, + 0x2c, 0x90, 0x9f, 0x2e, 0x5b, 0xa4, 0x9e, 0x06, 0x9c, 0x65, 0x2a, 0x5d, 0x29, 0x63, 0x7d, 0xb6, + 0x01, 0x35, 0x59, 0x40, 0x93, 0x41, 0xbe, 0x3d, 0x13, 0xdf, 0x4d, 0x69, 0x4a, 0xaa, 0x52, 0x4c, + 0xb0, 0xf1, 0x8d, 0x62, 0x8f, 0x33, 0x6c, 0x5e, 0xbc, 0xd8, 0xa9, 0x23, 0xae, 0x1c, 0x3f, 0x48, + 0x1b, 0x9e, 0x55, 0xfd, 0x80, 0xee, 0xae, 0xaa, 0xaf, 0xdc, 0x5d, 0x5d, 0x87, 0xba, 0xbf, 0x98, + 0x3b, 0x7c, 0x19, 0xab, 0xf8, 0xac, 0xf9, 0x8b, 0xf9, 0xe3, 0x65, 0x2c, 0xbe, 0x01, 0x0f, 0x38, + 0x9d, 0x21, 0x4b, 0x46, 0x67, 0x03, 0x09, 0x82, 0x79, 0x00, 0xad, 0x0c, 0xfc, 0x7a, 0xae, 0x2a, + 0xe3, 0xda, 0x59, 0x0f, 0x39, 0x7e, 0xa0, 0x6e, 0xd9, 0x4c, 0xe0, 0xf8, 0xd8, 0x25, 0xb7, 0xf3, + 0xcd, 0x04, 0xa2, 0x76, 0x03, 0x9d, 0x31, 0xd3, 0x2f, 0x08, 0xcc, 0x16, 0x07, 0x10, 0xee, 0x29, + 0x45, 0x4c, 0x14, 0x69, 0x08, 0x02, 0x32, 0xdf, 0x84, 0x4e, 0x0a, 0x7c, 0x52, 0x04, 0xe4, 0x2a, + 0x29, 0x19, 0x05, 0xdf, 0x81, 0x2d, 0x9f, 0x2d, 0xb9, 0x53, 0x94, 0x6e, 0xa2, 0x34, 0x11, 0xbc, + 0xd3, 0xbc, 0xc6, 0x77, 0xa1, 0x9d, 0x06, 0x30, 0xca, 0x6e, 0xca, 0x96, 0x2e, 0xa1, 0xa2, 0xd8, + 0x0d, 0x68, 0x24, 0x65, 0x47, 0x0b, 0x05, 0xea, 0x54, 0x56, 0x1b, 0x49, 0x21, 0x13, 0xb1, 0x78, + 0x31, 0xe3, 0x6a, 0x91, 0x36, 0xca, 0x60, 0x21, 0x63, 0x4b, 0x3a, 0xca, 0xde, 0x82, 0x56, 0x12, + 0x37, 0x28, 0xd7, 0x41, 0xb9, 0x4d, 0x4d, 0x44, 0xa1, 0x5d, 0xe8, 0x86, 0x51, 0x10, 0x06, 0x31, + 0x8b, 0x1c, 0xea, 0xba, 0x11, 0x8b, 0xe3, 0x5e, 0x57, 0xae, 0xa7, 0xe9, 0x87, 0x92, 0x6c, 0x7d, + 0x1f, 0xea, 0xba, 0x9e, 0xda, 0x82, 0x0d, 0xb4, 0x3a, 0xba, 0x60, 0xd5, 0x96, 0x13, 0x91, 0xb9, + 0x0f, 0xc3, 0x50, 0xbd, 0x0a, 0x88, 0xa1, 0xf5, 0x2b, 0xa8, 0xab, 0x0f, 0x56, 0xda, 0x2b, 0xfe, + 0x08, 0x36, 0x43, 0x1a, 0x89, 0x6b, 0x64, 0x3b, 0x46, 0x5d, 0xb1, 0x9f, 0xd0, 0x88, 0x3f, 0x62, + 0x3c, 0xd7, 0x38, 0x36, 0x51, 0x5e, 0x92, 0xac, 0xbb, 0xd0, 0xca, 0xc9, 0x88, 0x63, 0xa1, 0x1f, + 0xe9, 0x48, 0xc3, 0x49, 0xb2, 0x73, 0x25, 0xdd, 0xd9, 0xba, 0x07, 0x66, 0xf2, 0x6d, 0x44, 0x61, + 0xa9, 0xaf, 0x6e, 0x28, 0x73, 0xcb, 0x29, 0x36, 0xc3, 0xc1, 0xa7, 0x2c, 0x52, 0x31, 0x21, 0x27, + 0xd6, 0x13, 0xe8, 0x14, 0x52, 0x36, 0xb9, 0x03, 0xf5, 0x70, 0x31, 0x72, 0xf4, 0x23, 0x46, 0xda, + 0xf6, 0x9e, 0x2c, 0x46, 0x1f, 0xb1, 0x73, 0xdd, 0xf6, 0x86, 0x38, 0x4b, 0x97, 0xad, 0x64, 0x97, + 0x9d, 0x41, 0x43, 0x47, 0x3f, 0xf9, 0x01, 0x98, 0x89, 0x5b, 0x15, 0x72, 0x64, 0xb2, 0xb5, 0x5a, + 0x34, 0x15, 0x14, 0xde, 0x11, 0x7b, 0x13, 0x9f, 0xb9, 0x4e, 0x1a, 0x42, 0xb8, 0x47, 0xc3, 0xee, + 0x48, 0xc6, 0xc7, 0x3a, 0x5e, 0xac, 0x77, 
0xa0, 0x26, 0xcf, 0x26, 0xec, 0x23, 0x56, 0xd6, 0xb5, + 0xb6, 0x18, 0x97, 0x26, 0xf3, 0x3f, 0x18, 0xd0, 0xd0, 0x59, 0xb0, 0x54, 0x29, 0x77, 0xe8, 0xca, + 0xd7, 0x3d, 0xf4, 0xff, 0x3e, 0xf1, 0xdc, 0x01, 0x22, 0xf3, 0xcb, 0x59, 0xc0, 0x3d, 0x7f, 0xe2, + 0x48, 0x5b, 0xcb, 0x1c, 0xd4, 0x45, 0xce, 0x29, 0x32, 0x4e, 0x04, 0x7d, 0xff, 0xf3, 0x0d, 0xe8, + 0x1c, 0x0e, 0xef, 0x1f, 0x1f, 0x86, 0xe1, 0xcc, 0x1b, 0x53, 0xac, 0xdf, 0xf7, 0xa0, 0x8a, 0x2d, + 0x4c, 0xc9, 0x13, 0x6c, 0xbf, 0xac, 0x97, 0x26, 0xfb, 0xb0, 0x81, 0x9d, 0x0c, 0x29, 0x7b, 0x89, + 0xed, 0x97, 0xb6, 0xd4, 0x62, 0x13, 0xd9, 0xeb, 0x5c, 0x7e, 0x90, 0xed, 0x97, 0xf5, 0xd5, 0xe4, + 0x43, 0x30, 0xd3, 0x16, 0x63, 0xd5, 0xb3, 0x6c, 0x7f, 0x65, 0x87, 0x2d, 0xf4, 0xd3, 0x72, 0x6c, + 0xd5, 0xeb, 0x62, 0x7f, 0x65, 0x2b, 0x4a, 0x0e, 0xa0, 0xae, 0x8b, 0xd8, 0xf2, 0x87, 0xd3, 0xfe, + 0x8a, 0xee, 0x57, 0x98, 0x47, 0x76, 0x0d, 0x65, 0xaf, 0xbb, 0xfd, 0xd2, 0x16, 0x9d, 0xbc, 0x07, + 0x35, 0x55, 0x59, 0x94, 0x3e, 0x9e, 0xf6, 0xcb, 0x7b, 0x58, 0x71, 0xc9, 0xb4, 0x6f, 0x5a, 0xf5, + 0x02, 0xdd, 0x5f, 0xf9, 0x96, 0x40, 0x0e, 0x01, 0x32, 0xc5, 0xff, 0xca, 0xa7, 0xe5, 0xfe, 0xea, + 0x37, 0x02, 0x72, 0x0f, 0x1a, 0xe9, 0xbb, 0x4f, 0xf9, 0x63, 0x71, 0x7f, 0x55, 0xdb, 0x3e, 0x7c, + 0xfd, 0x5f, 0x7f, 0xde, 0x36, 0x7e, 0x73, 0xb1, 0x6d, 0x7c, 0x79, 0xb1, 0x6d, 0x7c, 0x75, 0xb1, + 0x6d, 0xfc, 0xfe, 0x62, 0xdb, 0xf8, 0xd3, 0xc5, 0xb6, 0xf1, 0xdb, 0xbf, 0x6c, 0x1b, 0xa3, 0x1a, + 0xba, 0xff, 0xbb, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x7c, 0xbd, 0x95, 0x1c, 0x19, 0x00, + 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index 517369b13..ffa321836 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -49,6 +49,8 @@ message RequestFlush { message RequestInfo { string version = 1; + uint64 block_version = 2; + uint64 p2p_version = 3; } // nondeterministic @@ -129,9 +131,12 @@ message ResponseFlush { message ResponseInfo { string data = 1; + string version = 2; - int64 last_block_height = 3; - bytes last_block_app_hash = 4; + uint64 app_version = 3; + + int64 last_block_height = 4; + bytes last_block_app_hash = 5; } // nondeterministic diff --git a/consensus/replay.go b/consensus/replay.go index af6369c3b..bffab8d28 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -11,6 +11,7 @@ import ( "time" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/version" //auto "github.com/tendermint/tendermint/libs/autofile" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" @@ -19,7 +20,6 @@ import ( "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) var crc32c = crc32.MakeTable(crc32.Castagnoli) @@ -227,7 +227,7 @@ func (h *Handshaker) NBlocks() int { func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // Handshake is done via ABCI Info on the query conn. 
- res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: version.Version}) + res, err := proxyApp.Query().InfoSync(proxy.RequestInfo) if err != nil { return fmt.Errorf("Error calling Info: %v", err) } @@ -238,9 +238,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } appHash := res.LastBlockAppHash - h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) + h.logger.Info("ABCI Handshake App Info", + "height", blockHeight, + "hash", fmt.Sprintf("%X", appHash), + "software-version", res.Version, + "protocol-version", res.AppVersion, + ) - // TODO: check app version. + // Set AppVersion on the state. + h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion) // Replay blocks up to the latest in the blockstore. _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 160e777c3..4e1fa2b77 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -20,6 +20,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" auto "github.com/tendermint/tendermint/libs/autofile" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/version" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -337,7 +338,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { t.Fatalf(err.Error()) } - stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) + stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) store.chain = chain store.commits = commits @@ -352,7 +353,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { // run nBlocks against a new client to build up the app state. 
// use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2) - stateDB, state, _ := stateAndStore(config, privVal.GetPubKey()) + stateDB, state, _ := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode) } @@ -442,7 +443,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State { // run the whole chain against this client to build up the tendermint state clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1"))) - proxyApp := proxy.NewAppConns(clientCreator) // sm.NewHandshaker(config, state, store, ReplayLastBlock)) + proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { panic(err) } @@ -588,9 +589,10 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { } // fresh state and mock store -func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (dbm.DB, sm.State, *mockBlockStore) { +func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state.Version.Consensus.App = appVersion store := NewMockBlockStore(config, state.ConsensusParams) return stateDB, state, store } @@ -639,7 +641,7 @@ func TestInitChainUpdateValidators(t *testing.T) { config := ResetConfig("proxy_test_") privVal := privval.LoadFilePV(config.PrivValidatorFile()) - stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) + stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0) oldValAddr := state.Validators.Validators[0].Address diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 980a44892..5ff597a52 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -38,7 +38,8 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) { ///////////////////////////////////////////////////////////////////////////// // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS - // NOTE: we can't import node package because of circular dependency + // NOTE: we can't import node package because of circular dependency. + // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. privValidatorFile := config.PrivValidatorFile() privValidator := privval.LoadOrGenFilePV(privValidatorFile) genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) @@ -51,6 +52,7 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) { if err != nil { return errors.Wrap(err, "failed to make genesis state") } + state.Version.Consensus.App = kvstore.ProtocolVersion blockStore := bc.NewBlockStore(blockStoreDB) proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) proxyApp.SetLogger(logger.With("module", "proxy")) diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index 54b7c899d..afd726174 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -134,10 +134,13 @@ Commit are included in the header of the next block. 
### Info - **Request**: - - `Version (string)`: The Tendermint version + - `Version (string)`: The Tendermint software semantic version + - `BlockVersion (uint64)`: The Tendermint Block Protocol version + - `P2PVersion (uint64)`: The Tendermint P2P Protocol version - **Response**: - `Data (string)`: Some arbitrary information - - `Version (Version)`: Version information + - `Version (string)`: The application software semantic version + - `AppVersion (uint64)`: The application protocol version - `LastBlockHeight (int64)`: Latest block for which the app has called Commit - `LastBlockAppHash ([]byte)`: Latest result of Commit @@ -145,6 +148,7 @@ Commit are included in the header of the next block. - Return information about the application state. - Used to sync Tendermint with the application during a handshake that happens on startup. + - The returned `AppVersion` will be included in the Header of every block. - Tendermint expects `LastBlockAppHash` and `LastBlockHeight` to be updated during `Commit`, ensuring that `Commit` is never called twice for the same block height. diff --git a/node/node.go b/node/node.go index 97de24736..522f18e91 100644 --- a/node/node.go +++ b/node/node.go @@ -195,8 +195,8 @@ func NewNode(config *cfg.Config, return nil, fmt.Errorf("Error starting proxy app connections: %v", err) } - // Create the handshaker, which calls RequestInfo and replays any blocks - // as necessary to sync tendermint with the app. + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync tendermint with the app. consensusLogger := logger.With("module", "consensus") handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) @@ -204,9 +204,12 @@ func NewNode(config *cfg.Config, return nil, fmt.Errorf("Error during handshake: %v", err) } - // reload the state (it may have been updated by the handshake) + // Reload the state. It will have the Version.Consensus.App set by the + // Handshake, and may have other modifications as well (ie. depending on + // what happened during block replay). state = sm.LoadState(stateDB) + // Ensure the state's block version matches that of the software. if state.Version.Consensus.Block != version.BlockProtocol { return nil, fmt.Errorf( "Block version of the software does not match that of the state.\n"+ @@ -359,7 +362,13 @@ func NewNode(config *cfg.Config, var ( p2pLogger = logger.With("module", "p2p") - nodeInfo = makeNodeInfo(config, nodeKey.ID(), txIndexer, genDoc.ChainID) + nodeInfo = makeNodeInfo( + config, + nodeKey.ID(), + txIndexer, + genDoc.ChainID, + p2p.ProtocolVersionWithApp(state.Version.Consensus.App), + ) ) // Setup Transport. 
@@ -764,13 +773,14 @@ func makeNodeInfo( nodeID p2p.ID, txIndexer txindex.TxIndexer, chainID string, + protocolVersion p2p.ProtocolVersion, ) p2p.NodeInfo { txIndexerStatus := "on" if _, ok := txIndexer.(*null.TxIndex); ok { txIndexerStatus = "off" } nodeInfo := p2p.DefaultNodeInfo{ - ProtocolVersion: p2p.InitProtocolVersion, + ProtocolVersion: protocolVersion, ID_: nodeID, Network: chainID, Version: version.TMCoreSemVer, diff --git a/node/node_test.go b/node/node_test.go index f4c1f6a16..3a33e6bbb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -10,7 +10,11 @@ import ( "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/version" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" @@ -91,3 +95,21 @@ func TestNodeDelayedStop(t *testing.T) { startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) } + +func TestNodeSetAppVersion(t *testing.T) { + config := cfg.ResetTestRoot("node_app_version_test") + + // create & start node + n, err := DefaultNewNode(config, log.TestingLogger()) + assert.NoError(t, err, "expected no err on DefaultNewNode") + + // default config uses the kvstore app + var appVersion version.Protocol = kvstore.ProtocolVersion + + // check version is set in state + state := sm.LoadState(n.stateDB) + assert.Equal(t, state.Version.Consensus.App, appVersion) + + // check version is set in node info + assert.Equal(t, n.nodeInfo.(p2p.DefaultNodeInfo).ProtocolVersion.App, appVersion) +} diff --git a/p2p/node_info.go b/p2p/node_info.go index 5874dc857..1d408eb68 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -49,10 +49,17 @@ type ProtocolVersion struct { App version.Protocol `json:"app"` } -var InitProtocolVersion = ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, +// InitProtocolVersion populates the Block and P2P versions, but not the App. +var InitProtocolVersion = ProtocolVersionWithApp(0) + +// ProtocolVersionWithApp returns a fully populated ProtocolVersion +// using the provided App version and the Block and P2P versions defined in the `version` package. +func ProtocolVersionWithApp(appVersion version.Protocol) ProtocolVersion { + return ProtocolVersion{ + P2P: version.P2PProtocol, + Block: version.BlockProtocol, + App: appVersion, + } } //------------------------------------------------------------- @@ -148,7 +155,7 @@ func (info DefaultNodeInfo) ValidateBasic() error { switch txIndex { case "", "on", "off": default: - return fmt.Errorf("info.Other.TxIndex should be either 'on' or 'off', got '%v'", txIndex) + return fmt.Errorf("info.Other.TxIndex should be either 'on', 'off', or empty string, got '%v'", txIndex) } // XXX: Should we be more strict about address formats? 
rpcAddr := other.RPCAddress diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index 5eadb032f..ca98f1be4 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -143,7 +143,7 @@ func TestInfo(t *testing.T) { proxy := NewAppConnTest(cli) t.Log("Connected") - resInfo, err := proxy.InfoSync(types.RequestInfo{Version: ""}) + resInfo, err := proxy.InfoSync(RequestInfo) if err != nil { t.Errorf("Unexpected error: %v", err) } diff --git a/proxy/version.go b/proxy/version.go new file mode 100644 index 000000000..fb506e659 --- /dev/null +++ b/proxy/version.go @@ -0,0 +1,15 @@ +package proxy + +import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/version" +) + +// RequestInfo contains all the information for sending +// the abci.RequestInfo message during handshake with the app. +// It contains only compile-time version information. +var RequestInfo = abci.RequestInfo{ + Version: version.Version, + BlockVersion: version.BlockProtocol.Uint64(), + P2PVersion: version.P2PProtocol.Uint64(), +} diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 3a0ed79cd..e63d22e0c 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -3,10 +3,10 @@ package mock import ( abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) // ABCIApp will send all abci related request to the named app, @@ -23,7 +23,7 @@ var ( ) func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return &ctypes.ResultABCIInfo{a.App.Info(abci.RequestInfo{Version: version.Version})}, nil + return &ctypes.ResultABCIInfo{a.App.Info(proxy.RequestInfo)}, nil } func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 47219563c..2468a5f05 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -3,8 +3,8 @@ package core import ( abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/version" ) // Query the application for some information. @@ -87,7 +87,7 @@ func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctype // } // ``` func ABCIInfo() (*ctypes.ResultABCIInfo, error) { - resInfo, err := proxyAppQuery.InfoSync(abci.RequestInfo{Version: version.Version}) + resInfo, err := proxyAppQuery.InfoSync(proxy.RequestInfo) if err != nil { return nil, err } diff --git a/state/state.go b/state/state.go index aedb2b001..5c1b68a26 100644 --- a/state/state.go +++ b/state/state.go @@ -27,6 +27,10 @@ type Version struct { Software string } +// initStateVersion sets the Consensus.Block and Software versions, +// but leaves the Consensus.App version blank. +// The Consensus.App version will be set during the Handshake, once +// we hear from the app what protocol version it is running. var initStateVersion = Version{ Consensus: version.Consensus{ Block: version.BlockProtocol, diff --git a/version/version.go b/version/version.go index 5a089141f..19b3f3da7 100644 --- a/version/version.go +++ b/version/version.go @@ -28,6 +28,12 @@ const ( // Protocol is used for implementation agnostic versioning. 
type Protocol uint64 +// Uint64 returns the Protocol version as a uint64, +// eg. for compatibility with ABCI types. +func (p Protocol) Uint64() uint64 { + return uint64(p) +} + var ( // P2PProtocol versions all p2p behaviour and msgs. P2PProtocol Protocol = 4 From c3384e88e5fee31ecf2d3c62b20204eae965bf02 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 Oct 2018 17:32:53 -0400 Subject: [PATCH 087/113] adr-016: update int64->uint64; add version to ConsensusParams (#2667) --- .../architecture/adr-016-protocol-versions.md | 51 +++++++++++-------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/docs/architecture/adr-016-protocol-versions.md b/docs/architecture/adr-016-protocol-versions.md index 1ae1f467f..3a2351a56 100644 --- a/docs/architecture/adr-016-protocol-versions.md +++ b/docs/architecture/adr-016-protocol-versions.md @@ -96,7 +96,7 @@ Each component of the software is independently versioned in a modular way and i ## Proposal -Each of BlockVersion, AppVersion, P2PVersion, is a monotonically increasing int64. +Each of BlockVersion, AppVersion, P2PVersion, is a monotonically increasing uint64. To use these versions, we need to update the block Header, the p2p NodeInfo, and the ABCI. @@ -106,8 +106,8 @@ Block Header should include a `Version` struct as its first field like: ``` type Version struct { - Block int64 - App int64 + Block uint64 + App uint64 } ``` @@ -130,9 +130,9 @@ NodeInfo should include a Version struct as its first field like: ``` type Version struct { - P2P int64 - Block int64 - App int64 + P2P uint64 + Block uint64 + App uint64 Other []string } @@ -168,9 +168,9 @@ RequestInfo should add support for protocol versions like: ``` message RequestInfo { - string software_version - int64 block_version - int64 p2p_version + string version + uint64 block_version + uint64 p2p_version } ``` @@ -180,39 +180,46 @@ Similarly, ResponseInfo should return the versions: message ResponseInfo { string data - string software_version - int64 app_version + string version + uint64 app_version int64 last_block_height bytes last_block_app_hash } ``` +The existing `version` fields should be called `software_version` but we leave +them for now to reduce the number of breaking changes. + #### EndBlock Updating the version could be done either with new fields or by using the existing `tags`. Since we're trying to communicate information that will be included in Tendermint block Headers, it should be native to the ABCI, and not -something embedded through some scheme in the tags. +something embedded through some scheme in the tags. Thus, version updates should +be communicated through EndBlock. -ResponseEndBlock will include a new field `version_updates`: +EndBlock already contains `ConsensusParams`. We can add version information to +the ConsensusParams as well: ``` -message ResponseEndBlock { - repeated Validator validator_updates - ConsensusParams consensus_param_updates - repeated common.KVPair tags +message ConsensusParams { - VersionUpdate version_update + BlockSize block_size + EvidenceParams evidence_params + VersionParams version } -message VersionUpdate { - int64 app_version +message VersionParams { + uint64 block_version + uint64 app_version } ``` -Tendermint will use the information in VersionUpdate for the next block it -proposes. +For now, the `block_version` will be ignored, as we do not allow block version +to be updated live. 
If the `app_version` is set, it signals that the app's +protocol version has changed, and the new `app_version` will be included in the +`Block.Header.Version.App` for the next block. ### BlockVersion From e798766a27a0825f5e5deb460d755d2bf8813f96 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 Oct 2018 18:02:20 -0400 Subject: [PATCH 088/113] types: remove Version from CanonicalXxx (#2666) --- docs/spec/blockchain/encoding.md | 1 - types/canonical.go | 10 ++------ types/vote_test.go | 42 +++++++++++++++----------------- 3 files changed, 21 insertions(+), 32 deletions(-) diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index ed92739d0..5657784dc 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -307,7 +307,6 @@ We call this encoding the SignBytes. For instance, SignBytes for a vote is the A ```go type CanonicalVote struct { - Version uint64 `binary:"fixed64"` Height int64 `binary:"fixed64"` Round int64 `binary:"fixed64"` VoteType byte diff --git a/types/canonical.go b/types/canonical.go index 8a33debda..c40f35dd3 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -23,7 +23,6 @@ type CanonicalPartSetHeader struct { } type CanonicalProposal struct { - Version uint64 `binary:"fixed64"` Height int64 `binary:"fixed64"` Round int64 `binary:"fixed64"` Type SignedMsgType // type alias for byte @@ -35,7 +34,6 @@ type CanonicalProposal struct { } type CanonicalVote struct { - Version uint64 `binary:"fixed64"` Height int64 `binary:"fixed64"` Round int64 `binary:"fixed64"` Type SignedMsgType // type alias for byte @@ -45,9 +43,8 @@ type CanonicalVote struct { } type CanonicalHeartbeat struct { - Version uint64 `binary:"fixed64"` - Height int64 `binary:"fixed64"` - Round int `binary:"fixed64"` + Height int64 `binary:"fixed64"` + Round int `binary:"fixed64"` Type byte Sequence int `binary:"fixed64"` ValidatorAddress Address @@ -74,7 +71,6 @@ func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { return CanonicalProposal{ - Version: 0, // TODO Height: proposal.Height, Round: int64(proposal.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) Type: ProposalType, @@ -88,7 +84,6 @@ func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { return CanonicalVote{ - Version: 0, // TODO Height: vote.Height, Round: int64(vote.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) Type: vote.Type, @@ -100,7 +95,6 @@ func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat { return CanonicalHeartbeat{ - Version: 0, // TODO Height: heartbeat.Height, Round: heartbeat.Round, Type: byte(HeartbeatType), diff --git a/types/vote_test.go b/types/vote_test.go index 282953f46..066df4964 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -54,8 +54,7 @@ func TestVoteSignable(t *testing.T) { } func TestVoteSignableTestVectors(t *testing.T) { - voteWithVersion := CanonicalizeVote("", &Vote{Height: 1, Round: 1}) - voteWithVersion.Version = 123 + vote := CanonicalizeVote("", &Vote{Height: 1, Round: 1}) tests := []struct { canonicalVote CanonicalVote @@ -64,20 +63,20 @@ func TestVoteSignableTestVectors(t *testing.T) { { CanonicalizeVote("", &Vote{}), // NOTE: Height and Round are skipped here. 
This case needs to be considered while parsing. - []byte{0xb, 0x2a, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + []byte{0xb, 0x22, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, }, // with proper (fixed size) height and round (PreCommit): { CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrecommitType}), []byte{ 0x1f, // total length - 0x11, // (field_number << 3) | wire_type (version is missing) + 0x9, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height - 0x19, // (field_number << 3) | wire_type + 0x11, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round - 0x20, // (field_number << 3) | wire_type + 0x18, // (field_number << 3) | wire_type 0x2, // PrecommitType - 0x2a, // (field_number << 3) | wire_type + 0x22, // (field_number << 3) | wire_type // remaining fields (timestamp): 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, }, @@ -86,29 +85,26 @@ func TestVoteSignableTestVectors(t *testing.T) { CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrevoteType}), []byte{ 0x1f, // total length - 0x11, // (field_number << 3) | wire_type (version is missing) + 0x9, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height - 0x19, // (field_number << 3) | wire_type + 0x11, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round - 0x20, // (field_number << 3) | wire_type + 0x18, // (field_number << 3) | wire_type 0x1, // PrevoteType - 0x2a, // (field_number << 3) | wire_type + 0x22, // (field_number << 3) | wire_type // remaining fields (timestamp): 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, }, - // containing version (empty type) { - voteWithVersion, + vote, []byte{ - 0x26, // total length - 0x9, // (field_number << 3) | wire_type - 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // version (123) - 0x11, // (field_number << 3) | wire_type + 0x1d, // total length + 0x9, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height - 0x19, // (field_number << 3) | wire_type + 0x11, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields (timestamp): - 0x2a, + 0x22, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, }, // containing non-empty chain_id: @@ -116,14 +112,14 @@ func TestVoteSignableTestVectors(t *testing.T) { CanonicalizeVote("test_chain_id", &Vote{Height: 1, Round: 1}), []byte{ 0x2c, // total length - 0x11, // (field_number << 3) | wire_type + 0x9, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height - 0x19, // (field_number << 3) | wire_type + 0x11, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields: - 0x2a, // (field_number << 3) | wire_type + 0x22, // (field_number << 3) | wire_type 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff, // timestamp - 0x3a, // (field_number << 3) | wire_type + 0x32, // (field_number << 3) | wire_type 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID }, } From 746d137f86f34ecdb5f2a1d2b94a66913c1c9efe Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 Oct 2018 18:26:32 -0400 Subject: [PATCH 089/113] p2p: Restore OriginalAddr (#2668) * p2p: bring back OriginalAddr * p2p: set OriginalAddr * update changelog --- CHANGELOG_PENDING.md | 2 ++ blockchain/reactor_test.go | 1 + p2p/dummy/peer.go | 5 +++++ p2p/peer.go | 28 ++++++++++++++++++++++++++++ p2p/peer_set_test.go | 1 + 
p2p/peer_test.go | 2 +- p2p/pex/pex_reactor_test.go | 1 + p2p/switch.go | 9 ++++++--- p2p/test_util.go | 12 +++++++----- p2p/transport.go | 21 +++++++++++++-------- 10 files changed, 65 insertions(+), 17 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 99c389974..758bfeb2f 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -82,3 +82,5 @@ Proposal or timeoutProposal before entering prevote - [p2p] \#2555 fix p2p switch FlushThrottle value (@goolAdapter) - [libs/event] \#2518 fix event concurrency flaw (@goolAdapter) - [state] \#2616 Pass nil to NewValidatorSet() when genesis file's Validators field is nil +- [p2p] \#2668 Reconnect to originally dialed address (not self-reported + address) for persistent peers diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 7fc7ffb77..fca063e0c 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -206,3 +206,4 @@ func (tp *bcrTestPeer) IsPersistent() bool { return true } func (tp *bcrTestPeer) Get(s string) interface{} { return s } func (tp *bcrTestPeer) Set(string, interface{}) {} func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} } +func (tp *bcrTestPeer) OriginalAddr() *p2p.NetAddress { return nil } diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go index 4871719d4..65ff65fb2 100644 --- a/p2p/dummy/peer.go +++ b/p2p/dummy/peer.go @@ -78,3 +78,8 @@ func (p *peer) Get(key string) interface{} { } return nil } + +// OriginalAddr always returns nil. +func (p *peer) OriginalAddr() *p2p.NetAddress { + return nil +} diff --git a/p2p/peer.go b/p2p/peer.go index 009313141..944174b0e 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -26,6 +26,7 @@ type Peer interface { NodeInfo() NodeInfo // peer's info Status() tmconn.ConnectionStatus + OriginalAddr() *NetAddress Send(byte, []byte) bool TrySend(byte, []byte) bool @@ -43,10 +44,28 @@ type peerConn struct { config *config.P2PConfig conn net.Conn // source connection + originalAddr *NetAddress // nil for inbound connections + // cached RemoteIP() ip net.IP } +func newPeerConn( + outbound, persistent bool, + config *config.P2PConfig, + conn net.Conn, + originalAddr *NetAddress, +) peerConn { + + return peerConn{ + outbound: outbound, + persistent: persistent, + config: config, + conn: conn, + originalAddr: originalAddr, + } +} + // ID only exists for SecretConnection. // NOTE: Will panic if conn is not *SecretConnection. func (pc peerConn) ID() ID { @@ -195,6 +214,15 @@ func (p *peer) NodeInfo() NodeInfo { return p.nodeInfo } +// OriginalAddr returns the original address, which was used to connect with +// the peer. Returns nil for inbound peers. +func (p *peer) OriginalAddr() *NetAddress { + if p.peerConn.outbound { + return p.peerConn.originalAddr + } + return nil +} + // Status returns the peer's ConnectionStatus. 
func (p *peer) Status() tmconn.ConnectionStatus { return p.mconn.Status() diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index c0ad80005..daa9b2c82 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -28,6 +28,7 @@ func (mp *mockPeer) IsPersistent() bool { return true } func (mp *mockPeer) Get(s string) interface{} { return s } func (mp *mockPeer) Set(string, interface{}) {} func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } +func (mp *mockPeer) OriginalAddr() *NetAddress { return nil } // Returns a mock peer func newMockPeer(ip net.IP) *mockPeer { diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 9c330ee52..02f1d2c0f 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -114,7 +114,7 @@ func testOutboundPeerConn( return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") } - pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey) + pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) if err != nil { if cerr := conn.Close(); cerr != nil { return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index b0338c3c2..9d3f49bba 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -402,6 +402,7 @@ func (mockPeer) Send(byte, []byte) bool { return false } func (mockPeer) TrySend(byte, []byte) bool { return false } func (mockPeer) Set(string, interface{}) {} func (mockPeer) Get(string) interface{} { return nil } +func (mockPeer) OriginalAddr() *p2p.NetAddress { return nil } func assertPeersWithTimeout( t *testing.T, diff --git a/p2p/switch.go b/p2p/switch.go index 64e248fc3..b1406b9b0 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -280,9 +280,12 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - // TODO: use the original address dialed, not the self reported one - // See #2618. - addr := peer.NodeInfo().NetAddress() + addr := peer.OriginalAddr() + if addr == nil { + // FIXME: persistent peers can't be inbound right now. 
+ // self-reported address for inbound persistent peers + addr = peer.NodeInfo().NetAddress() + } go sw.reconnectToPeer(addr) } } diff --git a/p2p/test_util.go b/p2p/test_util.go index 4d43175bb..e1f7b5040 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -206,7 +206,7 @@ func testInboundPeerConn( config *config.P2PConfig, ourNodePrivKey crypto.PrivKey, ) (peerConn, error) { - return testPeerConn(conn, config, false, false, ourNodePrivKey) + return testPeerConn(conn, config, false, false, ourNodePrivKey, nil) } func testPeerConn( @@ -214,6 +214,7 @@ func testPeerConn( cfg *config.P2PConfig, outbound, persistent bool, ourNodePrivKey crypto.PrivKey, + originalAddr *NetAddress, ) (pc peerConn, err error) { conn := rawConn @@ -231,10 +232,11 @@ func testPeerConn( // Only the information we already have return peerConn{ - config: cfg, - outbound: outbound, - persistent: persistent, - conn: conn, + config: cfg, + outbound: outbound, + persistent: persistent, + conn: conn, + originalAddr: originalAddr, }, nil } diff --git a/p2p/transport.go b/p2p/transport.go index b20f32f3d..10565d8a9 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -171,7 +171,7 @@ func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { cfg.outbound = false - return mt.wrapPeer(a.conn, a.nodeInfo, cfg), nil + return mt.wrapPeer(a.conn, a.nodeInfo, cfg, nil), nil case <-mt.closec: return nil, &ErrTransportClosed{} } @@ -199,7 +199,7 @@ func (mt *MultiplexTransport) Dial( cfg.outbound = true - p := mt.wrapPeer(secretConn, nodeInfo, cfg) + p := mt.wrapPeer(secretConn, nodeInfo, cfg, &addr) return p, nil } @@ -399,14 +399,19 @@ func (mt *MultiplexTransport) wrapPeer( c net.Conn, ni NodeInfo, cfg peerConfig, + dialedAddr *NetAddress, ) Peer { + + peerConn := newPeerConn( + cfg.outbound, + cfg.persistent, + &mt.p2pConfig, + c, + dialedAddr, + ) + p := newPeer( - peerConn{ - conn: c, - config: &mt.p2pConfig, - outbound: cfg.outbound, - persistent: cfg.persistent, - }, + peerConn, mt.mConfig, ni, cfg.reactorsByCh, From f536089f0b7ff7894df2d656c35bdb0508f4e7f2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 19 Oct 2018 11:39:27 -0400 Subject: [PATCH 090/113] types: dont use SimpleHashFromMap for header. closes #1841 (#2670) * types: dont use SimpleHashFromMap for header. closes #1841 * changelog and spec * comments --- CHANGELOG_PENDING.md | 2 ++ docs/spec/blockchain/encoding.md | 2 +- types/block.go | 42 +++++++++++++++++--------------- 3 files changed, 26 insertions(+), 20 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 758bfeb2f..ed33be782 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -45,6 +45,8 @@ BREAKING CHANGES: * [types] \#2644 Add Version struct to Header * [state] \#2587 Require block.Time of the fist block to be genesis time * [state] \#2644 Require block.Version to match state.Version + * [types] \#2670 Header.Hash() builds Merkle tree out of fields in the same + order they appear in the header, instead of sorting by field name * P2P Protocol * [p2p] \#2654 Add `ProtocolVersion` struct with protocol versions to top of diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 5657784dc..563b0a885 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -216,7 +216,7 @@ prefix) before being concatenated together and hashed. Note: we will abuse notion and invoke `SimpleMerkleRoot` with arguments of type `struct` or type `[]struct`. 
For `struct` arguments, we compute a `[][]byte` containing the hash of each -field in the struct sorted by the hash of the field name. +field in the struct, in the same order the fields appear in the struct. For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `struct` elements. ### Simple Merkle Proof diff --git a/types/block.go b/types/block.go index 06ad55fcc..2a5b5fc4a 100644 --- a/types/block.go +++ b/types/block.go @@ -258,8 +258,10 @@ func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { //----------------------------------------------------------------------------- // Header defines the structure of a Tendermint block header -// NOTE: changes to the Header should be duplicated in the abci Header -// and in /docs/spec/blockchain/blockchain.md +// NOTE: changes to the Header should be duplicated in: +// - header.Hash() +// - abci.Header +// - /docs/spec/blockchain/blockchain.md type Header struct { // basic block info Version version.Consensus `json:"version"` @@ -289,6 +291,8 @@ type Header struct { } // Hash returns the hash of the header. +// It computes a Merkle tree from the header fields +// ordered as they appear in the Header. // Returns nil if ValidatorHash is missing, // since a Header is not valid unless there is // a ValidatorsHash (corresponding to the validator set). @@ -296,23 +300,23 @@ func (h *Header) Hash() cmn.HexBytes { if h == nil || len(h.ValidatorsHash) == 0 { return nil } - return merkle.SimpleHashFromMap(map[string][]byte{ - "Version": cdcEncode(h.Version), - "ChainID": cdcEncode(h.ChainID), - "Height": cdcEncode(h.Height), - "Time": cdcEncode(h.Time), - "NumTxs": cdcEncode(h.NumTxs), - "TotalTxs": cdcEncode(h.TotalTxs), - "LastBlockID": cdcEncode(h.LastBlockID), - "LastCommitHash": cdcEncode(h.LastCommitHash), - "DataHash": cdcEncode(h.DataHash), - "ValidatorsHash": cdcEncode(h.ValidatorsHash), - "NextValidatorsHash": cdcEncode(h.NextValidatorsHash), - "AppHash": cdcEncode(h.AppHash), - "ConsensusHash": cdcEncode(h.ConsensusHash), - "LastResultsHash": cdcEncode(h.LastResultsHash), - "EvidenceHash": cdcEncode(h.EvidenceHash), - "ProposerAddress": cdcEncode(h.ProposerAddress), + return merkle.SimpleHashFromByteSlices([][]byte{ + cdcEncode(h.Version), + cdcEncode(h.ChainID), + cdcEncode(h.Height), + cdcEncode(h.Time), + cdcEncode(h.NumTxs), + cdcEncode(h.TotalTxs), + cdcEncode(h.LastBlockID), + cdcEncode(h.LastCommitHash), + cdcEncode(h.DataHash), + cdcEncode(h.ValidatorsHash), + cdcEncode(h.NextValidatorsHash), + cdcEncode(h.ConsensusHash), + cdcEncode(h.AppHash), + cdcEncode(h.LastResultsHash), + cdcEncode(h.EvidenceHash), + cdcEncode(h.ProposerAddress), }) } From 7c6519adbd62cdc4c1e3003e14bbb61e71725e02 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 19 Oct 2018 13:49:04 -0400 Subject: [PATCH 091/113] Bucky/changelog (#2673) * update changelog, add authors script * update changelog * update changelog --- CHANGELOG_PENDING.md | 59 +++++++++++++++++++++++++++++--------------- scripts/authors.sh | 16 ++++++++++++ 2 files changed, 55 insertions(+), 20 deletions(-) create mode 100755 scripts/authors.sh diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index ed33be782..5490ae77c 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,7 +1,23 @@ # Pending +## v0.26.0 + +*October 19, 2018* + Special thanks to external contributors on this release: -@goolAdapter, @bradyjoestar +@bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu, +@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995 + +This 
release is primarily about adding Version fields to various data structures, +optimizing consensus messages for signing and verification in +restricted environments (like HSMs and the Ethereum Virtual Machine), and +aligning the consensus code with the [specification](https://arxiv.org/abs/1807.04938). +It also includes our first take at a generalized merkle proof system. + +See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new +version. + +Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). BREAKING CHANGES: @@ -9,11 +25,11 @@ BREAKING CHANGES: * [config] \#2232 timeouts as time.Duration, not ints * [config] \#2505 Remove Mempool.RecheckEmpty (it was effectively useless anyways) * [config] `mempool.wal` is disabled by default - * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default - behaviour to `prove=false` * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) * [state] \#2644 Add Version field to State, breaking the format of State as encoded on disk. + * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default + behaviour to `prove=false` * [rpc] \#2654 Remove all `node_info.other.*_version` fields in `/status` and `/net_info` @@ -25,13 +41,13 @@ BREAKING CHANGES: `AppVersion` * Go API - * [node] Remove node.RunForever * [config] \#2232 timeouts as time.Duration, not ints - * [rpc/client] \#2298 `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` - * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`. * [crypto/merkle & lite] \#2298 Various changes to accomodate General Merkle trees * [crypto/merkle] \#2595 Remove all Hasher objects in favor of byte slices * [crypto/merkle] \#2635 merkle.SimpleHashFromTwoHashes is no longer exported + * [node] Remove node.RunForever + * [rpc/client] \#2298 `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` + * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`. * [types] \#2598 `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`, `PrecommitType`. @@ -43,6 +59,8 @@ BREAKING CHANGES: `SignedMsgType` to enumerate. 
* [types] \#2512 Remove the pubkey field from the validator hash * [types] \#2644 Add Version struct to Header + * [types] \#2609 ConsensusParams.Hash() is the hash of the amino encoded + struct instead of the Merkle tree of the fields * [state] \#2587 Require block.Time of the fist block to be genesis time * [state] \#2644 Require block.Version to match state.Version * [types] \#2670 Header.Hash() builds Merkle tree out of fields in the same @@ -52,11 +70,10 @@ BREAKING CHANGES: * [p2p] \#2654 Add `ProtocolVersion` struct with protocol versions to top of DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake - FEATURES: -- [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together - [abci] \#2557 Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` - [abci] \#2662 Add `BlockVersion` and `P2PVersion` to `RequestInfo` +- [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together IMPROVEMENTS: - Additional Metrics @@ -66,23 +83,25 @@ IMPROVEMENTS: - [crypto/ed25519] \#2558 Switch to use latest `golang.org/x/crypto` through our fork at github.com/tendermint/crypto - [tools] \#2238 Binary dependencies are now locked to a specific git commit -- [crypto] \#2099 make crypto random use chacha, and have forward secrecy of generated randomness BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) -- [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time -- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) wait for -timeoutPrecommit before starting next round -- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) wait for -Proposal or timeoutProposal before entering prevote -- [evidence] \#2515 fix db iter leak (@goolAdapter) -- [common/bit_array] Fixed a bug in the `Or` function -- [common/bit_array] Fixed a bug in the `Sub` function (@james-ray) -- [common] \#2534 Make bit array's PickRandom choose uniformly from true bits +- [common] Fixed a bug in the `BitArray.Or` method +- [common] Fixed a bug in the `BitArray.Sub` method (@james-ray) +- [common] \#2534 Fix `BitArray.PickRandom` to choose uniformly from true bits +- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for + timeoutPrecommit before starting next round +- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for + Proposal or timeoutProposal before entering prevote +- [consensus] \#2642 Only propose ValidBlock, not LockedBlock +- [consensus] \#2642 Initialized ValidRound and LockedRound to -1 - [consensus] \#1637 Limit the amount of evidence that can be included in a block -- [p2p] \#2555 fix p2p switch FlushThrottle value (@goolAdapter) -- [libs/event] \#2518 fix event concurrency flaw (@goolAdapter) +- [evidence] \#2515 Fix db iter leak (@goolAdapter) +- [libs/event] \#2518 Fix event concurrency flaw (@goolAdapter) +- [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time - [state] \#2616 Pass nil to NewValidatorSet() when genesis file's Validators field is nil +- [p2p] \#2555 Fix p2p switch FlushThrottle value (@goolAdapter) - [p2p] \#2668 Reconnect to originally dialed address (not self-reported address) for persistent peers + diff --git a/scripts/authors.sh b/scripts/authors.sh new file mode 100755 index 000000000..7aafb0127 --- /dev/null +++ b/scripts/authors.sh @@ -0,0 +1,16 @@ +#! 
/bin/bash + +# Usage: +# `./authors.sh` +# Print a list of all authors who have committed to develop since master. +# +# `./authors.sh ` +# Lookup the email address on Github and print the associated username + +author=$1 + +if [[ "$author" == "" ]]; then + git log master..develop | grep Author | sort | uniq +else + curl -s "https://api.github.com/search/users?q=$author+in%3Aemail&type=Users&utf8=%E2%9C%93" | jq .items[0].login +fi From 30519e8361c19f4bf320ef4d26288ebc621ad725 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 19 Oct 2018 14:23:14 -0400 Subject: [PATCH 092/113] types: first field in Canonical structs is Type (#2675) * types: first field in Canonical structs is Type * fix spec --- docs/spec/blockchain/encoding.md | 4 ++-- types/canonical.go | 14 +++++++------- types/vote_test.go | 24 ++++++++++++------------ 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 563b0a885..2f9fcdca1 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -301,15 +301,15 @@ Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the Signed messages (eg. votes, proposals) in the consensus are encoded using Amino. When signing, the elements of a message are re-ordered so the fixed-length fields -are first, making it easy to quickly check the version, height, round, and type. +are first, making it easy to quickly check the type, height, and round. The `ChainID` is also appended to the end. We call this encoding the SignBytes. For instance, SignBytes for a vote is the Amino encoding of the following struct: ```go type CanonicalVote struct { + Type byte Height int64 `binary:"fixed64"` Round int64 `binary:"fixed64"` - VoteType byte Timestamp time.Time BlockID CanonicalBlockID ChainID string diff --git a/types/canonical.go b/types/canonical.go index c40f35dd3..632dcb624 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -23,9 +23,9 @@ type CanonicalPartSetHeader struct { } type CanonicalProposal struct { + Type SignedMsgType // type alias for byte Height int64 `binary:"fixed64"` Round int64 `binary:"fixed64"` - Type SignedMsgType // type alias for byte POLRound int64 `binary:"fixed64"` Timestamp time.Time BlockPartsHeader CanonicalPartSetHeader @@ -34,19 +34,19 @@ type CanonicalProposal struct { } type CanonicalVote struct { + Type SignedMsgType // type alias for byte Height int64 `binary:"fixed64"` Round int64 `binary:"fixed64"` - Type SignedMsgType // type alias for byte Timestamp time.Time BlockID CanonicalBlockID ChainID string } type CanonicalHeartbeat struct { + Type byte Height int64 `binary:"fixed64"` Round int `binary:"fixed64"` - Type byte - Sequence int `binary:"fixed64"` + Sequence int `binary:"fixed64"` ValidatorAddress Address ValidatorIndex int ChainID string @@ -71,9 +71,9 @@ func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { return CanonicalProposal{ + Type: ProposalType, Height: proposal.Height, Round: int64(proposal.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) - Type: ProposalType, POLRound: int64(proposal.POLRound), Timestamp: proposal.Timestamp, BlockPartsHeader: CanonicalizePartSetHeader(proposal.BlockPartsHeader), @@ -84,9 +84,9 @@ func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { return 
CanonicalVote{ + Type: vote.Type, Height: vote.Height, Round: int64(vote.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) - Type: vote.Type, Timestamp: vote.Timestamp, BlockID: CanonicalizeBlockID(vote.BlockID), ChainID: chainID, @@ -95,9 +95,9 @@ func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat { return CanonicalHeartbeat{ + Type: byte(HeartbeatType), Height: heartbeat.Height, Round: heartbeat.Round, - Type: byte(HeartbeatType), Sequence: heartbeat.Sequence, ValidatorAddress: heartbeat.ValidatorAddress, ValidatorIndex: heartbeat.ValidatorIndex, diff --git a/types/vote_test.go b/types/vote_test.go index 066df4964..2172f0600 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -70,12 +70,12 @@ func TestVoteSignableTestVectors(t *testing.T) { CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrecommitType}), []byte{ 0x1f, // total length - 0x9, // (field_number << 3) | wire_type - 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x8, // (field_number << 3) | wire_type + 0x2, // PrecommitType 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round - 0x18, // (field_number << 3) | wire_type - 0x2, // PrecommitType 0x22, // (field_number << 3) | wire_type // remaining fields (timestamp): 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, @@ -85,12 +85,12 @@ func TestVoteSignableTestVectors(t *testing.T) { CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrevoteType}), []byte{ 0x1f, // total length - 0x9, // (field_number << 3) | wire_type - 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x8, // (field_number << 3) | wire_type + 0x1, // PrevoteType 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round - 0x18, // (field_number << 3) | wire_type - 0x1, // PrevoteType 0x22, // (field_number << 3) | wire_type // remaining fields (timestamp): 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, @@ -99,9 +99,9 @@ func TestVoteSignableTestVectors(t *testing.T) { vote, []byte{ 0x1d, // total length - 0x9, // (field_number << 3) | wire_type - 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields (timestamp): 0x22, @@ -112,9 +112,9 @@ func TestVoteSignableTestVectors(t *testing.T) { CanonicalizeVote("test_chain_id", &Vote{Height: 1, Round: 1}), []byte{ 0x2c, // total length - 0x9, // (field_number << 3) | wire_type - 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields: 0x22, // (field_number << 3) | wire_type From 9d62bd0ad3bf691002a815339b4dd675441bce93 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 19 Oct 2018 14:29:45 -0400 Subject: [PATCH 093/113] crypto: use stdlib crypto/rand. ref #2099 (#2669) * crypto: use stdlib crypto/rand. 
ref #2099 * comment --- crypto/random.go | 42 +++++++++++++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/crypto/random.go b/crypto/random.go index af3286427..914c321b7 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -9,10 +9,11 @@ import ( "sync" "golang.org/x/crypto/chacha20poly1305" - - . "github.com/tendermint/tendermint/libs/common" ) +// NOTE: This is ignored for now until we have time +// to properly review the MixEntropy function - https://github.com/tendermint/tendermint/issues/2099. +// // The randomness here is derived from xoring a chacha20 keystream with // output from crypto/rand's OS Entropy Reader. (Due to fears of the OS' // entropy being backdoored) @@ -23,9 +24,13 @@ var gRandInfo *randInfo func init() { gRandInfo = &randInfo{} - gRandInfo.MixEntropy(randBytes(32)) // Init + + // TODO: uncomment after reviewing MixEntropy - + // https://github.com/tendermint/tendermint/issues/2099 + // gRandInfo.MixEntropy(randBytes(32)) // Init } +// WARNING: This function needs review - https://github.com/tendermint/tendermint/issues/2099. // Mix additional bytes of randomness, e.g. from hardware, user-input, etc. // It is OK to call it multiple times. It does not diminish security. func MixEntropy(seedBytes []byte) { @@ -37,20 +42,28 @@ func randBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) if err != nil { - PanicCrisis(err) + panic(err) } return b } +// This only uses the OS's randomness +func CRandBytes(numBytes int) []byte { + return randBytes(numBytes) +} + +/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099 // This uses the OS and the Seed(s). func CRandBytes(numBytes int) []byte { - b := make([]byte, numBytes) - _, err := gRandInfo.Read(b) - if err != nil { - PanicCrisis(err) - } - return b + return randBytes(numBytes) + b := make([]byte, numBytes) + _, err := gRandInfo.Read(b) + if err != nil { + panic(err) + } + return b } +*/ // CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long. // @@ -60,10 +73,17 @@ func CRandHex(numDigits int) string { return hex.EncodeToString(CRandBytes(numDigits / 2)) } +// Returns a crand.Reader. +func CReader() io.Reader { + return crand.Reader +} + +/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099 // Returns a crand.Reader mixed with user-supplied entropy func CReader() io.Reader { return gRandInfo } +*/ //-------------------------------------------------------------------------------- @@ -75,7 +95,7 @@ type randInfo struct { } // You can call this as many times as you'd like. 
-// XXX TODO review +// XXX/TODO: review - https://github.com/tendermint/tendermint/issues/2099 func (ri *randInfo) MixEntropy(seedBytes []byte) { ri.mtx.Lock() defer ri.mtx.Unlock() From f94eb42ebe1c53db52b07e4080e060acc4077a5a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 19 Oct 2018 20:27:00 -0400 Subject: [PATCH 094/113] Version bump; Update Upgrading.md; linkify Changelog (#2679) * version bump * update UPGRADING.md * add missing pr numbers to changelog pending * linkify changelog --- CHANGELOG_PENDING.md | 94 ++++++++++++++++++++++---------------------- UPGRADING.md | 76 +++++++++++++++++++++++++++++++++++ version/version.go | 4 +- 3 files changed, 125 insertions(+), 49 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 5490ae77c..163c4649f 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -22,86 +22,86 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi BREAKING CHANGES: * CLI/RPC/Config - * [config] \#2232 timeouts as time.Duration, not ints - * [config] \#2505 Remove Mempool.RecheckEmpty (it was effectively useless anyways) - * [config] `mempool.wal` is disabled by default - * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) - * [state] \#2644 Add Version field to State, breaking the format of State as + * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints + * [config] [\#2505](https://github.com/tendermint/tendermint/issues/2505) Remove Mempool.RecheckEmpty (it was effectively useless anyways) + * [config] [\#2490](https://github.com/tendermint/tendermint/issues/2490) `mempool.wal` is disabled by default + * [privval] [\#2459](https://github.com/tendermint/tendermint/issues/2459) Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) + * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as encoded on disk. 
- * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default + * [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false` - * [rpc] \#2654 Remove all `node_info.other.*_version` fields in `/status` and + * [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and `/net_info` * Apps - * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just + * [abci] [\#2298](https://github.com/tendermint/tendermint/issues/2298) ResponseQuery.Proof is now a structured merkle.Proof, not just arbitrary bytes - * [abci] \#2644 Add Version to Header and shift all fields by one - * [abci] \#2662 Bump the field numbers for some `ResponseInfo` fields to make room for + * [abci] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version to Header and shift all fields by one + * [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Bump the field numbers for some `ResponseInfo` fields to make room for `AppVersion` * Go API - * [config] \#2232 timeouts as time.Duration, not ints - * [crypto/merkle & lite] \#2298 Various changes to accomodate General Merkle trees - * [crypto/merkle] \#2595 Remove all Hasher objects in favor of byte slices - * [crypto/merkle] \#2635 merkle.SimpleHashFromTwoHashes is no longer exported - * [node] Remove node.RunForever - * [rpc/client] \#2298 `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` - * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`. - * [types] \#2598 `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`, + * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints + * [crypto/merkle & lite] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Various changes to accomodate General Merkle trees + * [crypto/merkle] [\#2595](https://github.com/tendermint/tendermint/issues/2595) Remove all Hasher objects in favor of byte slices + * [crypto/merkle] [\#2635](https://github.com/tendermint/tendermint/issues/2635) merkle.SimpleHashFromTwoHashes is no longer exported + * [node] [\#2479](https://github.com/tendermint/tendermint/issues/2479) Remove node.RunForever + * [rpc/client] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` + * [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`. + * [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`, `PrecommitType`. * Blockchain Protocol * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: - * \#2459 Use amino encoding instead of JSON in `SignBytes`. - * \#2598 Reorder fields and use fixed sized encoding. - * \#2598 Change `Type` field fromt `string` to `byte` and use new + * [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`. + * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding. + * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Change `Type` field fromt `string` to `byte` and use new `SignedMsgType` to enumerate. 
- * [types] \#2512 Remove the pubkey field from the validator hash - * [types] \#2644 Add Version struct to Header - * [types] \#2609 ConsensusParams.Hash() is the hash of the amino encoded + * [types] [\#2512](https://github.com/tendermint/tendermint/issues/2512) Remove the pubkey field from the validator hash + * [types] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version struct to Header + * [types] [\#2609](https://github.com/tendermint/tendermint/issues/2609) ConsensusParams.Hash() is the hash of the amino encoded struct instead of the Merkle tree of the fields - * [state] \#2587 Require block.Time of the fist block to be genesis time - * [state] \#2644 Require block.Version to match state.Version - * [types] \#2670 Header.Hash() builds Merkle tree out of fields in the same + * [state] [\#2587](https://github.com/tendermint/tendermint/issues/2587) Require block.Time of the fist block to be genesis time + * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version + * [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same order they appear in the header, instead of sorting by field name * P2P Protocol - * [p2p] \#2654 Add `ProtocolVersion` struct with protocol versions to top of + * [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake FEATURES: -- [abci] \#2557 Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` -- [abci] \#2662 Add `BlockVersion` and `P2PVersion` to `RequestInfo` -- [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together +- [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` +- [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo` +- [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together IMPROVEMENTS: - Additional Metrics - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) -- [config] \#2232 Added ValidateBasic method, which performs basic checks -- [crypto/ed25519] \#2558 Switch to use latest `golang.org/x/crypto` through our fork at +- [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Added ValidateBasic method, which performs basic checks +- [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at github.com/tendermint/crypto -- [tools] \#2238 Binary dependencies are now locked to a specific git commit +- [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit BUG FIXES: -- [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) -- [common] Fixed a bug in the `BitArray.Or` method -- [common] Fixed a bug in the `BitArray.Sub` method (@james-ray) -- [common] \#2534 Fix `BitArray.PickRandom` to choose uniformly from true bits +- [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile need call Flush() before rename 
(@goolAdapter) +- [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method +- [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray) +- [common] [\#2534](https://github.com/tendermint/tendermint/issues/2534) Fix `BitArray.PickRandom` to choose uniformly from true bits - [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for timeoutPrecommit before starting next round - [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for Proposal or timeoutProposal before entering prevote -- [consensus] \#2642 Only propose ValidBlock, not LockedBlock -- [consensus] \#2642 Initialized ValidRound and LockedRound to -1 -- [consensus] \#1637 Limit the amount of evidence that can be included in a +- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock +- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1 +- [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a block -- [evidence] \#2515 Fix db iter leak (@goolAdapter) -- [libs/event] \#2518 Fix event concurrency flaw (@goolAdapter) -- [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time -- [state] \#2616 Pass nil to NewValidatorSet() when genesis file's Validators field is nil -- [p2p] \#2555 Fix p2p switch FlushThrottle value (@goolAdapter) -- [p2p] \#2668 Reconnect to originally dialed address (not self-reported +- [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter) +- [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter) +- [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time +- [state] [\#2616](https://github.com/tendermint/tendermint/issues/2616) Pass nil to NewValidatorSet() when genesis file's Validators field is nil +- [p2p] [\#2555](https://github.com/tendermint/tendermint/issues/2555) Fix p2p switch FlushThrottle value (@goolAdapter) +- [p2p] [\#2668](https://github.com/tendermint/tendermint/issues/2668) Reconnect to originally dialed address (not self-reported address) for persistent peers diff --git a/UPGRADING.md b/UPGRADING.md index 81e56e588..cb0830a45 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -3,6 +3,82 @@ This guide provides steps to be followed when you upgrade your applications to a newer version of Tendermint Core. +## v0.26.0 + +New 0.26.0 release contains a lot of changes to core data types. It is not +compatible to the old versions and there is no straight forward way to update +old data to be compatible with the new version. + +To reset the state do: + +``` +$ tendermint unsafe_reset_all +``` + +Here we summarize some other notable changes to be mindful of. + +### Config Changes + +All timeouts must be changed from integers to strings with their duration, for +instance `flush_throttle_timeout = 100` would be changed to +`flush_throttle_timeout = "100ms"` and `timeout_propose = 3000` would be changed +to `timeout_propose = "3s"`. 
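+
+For example, a minimal before/after sketch of the two keys mentioned above (other
+timeout keys in `config.toml` follow the same pattern):
+
+```
+# v0.25.x (integers, in milliseconds)
+flush_throttle_timeout = 100
+timeout_propose = 3000
+
+# v0.26.0 (duration strings)
+flush_throttle_timeout = "100ms"
+timeout_propose = "3s"
+```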
+
+### RPC Changes
+
+The default behaviour of `/abci_query` has been changed to not return a proof,
+and the name of the parameter that controls this has been changed from `trusted`
+to `prove`. To get proofs with your queries, ensure you set `prove=true`.
+
+Various version fields like `amino_version`, `p2p_version`, `consensus_version`,
+and `rpc_version` have been removed from the `node_info.other` and are
+consolidated under the tendermint semantic version (ie. `node_info.version`) and
+the new `block` and `p2p` protocol versions under `node_info.protocol_version`.
+
+### ABCI Changes
+
+Field numbers were bumped in the `Header` and `ResponseInfo` messages to make
+room for new `version` fields. It should be straightforward to recompile the
+protobuf file for these changes.
+
+#### Proofs
+
+The `ResponseQuery.Proof` field is now structured as a `[]ProofOp` to support
+generalized Merkle tree constructions where the leaves of one Merkle tree are
+the root of another. If you don't need this functionality, and you used to
+return `` here, you should instead return a single `ProofOp` with
+just the `Data` field set:
+
+```
+[]ProofOp{
+  ProofOp{
+    Data: ,
+  }
+}
+```
+
+For more information, see:
+
+- [ADR-026](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/architecture/adr-026-general-merkle-proof.md)
+- [Relevant ABCI
+  documentation](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/spec/abci/apps.md#query-proofs)
+- [Description of
+  keys](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/crypto/merkle/proof_key_path.go#L14)
+
+### Go API Changes
+
+#### crypto.merkle
+
+The `merkle.Hasher` interface was removed. Functions which used to take `Hasher`
+now simply take `[]byte`. This means that any objects being Merklized should be
+serialized before they are passed in.
+
+#### node
+
+The `node.RunForever` function was removed. Signal handling and running forever
+should instead be explicitly configured by the caller. See how we do it
+[here](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/cmd/tendermint/commands/run_node.go#L60).
+
 ## v0.25.0
 
 This release has minimal impact.
diff --git a/version/version.go b/version/version.go
index 19b3f3da7..b4664fd77 100644
--- a/version/version.go
+++ b/version/version.go
@@ -18,10 +18,10 @@ const (
 	// TMCoreSemVer is the current version of Tendermint Core.
 	// It's the Semantic Version of the software.
 	// Must be a string because scripts like dist.sh read this file.
- TMCoreSemVer = "0.25.0" + TMCoreSemVer = "0.26.0" // ABCISemVer is the semantic version of the ABCI library - ABCISemVer = "0.14.0" + ABCISemVer = "0.15.0" ABCIVersion = ABCISemVer ) From fe1d59ab7b80c04ddaaa60f93b0f8656c1ed8f4b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 22 Oct 2018 17:55:49 -0400 Subject: [PATCH 095/113] Set protocol versions in NodeInfo from state (#2686) * use types.NewValidator * node: set p2p.ProtocolVersion from state, not globals --- benchmarks/codec_test.go | 2 +- node/node.go | 6 +++++- p2p/node_info.go | 20 ++++++++++++-------- p2p/peer_test.go | 2 +- p2p/test_util.go | 2 +- state/state.go | 11 +---------- types/protobuf_test.go | 18 +++--------------- 7 files changed, 24 insertions(+), 37 deletions(-) diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go index 2be1db156..3e0270286 100644 --- a/benchmarks/codec_test.go +++ b/benchmarks/codec_test.go @@ -14,7 +14,7 @@ import ( func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo { return p2p.DefaultNodeInfo{ - ProtocolVersion: p2p.InitProtocolVersion, + ProtocolVersion: p2p.ProtocolVersion{1, 2, 3}, ID_: id, Moniker: "SOMENAME", Network: "SOMENAME", diff --git a/node/node.go b/node/node.go index 522f18e91..f62a8b472 100644 --- a/node/node.go +++ b/node/node.go @@ -367,7 +367,11 @@ func NewNode(config *cfg.Config, nodeKey.ID(), txIndexer, genDoc.ChainID, - p2p.ProtocolVersionWithApp(state.Version.Consensus.App), + p2p.NewProtocolVersion( + version.P2PProtocol, // global + state.Version.Consensus.Block, + state.Version.Consensus.App, + ), ) ) diff --git a/p2p/node_info.go b/p2p/node_info.go index 1d408eb68..e46174e07 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -49,16 +49,20 @@ type ProtocolVersion struct { App version.Protocol `json:"app"` } -// InitProtocolVersion populates the Block and P2P versions, but not the App. -var InitProtocolVersion = ProtocolVersionWithApp(0) +// defaultProtocolVersion populates the Block and P2P versions using +// the global values, but not the App. +var defaultProtocolVersion = NewProtocolVersion( + version.P2PProtocol, + version.BlockProtocol, + 0, +) -// ProtocolVersionWithApp returns a fully populated ProtocolVersion -// using the provided App version and the Block and P2P versions defined in the `version` package. -func ProtocolVersionWithApp(appVersion version.Protocol) ProtocolVersion { +// NewProtocolVersion returns a fully populated ProtocolVersion. 
+func NewProtocolVersion(p2p, block, app version.Protocol) ProtocolVersion { return ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: appVersion, + P2P: p2p, + Block: block, + App: app, } } diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 02f1d2c0f..d3d9f0c72 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -207,7 +207,7 @@ func (rp *remotePeer) accept(l net.Listener) { func (rp *remotePeer) nodeInfo(l net.Listener) NodeInfo { return DefaultNodeInfo{ - ProtocolVersion: InitProtocolVersion, + ProtocolVersion: defaultProtocolVersion, ID_: rp.Addr().ID, ListenAddr: l.Addr().String(), Network: "testing", diff --git a/p2p/test_util.go b/p2p/test_util.go index e1f7b5040..d72c0c760 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -249,7 +249,7 @@ func testNodeInfo(id ID, name string) NodeInfo { func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo { return DefaultNodeInfo{ - ProtocolVersion: InitProtocolVersion, + ProtocolVersion: defaultProtocolVersion, ID_: id, ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), Network: network, diff --git a/state/state.go b/state/state.go index 5c1b68a26..d6ec6f0be 100644 --- a/state/state.go +++ b/state/state.go @@ -222,7 +222,6 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { return State{}, fmt.Errorf("Error in genesis file: %v", err) } - // Make validators slice var validatorSet, nextValidatorSet *types.ValidatorSet if genDoc.Validators == nil { validatorSet = types.NewValidatorSet(nil) @@ -230,15 +229,7 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { } else { validators := make([]*types.Validator, len(genDoc.Validators)) for i, val := range genDoc.Validators { - pubKey := val.PubKey - address := pubKey.Address() - - // Make validator - validators[i] = &types.Validator{ - Address: address, - PubKey: pubKey, - VotingPower: val.Power, - } + validators[i] = types.NewValidator(val.PubKey, val.Power) } validatorSet = types.NewValidatorSet(validators) nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementAccum(1) diff --git a/types/protobuf_test.go b/types/protobuf_test.go index f8682abf8..c940f1b42 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -30,17 +30,9 @@ func TestABCIValidators(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() // correct validator - tmValExpected := &Validator{ - Address: pkEd.Address(), - PubKey: pkEd, - VotingPower: 10, - } + tmValExpected := NewValidator(pkEd, 10) - tmVal := &Validator{ - Address: pkEd.Address(), - PubKey: pkEd, - VotingPower: 10, - } + tmVal := NewValidator(pkEd, 10) abciVal := TM2PB.ValidatorUpdate(tmVal) tmVals, err := PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) @@ -127,11 +119,7 @@ func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { func TestABCIValidatorWithoutPubKey(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() - abciVal := TM2PB.Validator(&Validator{ - Address: pkEd.Address(), - PubKey: pkEd, - VotingPower: 10, - }) + abciVal := TM2PB.Validator(NewValidator(pkEd, 10)) // pubkey must be nil tmValExpected := abci.Validator{ From be929acd6a726b322fe9dcc61fc6713dfc668349 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 23 Oct 2018 13:21:47 -0400 Subject: [PATCH 096/113] Update to Amino v0.13.0-rc0 (#2687) * types: test tm2pm on fully populated header * upgrade for amino v0.13.0-rc0 * fix lint * comment out final test --- Gopkg.lock | 6 ++-- Gopkg.toml | 2 +- state/state.go | 33 +++++++---------- types/block.go | 32 
++++++++++++++--- types/block_test.go | 18 ++++++---- types/evidence.go | 2 +- types/evidence_test.go | 2 +- types/proto3/block.pb.go | 63 +++++++++++++++++---------------- types/proto3/block.proto | 11 +++--- types/protobuf.go | 13 ++++--- types/protobuf_test.go | 76 +++++++++++++++++++++++++++++++++++----- types/results_test.go | 2 +- types/vote.go | 2 +- types/vote_test.go | 26 +++++++------- 14 files changed, 186 insertions(+), 102 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 0f70bb2f7..d5d6c1b28 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -365,12 +365,12 @@ revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df" [[projects]] - digest = "1:e0a2a4be1e20c305badc2b0a7a9ab7fef6da500763bec23ab81df3b5f9eec9ee" + digest = "1:3ff2c9d4def5ec999ab672b9059d0ba41a1351913ea78e63b5402e4ba4ef8da4" name = "github.com/tendermint/go-amino" packages = ["."] pruneopts = "UT" - revision = "a8328986c1608950fa5d3d1c0472cccc4f8fc02c" - version = "v0.12.0-rc0" + revision = "ff047d9e357e66d937d6900d4a2e04501cc62c70" + version = "v0.13.0-rc0" [[projects]] digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf" diff --git a/Gopkg.toml b/Gopkg.toml index 07ff3c534..622ca00e0 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -58,7 +58,7 @@ [[constraint]] name = "github.com/tendermint/go-amino" - version = "v0.12.0-rc0" + version = "v0.13.0-rc0" [[constraint]] name = "google.golang.org/grpc" diff --git a/state/state.go b/state/state.go index d6ec6f0be..0dbd718da 100644 --- a/state/state.go +++ b/state/state.go @@ -128,7 +128,7 @@ func (state State) IsEmpty() bool { // MakeBlock builds a block from the current state with the given txs, commit, // and evidence. Note it also takes a proposerAddress because the state does not -// track rounds, and hence doesn't know the correct proposer. TODO: alleviate this! +// track rounds, and hence does not know the correct proposer. TODO: fix this! func (state State) MakeBlock( height int64, txs []types.Tx, @@ -140,29 +140,22 @@ func (state State) MakeBlock( // Build base block with block data. block := types.MakeBlock(height, txs, commit, evidence) - // Fill rest of header with state data. - block.Version = state.Version.Consensus - block.ChainID = state.ChainID - - // Set time + // Set time. + var timestamp time.Time if height == 1 { - block.Time = state.LastBlockTime // genesis time + timestamp = state.LastBlockTime // genesis time } else { - block.Time = MedianTime(commit, state.LastValidators) + timestamp = MedianTime(commit, state.LastValidators) } - block.LastBlockID = state.LastBlockID - block.TotalTxs = state.LastBlockTotalTx + block.NumTxs - - block.ValidatorsHash = state.Validators.Hash() - block.NextValidatorsHash = state.NextValidators.Hash() - block.ConsensusHash = state.ConsensusParams.Hash() - block.AppHash = state.AppHash - block.LastResultsHash = state.LastResultsHash - - // NOTE: we can't use the state.Validators because we don't - // IncrementAccum for rounds there. - block.ProposerAddress = proposerAddress + // Fill rest of header with state data. 
+ block.Header.Populate( + state.Version.Consensus, state.ChainID, + timestamp, state.LastBlockID, state.LastBlockTotalTx+block.NumTxs, + state.Validators.Hash(), state.NextValidators.Hash(), + state.ConsensusParams.Hash(), state.AppHash, state.LastResultsHash, + proposerAddress, + ) return block, block.MakePartSet(types.BlockPartSizeBytes) } diff --git a/types/block.go b/types/block.go index 2a5b5fc4a..ce605263c 100644 --- a/types/block.go +++ b/types/block.go @@ -15,7 +15,7 @@ import ( const ( // MaxHeaderBytes is a maximum header size (including amino overhead). - MaxHeaderBytes int64 = 534 + MaxHeaderBytes int64 = 537 // MaxAminoOverheadForBlock - maximum amino overhead to encode a block (up to // MaxBlockSizeBytes in size) not including it's parts except Data. @@ -257,11 +257,11 @@ func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { //----------------------------------------------------------------------------- -// Header defines the structure of a Tendermint block header +// Header defines the structure of a Tendermint block header. // NOTE: changes to the Header should be duplicated in: -// - header.Hash() -// - abci.Header -// - /docs/spec/blockchain/blockchain.md +// - header.Hash() +// - abci.Header +// - /docs/spec/blockchain/blockchain.md type Header struct { // basic block info Version version.Consensus `json:"version"` @@ -290,6 +290,28 @@ type Header struct { ProposerAddress Address `json:"proposer_address"` // original proposer of the block } +// Populate the Header with state-derived data. +// Call this after MakeBlock to complete the Header. +func (h *Header) Populate( + version version.Consensus, chainID string, + timestamp time.Time, lastBlockID BlockID, totalTxs int64, + valHash, nextValHash []byte, + consensusHash, appHash, lastResultsHash []byte, + proposerAddress Address, +) { + h.Version = version + h.ChainID = chainID + h.Time = timestamp + h.LastBlockID = lastBlockID + h.TotalTxs = totalTxs + h.ValidatorsHash = valHash + h.NextValidatorsHash = nextValHash + h.ConsensusHash = consensusHash + h.AppHash = appHash + h.LastResultsHash = lastResultsHash + h.ProposerAddress = proposerAddress +} + // Hash returns the hash of the header. // It computes a Merkle tree from the header fields // ordered as they appear in the Header. diff --git a/types/block_test.go b/types/block_test.go index d268e411e..e34ba29b8 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -242,11 +242,15 @@ func TestMaxHeaderBytes(t *testing.T) { maxChainID += "𠜎" } + // time is varint encoded so need to pick the max. 
+ // year int, month Month, day, hour, min, sec, nsec int, loc *Location + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + h := Header{ Version: version.Consensus{math.MaxInt64, math.MaxInt64}, ChainID: maxChainID, Height: math.MaxInt64, - Time: time.Now().UTC(), + Time: timestamp, NumTxs: math.MaxInt64, TotalTxs: math.MaxInt64, LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), @@ -288,9 +292,9 @@ func TestBlockMaxDataBytes(t *testing.T) { }{ 0: {-10, 1, 0, true, 0}, 1: {10, 1, 0, true, 0}, - 2: {744, 1, 0, true, 0}, - 3: {745, 1, 0, false, 0}, - 4: {746, 1, 0, false, 1}, + 2: {750, 1, 0, true, 0}, + 3: {751, 1, 0, false, 0}, + 4: {752, 1, 0, false, 1}, } for i, tc := range testCases { @@ -316,9 +320,9 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { }{ 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, - 2: {826, 1, true, 0}, - 3: {827, 1, false, 0}, - 4: {828, 1, false, 1}, + 2: {833, 1, true, 0}, + 3: {834, 1, false, 0}, + 4: {835, 1, false, 1}, } for i, tc := range testCases { diff --git a/types/evidence.go b/types/evidence.go index 57523ab1e..6d42ed22c 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -14,7 +14,7 @@ import ( const ( // MaxEvidenceBytes is a maximum size of any evidence (including amino overhead). - MaxEvidenceBytes int64 = 440 + MaxEvidenceBytes int64 = 444 ) // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. diff --git a/types/evidence_test.go b/types/evidence_test.go index a8d7efff8..79805691c 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -61,7 +61,7 @@ func TestEvidence(t *testing.T) { {vote1, makeVote(val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round {vote1, makeVote(val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step {vote1, makeVote(val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator - {vote1, badVote, false}, // signed by wrong key + {vote1, badVote, false}, // signed by wrong key } pubKey := val.GetPubKey() diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go index 446b39197..7efc7ca7d 100644 --- a/types/proto3/block.pb.go +++ b/types/proto3/block.pb.go @@ -244,13 +244,14 @@ func (m *Version) GetApp() uint64 { return 0 } -// Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type -// protobuf/timestamp.proto in the sense that there seconds and nanos are varint encoded. See: +// Timestamp wraps how amino encodes time. +// This is the protobuf well-known type protobuf/timestamp.proto +// See: // https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 -// Also nanos do not get skipped if they are zero in amino. +// NOTE/XXX: nanos do not get skipped if they are zero in amino. 
type Timestamp struct { - Seconds int64 `protobuf:"fixed64,1,opt,name=seconds" json:"seconds,omitempty"` - Nanos int32 `protobuf:"fixed32,2,opt,name=nanos" json:"nanos,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` } func (m *Timestamp) Reset() { *m = Timestamp{} } @@ -285,31 +286,31 @@ func init() { proto.RegisterFile("block.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 443 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xcd, 0x6a, 0xdb, 0x40, - 0x10, 0x46, 0x8d, 0x62, 0xc7, 0x23, 0x3b, 0x76, 0x86, 0xb6, 0x88, 0x9e, 0x8c, 0x68, 0x8b, 0x7b, + 0x10, 0x46, 0xb5, 0x6c, 0xc7, 0x23, 0x3b, 0x4e, 0x86, 0xb6, 0x88, 0x9e, 0x8c, 0x68, 0x8b, 0x7b, 0x31, 0x24, 0x39, 0x94, 0xd2, 0x93, 0x6b, 0x17, 0x12, 0x28, 0x21, 0x6c, 0x8d, 0xef, 0x1b, 0x6b, - 0xa9, 0x45, 0x2d, 0xad, 0xd0, 0xac, 0x4b, 0xde, 0xb0, 0xaf, 0x55, 0x66, 0x56, 0x52, 0x2d, 0x93, - 0x93, 0xf7, 0xfb, 0x99, 0x6f, 0x76, 0xc7, 0x23, 0x88, 0x9e, 0xf6, 0x76, 0xfb, 0x7b, 0x5e, 0x56, - 0xd6, 0x59, 0xec, 0xc9, 0xcf, 0x6d, 0xf2, 0x05, 0x46, 0x8f, 0xba, 0x72, 0x3f, 0x8d, 0xbb, 0x33, - 0x3a, 0x35, 0x15, 0xbe, 0x86, 0xf3, 0xb5, 0x75, 0x7a, 0x1f, 0x07, 0xd3, 0x60, 0x76, 0xa5, 0x3c, - 0x40, 0x84, 0xf0, 0x4e, 0xd3, 0x2e, 0x7e, 0x35, 0x0d, 0x66, 0x43, 0x25, 0xe7, 0x64, 0x03, 0xfd, - 0x6f, 0x9c, 0x78, 0xbf, 0x6a, 0xe5, 0xe0, 0xbf, 0x8c, 0x9f, 0x21, 0xe2, 0x64, 0xf2, 0xb9, 0x52, - 0x19, 0xdd, 0xbc, 0xf1, 0xed, 0x6f, 0xe7, 0x9d, 0xa6, 0xea, 0xd8, 0x99, 0xfc, 0x0d, 0xa1, 0x57, - 0x5f, 0xe6, 0x13, 0xf4, 0x37, 0xa6, 0xa2, 0xcc, 0x16, 0x12, 0x1d, 0xdd, 0x8c, 0x9b, 0xfa, 0x9a, - 0x56, 0x8d, 0x8e, 0x31, 0xf4, 0x97, 0x3b, 0x9d, 0x15, 0xf7, 0x2b, 0x69, 0x35, 0x50, 0x0d, 0xc4, - 0xb7, 0x1c, 0x97, 0xfd, 0xda, 0xb9, 0xf8, 0x6c, 0x1a, 0xcc, 0x50, 0xd5, 0x08, 0x3f, 0x40, 0xb8, - 0xce, 0x72, 0x13, 0x87, 0x92, 0x7c, 0xd5, 0x24, 0x33, 0x47, 0x4e, 0xe7, 0xa5, 0x12, 0x99, 0xcb, - 0x1f, 0x0e, 0xf9, 0xfa, 0x99, 0xe2, 0x73, 0x5f, 0xee, 0x11, 0xbe, 0x83, 0x0b, 0x99, 0x0d, 0x2b, - 0x3d, 0x51, 0x5a, 0x8c, 0xd7, 0x10, 0xfd, 0xd0, 0xe4, 0xea, 0xf1, 0xc4, 0xfd, 0xee, 0xdd, 0x6b, - 0x5a, 0x1d, 0x7b, 0xf0, 0x23, 0x5c, 0x32, 0x5c, 0xda, 0x3c, 0xcf, 0x9c, 0x0c, 0xf3, 0x42, 0x86, - 0x79, 0xc2, 0x72, 0xdb, 0x95, 0x76, 0x5a, 0x1c, 0x03, 0x71, 0xb4, 0x98, 0x33, 0x36, 0x7a, 0x9f, - 0xa5, 0xda, 0xd9, 0x8a, 0xc4, 0x01, 0x3e, 0xa3, 0xcb, 0xe2, 0x1c, 0xf0, 0xc1, 0x3c, 0xbb, 0x13, - 0x6f, 0x24, 0xde, 0x17, 0x14, 0x7c, 0x0f, 0xa3, 0xa5, 0x2d, 0xc8, 0x14, 0x74, 0xf0, 0xd6, 0xa1, - 0x58, 0xbb, 0x24, 0xff, 0x03, 0x8b, 0xb2, 0x14, 0x7d, 0x24, 0x7a, 0x03, 0x71, 0x06, 0x63, 0x7e, - 0x85, 0x32, 0x74, 0xd8, 0x3b, 0x9f, 0x70, 0x29, 0x8e, 0x53, 0x1a, 0x13, 0x18, 0x7e, 0xff, 0x93, - 0xa5, 0xa6, 0xd8, 0x1a, 0xb1, 0x8d, 0xc5, 0xd6, 0xe1, 0x38, 0xed, 0xb1, 0xb2, 0xa5, 0x25, 0x53, - 0x2d, 0xd2, 0xb4, 0x32, 0x44, 0xf1, 0xc4, 0xa7, 0x9d, 0xd0, 0xc9, 0x75, 0xbb, 0x3e, 0xbc, 0xd6, - 0x32, 0x69, 0xd9, 0xa3, 0x50, 0x79, 0x80, 0x13, 0x38, 0x5b, 0x94, 0xa5, 0x2c, 0x4c, 0xa8, 0xf8, - 0x98, 0x7c, 0x85, 0x41, 0xbb, 0x00, 0xfc, 0x22, 0x32, 0x5b, 0x5b, 0xa4, 0x24, 0x65, 0x13, 0xd5, - 0x40, 0x8e, 0x2b, 0x74, 0x61, 0x49, 0x4a, 0xc7, 0xca, 0x83, 0xa7, 0xfa, 0xa3, 0xfa, 0x17, 0x00, - 0x00, 0xff, 0xff, 0xd5, 0x8b, 0x28, 0x26, 0x6a, 0x03, 0x00, 0x00, + 0xa9, 0x45, 0x2d, 0xad, 0xd0, 0xac, 0x4b, 0xde, 0xb0, 0xaf, 0x55, 0x66, 0x56, 0x52, 0x23, 0x93, + 0x93, 0xf7, 0xfb, 0x99, 0x6f, 0x76, 0xc7, 0x23, 0x88, 0x1e, 0x0f, 0x76, 0xf7, 0x7b, 0x51, 0x56, 
+ 0xd6, 0x59, 0x1c, 0xc8, 0xcf, 0x4d, 0xf2, 0x05, 0x26, 0x0f, 0xba, 0x72, 0x3f, 0x8d, 0xbb, 0x35, + 0x3a, 0x35, 0x15, 0xbe, 0x86, 0xfe, 0xc6, 0x3a, 0x7d, 0x88, 0x83, 0x59, 0x30, 0xbf, 0x54, 0x1e, + 0x20, 0x42, 0x78, 0xab, 0x69, 0x1f, 0xbf, 0x9a, 0x05, 0xf3, 0xb1, 0x92, 0x73, 0xb2, 0x85, 0xe1, + 0x37, 0x4e, 0xbc, 0x5b, 0xb7, 0x72, 0xf0, 0x5f, 0xc6, 0xcf, 0x10, 0x71, 0x32, 0xf9, 0x5c, 0xa9, + 0x8c, 0xae, 0xdf, 0xf8, 0xf6, 0x37, 0x8b, 0x4e, 0x53, 0xf5, 0xdc, 0x99, 0xfc, 0x0d, 0x61, 0x50, + 0x5f, 0xe6, 0x13, 0x0c, 0xb7, 0xa6, 0xa2, 0xcc, 0x16, 0x12, 0x1d, 0x5d, 0x4f, 0x9b, 0xfa, 0x9a, + 0x56, 0x8d, 0x8e, 0x31, 0x0c, 0x57, 0x7b, 0x9d, 0x15, 0x77, 0x6b, 0x69, 0x35, 0x52, 0x0d, 0xc4, + 0xb7, 0x1c, 0x97, 0xfd, 0xda, 0xbb, 0xb8, 0x37, 0x0b, 0xe6, 0xa8, 0x6a, 0x84, 0x1f, 0x20, 0xdc, + 0x64, 0xb9, 0x89, 0x43, 0x49, 0xbe, 0x6c, 0x92, 0x99, 0x23, 0xa7, 0xf3, 0x52, 0x89, 0xcc, 0xe5, + 0xf7, 0xc7, 0x7c, 0xf3, 0x44, 0x71, 0xdf, 0x97, 0x7b, 0x84, 0xef, 0xe0, 0x4c, 0x66, 0xc3, 0xca, + 0x40, 0x94, 0x16, 0xe3, 0x15, 0x44, 0x3f, 0x34, 0xb9, 0x7a, 0x3c, 0xf1, 0xb0, 0x7b, 0xf7, 0x9a, + 0x56, 0xcf, 0x3d, 0xf8, 0x11, 0xce, 0x19, 0xae, 0x6c, 0x9e, 0x67, 0x4e, 0x86, 0x79, 0x26, 0xc3, + 0x3c, 0x61, 0xb9, 0xed, 0x5a, 0x3b, 0x2d, 0x8e, 0x91, 0x38, 0x5a, 0xcc, 0x19, 0x5b, 0x7d, 0xc8, + 0x52, 0xed, 0x6c, 0x45, 0xe2, 0x00, 0x9f, 0xd1, 0x65, 0x71, 0x01, 0x78, 0x6f, 0x9e, 0xdc, 0x89, + 0x37, 0x12, 0xef, 0x0b, 0x0a, 0xbe, 0x87, 0xc9, 0xca, 0x16, 0x64, 0x0a, 0x3a, 0x7a, 0xeb, 0x58, + 0xac, 0x5d, 0x92, 0xff, 0x81, 0x65, 0x59, 0x8a, 0x3e, 0x11, 0xbd, 0x81, 0x38, 0x87, 0x29, 0xbf, + 0x42, 0x19, 0x3a, 0x1e, 0x9c, 0x4f, 0x38, 0x17, 0xc7, 0x29, 0x8d, 0x09, 0x8c, 0xbf, 0xff, 0xc9, + 0x52, 0x53, 0xec, 0x8c, 0xd8, 0xa6, 0x62, 0xeb, 0x70, 0x9c, 0xf6, 0x50, 0xd9, 0xd2, 0x92, 0xa9, + 0x96, 0x69, 0x5a, 0x19, 0xa2, 0xf8, 0xc2, 0xa7, 0x9d, 0xd0, 0xc9, 0x55, 0xbb, 0x3e, 0xbc, 0xd6, + 0x32, 0x69, 0xd9, 0xa3, 0x50, 0x79, 0x80, 0x17, 0xd0, 0x5b, 0x96, 0xa5, 0x2c, 0x4c, 0xa8, 0xf8, + 0x98, 0x7c, 0x85, 0x51, 0xbb, 0x00, 0xfc, 0x22, 0x32, 0x3b, 0x5b, 0xa4, 0x24, 0x65, 0x3d, 0xd5, + 0x40, 0x8e, 0x2b, 0x74, 0x61, 0x49, 0x4a, 0xfb, 0xca, 0x83, 0xc7, 0xfa, 0xa3, 0xfa, 0x17, 0x00, + 0x00, 0xff, 0xff, 0x8f, 0x82, 0xc0, 0x0c, 0x6a, 0x03, 0x00, 0x00, } diff --git a/types/proto3/block.proto b/types/proto3/block.proto index dd64a9e98..1c76746c2 100644 --- a/types/proto3/block.proto +++ b/types/proto3/block.proto @@ -46,11 +46,12 @@ message Version { uint64 App = 2; } -// Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type -// protobuf/timestamp.proto in the sense that there seconds and nanos are varint encoded. See: +// Timestamp wraps how amino encodes time. +// This is the protobuf well-known type protobuf/timestamp.proto +// See: // https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 -// Also nanos do not get skipped if they are zero in amino. +// NOTE/XXX: nanos do not get skipped if they are zero in amino. 
message Timestamp { - sfixed64 seconds = 1; - sfixed32 nanos = 2; + int64 seconds = 1; + int32 nanos = 2; } diff --git a/types/protobuf.go b/types/protobuf.go index c9c429c80..e1ec81e82 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -34,6 +34,10 @@ type tm2pb struct{} func (tm2pb) Header(header *Header) abci.Header { return abci.Header{ + Version: abci.Version{ + Block: header.Version.Block.Uint64(), + App: header.Version.App.Uint64(), + }, ChainID: header.ChainID, Height: header.Height, Time: header.Time, @@ -45,10 +49,11 @@ func (tm2pb) Header(header *Header) abci.Header { LastCommitHash: header.LastCommitHash, DataHash: header.DataHash, - ValidatorsHash: header.ValidatorsHash, - ConsensusHash: header.ConsensusHash, - AppHash: header.AppHash, - LastResultsHash: header.LastResultsHash, + ValidatorsHash: header.ValidatorsHash, + NextValidatorsHash: header.NextValidatorsHash, + ConsensusHash: header.ConsensusHash, + AppHash: header.AppHash, + LastResultsHash: header.LastResultsHash, EvidenceHash: header.EvidenceHash, ProposerAddress: header.ProposerAddress, diff --git a/types/protobuf_test.go b/types/protobuf_test.go index c940f1b42..7e7f55a1d 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -4,12 +4,16 @@ import ( "testing" "time" + "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" + + "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" - tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" ) func TestABCIPubKey(t *testing.T) { @@ -67,17 +71,71 @@ func TestABCIConsensusParams(t *testing.T) { assert.Equal(t, *cp, cp2) } +func newHeader( + height, numTxs int64, + commitHash, dataHash, evidenceHash []byte, +) *Header { + return &Header{ + Height: height, + NumTxs: numTxs, + LastCommitHash: commitHash, + DataHash: dataHash, + EvidenceHash: evidenceHash, + } +} + func TestABCIHeader(t *testing.T) { - header := &Header{ - Height: int64(3), - Time: tmtime.Now(), - NumTxs: int64(10), - ProposerAddress: []byte("cloak"), + // build a full header + var height int64 = 5 + var numTxs int64 = 3 + header := newHeader( + height, numTxs, + []byte("lastCommitHash"), []byte("dataHash"), []byte("evidenceHash"), + ) + protocolVersion := version.Consensus{7, 8} + timestamp := time.Now() + lastBlockID := BlockID{ + Hash: []byte("hash"), + PartsHeader: PartSetHeader{ + Total: 10, + Hash: []byte("hash"), + }, } - abciHeader := TM2PB.Header(header) + var totalTxs int64 = 100 + header.Populate( + protocolVersion, "chainID", + timestamp, lastBlockID, totalTxs, + []byte("valHash"), []byte("nextValHash"), + []byte("consHash"), []byte("appHash"), []byte("lastResultsHash"), + []byte("proposerAddress"), + ) + + cdc := amino.NewCodec() + headerBz := cdc.MustMarshalBinaryBare(header) + + pbHeader := TM2PB.Header(header) + pbHeaderBz, err := proto.Marshal(&pbHeader) + assert.NoError(t, err) + + // assert some fields match + assert.EqualValues(t, protocolVersion.Block, pbHeader.Version.Block) + assert.EqualValues(t, protocolVersion.App, pbHeader.Version.App) + assert.EqualValues(t, "chainID", pbHeader.ChainID) + assert.EqualValues(t, height, pbHeader.Height) + assert.EqualValues(t, timestamp, pbHeader.Time) + assert.EqualValues(t, numTxs, pbHeader.NumTxs) + assert.EqualValues(t, totalTxs, pbHeader.TotalTxs) + assert.EqualValues(t, lastBlockID.Hash, 
pbHeader.LastBlockId.Hash) + assert.EqualValues(t, []byte("lastCommitHash"), pbHeader.LastCommitHash) + assert.Equal(t, []byte("proposerAddress"), pbHeader.ProposerAddress) + + // assert the encodings match + // NOTE: they don't yet because Amino encodes + // int64 as zig-zag and we're using non-zigzag in the protobuf. + // See https://github.com/tendermint/tendermint/issues/2682 + _, _ = headerBz, pbHeaderBz + // assert.EqualValues(t, headerBz, pbHeaderBz) - assert.Equal(t, int64(3), abciHeader.Height) - assert.Equal(t, []byte("cloak"), abciHeader.ProposerAddress) } func TestABCIEvidence(t *testing.T) { diff --git a/types/results_test.go b/types/results_test.go index 808033850..4e57e5804 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -43,7 +43,7 @@ func TestABCIResults(t *testing.T) { } } -func TestABCIBytes(t *testing.T) { +func TestABCIResultsBytes(t *testing.T) { results := NewResults([]*abci.ResponseDeliverTx{ {Code: 0, Data: []byte{}}, {Code: 0, Data: []byte("one")}, diff --git a/types/vote.go b/types/vote.go index 2d70e21b2..2a7133099 100644 --- a/types/vote.go +++ b/types/vote.go @@ -12,7 +12,7 @@ import ( const ( // MaxVoteBytes is a maximum vote size (including amino overhead). - MaxVoteBytes int64 = 200 + MaxVoteBytes int64 = 203 ) var ( diff --git a/types/vote_test.go b/types/vote_test.go index 2172f0600..3b2f08488 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" - tmtime "github.com/tendermint/tendermint/types/time" ) func examplePrevote() *Vote { @@ -63,13 +62,13 @@ func TestVoteSignableTestVectors(t *testing.T) { { CanonicalizeVote("", &Vote{}), // NOTE: Height and Round are skipped here. This case needs to be considered while parsing. 
- []byte{0xb, 0x22, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + // []byte{0x22, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + []byte{0x22, 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, }, // with proper (fixed size) height and round (PreCommit): { CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrecommitType}), []byte{ - 0x1f, // total length 0x8, // (field_number << 3) | wire_type 0x2, // PrecommitType 0x11, // (field_number << 3) | wire_type @@ -78,13 +77,12 @@ func TestVoteSignableTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round 0x22, // (field_number << 3) | wire_type // remaining fields (timestamp): - 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, }, // with proper (fixed size) height and round (PreVote): { CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrevoteType}), []byte{ - 0x1f, // total length 0x8, // (field_number << 3) | wire_type 0x1, // PrevoteType 0x11, // (field_number << 3) | wire_type @@ -93,38 +91,36 @@ func TestVoteSignableTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round 0x22, // (field_number << 3) | wire_type // remaining fields (timestamp): - 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, }, { vote, []byte{ - 0x1d, // total length 0x11, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height 0x19, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields (timestamp): 0x22, - 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, }, // containing non-empty chain_id: { CanonicalizeVote("test_chain_id", &Vote{Height: 1, Round: 1}), []byte{ - 0x2c, // total length 0x11, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height 0x19, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields: - 0x22, // (field_number << 3) | wire_type - 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff, // timestamp + 0x22, // (field_number << 3) | wire_type + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp 0x32, // (field_number << 3) | wire_type 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID }, } for i, tc := range tests { - got, err := cdc.MarshalBinary(tc.canonicalVote) + got, err := cdc.MarshalBinaryBare(tc.canonicalVote) require.NoError(t, err) require.Equal(t, tc.want, got, "test case #%v: got unexpected sign bytes for Vote.", i) @@ -210,12 +206,16 @@ func TestVoteVerify(t *testing.T) { } func TestMaxVoteBytes(t *testing.T) { + // time is varint encoded so need to pick the max. 
+ // year int, month Month, day, hour, min, sec, nsec int, loc *Location + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + vote := &Vote{ ValidatorAddress: tmhash.Sum([]byte("validator_address")), ValidatorIndex: math.MaxInt64, Height: math.MaxInt64, Round: math.MaxInt64, - Timestamp: tmtime.Now(), + Timestamp: timestamp, Type: PrevoteType, BlockID: BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), From 9795e12ef2eae822733e5add1300bdd37caf5d7c Mon Sep 17 00:00:00 2001 From: Jun Kimura Date: Wed, 24 Oct 2018 17:07:33 +0900 Subject: [PATCH 097/113] fix `RecoverAndLogHandler` not to call multiple writeheader (#2688) --- rpc/lib/server/http_server.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 8069a81d4..6de376c29 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -173,8 +173,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler "Panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack()), ) - rww.WriteHeader(http.StatusInternalServerError) - WriteRPCResponseHTTP(rww, types.RPCInternalError("", e.(error))) + WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, types.RPCInternalError("", e.(error))) } } From 6643c5dd1151fdee803eb1a27e54b5dd81c65be5 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Thu, 25 Oct 2018 03:34:01 +0200 Subject: [PATCH 098/113] Catch up with amino 0.13.0 (#2690) * catch up with amino changes in https://github.com/tendermint/go-amino/pull/222 * WIP: update to amino v0.13.0 * update to fixed amino release --- Gopkg.lock | 72 ++++++++++++++--------------- Gopkg.toml | 2 +- blockchain/store.go | 2 +- consensus/reactor.go | 6 +-- consensus/replay_test.go | 4 +- consensus/state.go | 2 +- crypto/merkle/proof_simple_value.go | 4 +- libs/common/errors_test.go | 2 +- lite/dbprovider.go | 10 ++-- p2p/conn/connection.go | 10 ++-- p2p/conn/connection_test.go | 38 +++++++-------- p2p/conn/secret_connection.go | 8 ++-- p2p/metrics.go | 2 +- p2p/transport.go | 4 +- p2p/transport_test.go | 4 +- privval/priv_validator.go | 12 ++--- privval/remote_signer.go | 4 +- types/block.go | 2 +- types/block_test.go | 2 +- types/evidence_test.go | 2 +- types/heartbeat.go | 2 +- types/heartbeat_test.go | 4 +- types/proposal.go | 2 +- types/proposal_test.go | 6 +-- types/results.go | 2 +- types/tx_test.go | 8 ++-- types/validator_set_test.go | 4 +- types/vote.go | 2 +- types/vote_test.go | 12 ++--- 29 files changed, 117 insertions(+), 117 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index d5d6c1b28..566fed4a2 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -11,11 +11,11 @@ [[projects]] branch = "master" - digest = "1:2c00f064ba355903866cbfbf3f7f4c0fe64af6638cc7d1b8bdcf3181bc67f1d8" + digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7" name = "github.com/btcsuite/btcd" packages = ["btcec"] pruneopts = "UT" - revision = "f5e261fc9ec3437697fb31d8b38453c293204b29" + revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0" [[projects]] digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf" @@ -28,12 +28,12 @@ revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" [[projects]] - digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" packages = ["spew"] pruneopts = "UT" - revision = 
"346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" [[projects]] digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b" @@ -83,12 +83,12 @@ version = "v0.3.0" [[projects]] - digest = "1:c4a2528ccbcabf90f9f3c464a5fc9e302d592861bbfd0b7135a7de8a943d0406" + digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" name = "github.com/go-stack/stack" packages = ["."] pruneopts = "UT" - revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" - version = "v1.7.0" + revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" + version = "v1.8.0" [[projects]] digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e" @@ -136,8 +136,7 @@ version = "v1.2.0" [[projects]] - branch = "master" - digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240" + digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8" name = "github.com/hashicorp/hcl" packages = [ ".", @@ -151,7 +150,8 @@ "json/token", ] pruneopts = "UT" - revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" [[projects]] digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" @@ -193,12 +193,12 @@ version = "v1.0.1" [[projects]] - branch = "master" - digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355" + digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" name = "github.com/mitchellh/mapstructure" packages = ["."] pruneopts = "UT" - revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" + revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" + version = "v1.1.2" [[projects]] digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e" @@ -244,7 +244,7 @@ [[projects]] branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" + digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4" name = "github.com/prometheus/common" packages = [ "expfmt", @@ -252,11 +252,11 @@ "model", ] pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" + revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6" [[projects]] branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" + digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc" name = "github.com/prometheus/procfs" packages = [ ".", @@ -265,7 +265,7 @@ "xfs", ] pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" + revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" [[projects]] digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c" @@ -275,15 +275,15 @@ revision = "e2704e165165ec55d062f5919b4b29494e9fa790" [[projects]] - digest = "1:bd1ae00087d17c5a748660b8e89e1043e1e5479d0fea743352cda2f8dd8c4f84" + digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd" name = "github.com/spf13/afero" packages = [ ".", "mem", ] pruneopts = "UT" - revision = "787d034dfe70e44075ccc060d346146ef53270ad" - version = "v1.1.1" + revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd" + version = "v1.1.2" [[projects]] digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f" @@ -302,20 +302,20 @@ version = "v0.0.1" [[projects]] - branch = "master" - digest = "1:080e5f630945ad754f4b920e60b4d3095ba0237ebf88dc462eb28002932e3805" + digest = 
"1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb" name = "github.com/spf13/jwalterweatherman" packages = ["."] pruneopts = "UT" - revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" + revision = "4a4406e478ca629068e7768fc33f3f044173c0a6" + version = "v1.0.0" [[projects]] - digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7" + digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" name = "github.com/spf13/pflag" packages = ["."] pruneopts = "UT" - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" [[projects]] digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96" @@ -338,7 +338,7 @@ [[projects]] branch = "master" - digest = "1:b3cfb8d82b1601a846417c3f31c03a7961862cb2c98dcf0959c473843e6d9a2b" + digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c" name = "github.com/syndtr/goleveldb" packages = [ "leveldb", @@ -355,7 +355,7 @@ "leveldb/util", ] pruneopts = "UT" - revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445" + revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43" [[projects]] digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f" @@ -365,12 +365,12 @@ revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df" [[projects]] - digest = "1:3ff2c9d4def5ec999ab672b9059d0ba41a1351913ea78e63b5402e4ba4ef8da4" + digest = "1:5f52e817b6c9d52ddba70dece0ea31134d82a52c05bce98fbc739ab2a832df28" name = "github.com/tendermint/go-amino" packages = ["."] pruneopts = "UT" - revision = "ff047d9e357e66d937d6900d4a2e04501cc62c70" - version = "v0.13.0-rc0" + revision = "cb07448b240918aa8d8df4505153549b86b77134" + version = "v0.13.0" [[projects]] digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf" @@ -415,14 +415,14 @@ [[projects]] branch = "master" - digest = "1:bb0fe59917bdd5b89f49b9a8b26e5f465e325d9223b3a8e32254314bdf51e0f1" + digest = "1:d1da39c9bac61327dbef1d8ef9f210425e99fd2924b6fb5f0bc587a193353637" name = "golang.org/x/sys" packages = [ "cpu", "unix", ] pruneopts = "UT" - revision = "3dc4335d56c789b04b0ba99b7a37249d9b614314" + revision = "8a28ead16f52c8aaeffbf79239b251dfdf6c4f96" [[projects]] digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" @@ -449,11 +449,11 @@ [[projects]] branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] pruneopts = "UT" - revision = "daca94659cb50e9f37c1b834680f2e46358f10b0" + revision = "94acd270e44e65579b9ee3cdab25034d33fed608" [[projects]] digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74" diff --git a/Gopkg.toml b/Gopkg.toml index 622ca00e0..e24965dc1 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -58,7 +58,7 @@ [[constraint]] name = "github.com/tendermint/go-amino" - version = "v0.13.0-rc0" + version = "v0.13.0" [[constraint]] name = "google.golang.org/grpc" diff --git a/blockchain/store.go b/blockchain/store.go index fa9ee5189..498cca68d 100644 --- a/blockchain/store.go +++ b/blockchain/store.go @@ -63,7 +63,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { part := bs.LoadBlockPart(height, i) buf = append(buf, part.Bytes...) 
} - err := cdc.UnmarshalBinary(buf, block) + err := cdc.UnmarshalBinaryLengthPrefixed(buf, block) if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. diff --git a/consensus/reactor.go b/consensus/reactor.go index bcf77fb3a..6643273cb 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -429,9 +429,9 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) { func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) { nrsMsg = &NewRoundStepMessage{ - Height: rs.Height, - Round: rs.Round, - Step: rs.Step, + Height: rs.Height, + Round: rs.Round, + Step: rs.Step, SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.Round(), } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 4e1fa2b77..d6691103e 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -520,7 +520,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { // if its not the first one, we have a full block if thisBlockParts != nil { var block = new(types.Block) - _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) if err != nil { panic(err) } @@ -553,7 +553,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } // grab the last block too var block = new(types.Block) - _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) if err != nil { panic(err) } diff --git a/consensus/state.go b/consensus/state.go index 375674008..0b079f13d 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1468,7 +1468,7 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p } if added && cs.ProposalBlockParts.IsComplete() { // Added and completed! - _, err = cdc.UnmarshalBinaryReader( + _, err = cdc.UnmarshalBinaryLengthPrefixedReader( cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes), diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go index 5b7b52329..904b6e5ec 100644 --- a/crypto/merkle/proof_simple_value.go +++ b/crypto/merkle/proof_simple_value.go @@ -42,7 +42,7 @@ func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) } var op SimpleValueOp // a bit strange as we'll discard this, but it works. 
- err := cdc.UnmarshalBinary(pop.Data, &op) + err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op) if err != nil { return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") } @@ -50,7 +50,7 @@ func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { } func (op SimpleValueOp) ProofOp() ProofOp { - bz := cdc.MustMarshalBinary(op) + bz := cdc.MustMarshalBinaryLengthPrefixed(op) return ProofOp{ Type: ProofOpSimpleValue, Key: op.key, diff --git a/libs/common/errors_test.go b/libs/common/errors_test.go index 326468c94..b85936dd5 100644 --- a/libs/common/errors_test.go +++ b/libs/common/errors_test.go @@ -24,7 +24,7 @@ func TestErrorPanic(t *testing.T) { var err = capturePanic() assert.Equal(t, pnk{"something"}, err.Data()) -assert.Equal(t, "{something}", fmt.Sprintf("%v", err)) + assert.Equal(t, "{something}", fmt.Sprintf("%v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).") assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index cab695b4a..e0c4e65b4 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -56,7 +56,7 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { // We might be overwriting what we already have, but // it makes the logic easier for now. vsKey := validatorSetKey(fc.ChainID(), fc.Height()) - vsBz, err := dbp.cdc.MarshalBinary(fc.Validators) + vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Validators) if err != nil { return err } @@ -64,7 +64,7 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { // Save the fc.NextValidators. nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) - nvsBz, err := dbp.cdc.MarshalBinary(fc.NextValidators) + nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextValidators) if err != nil { return err } @@ -72,7 +72,7 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { // Save the fc.SignedHeader shKey := signedHeaderKey(fc.ChainID(), fc.Height()) - shBz, err := dbp.cdc.MarshalBinary(fc.SignedHeader) + shBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.SignedHeader) if err != nil { return err } @@ -121,7 +121,7 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int // Found the latest full commit signed header. 
shBz := itr.Value() sh := types.SignedHeader{} - err := dbp.cdc.UnmarshalBinary(shBz, &sh) + err := dbp.cdc.UnmarshalBinaryLengthPrefixed(shBz, &sh) if err != nil { return FullCommit{}, err } else { @@ -150,7 +150,7 @@ func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *ty err = lerr.ErrUnknownValidators(chainID, height) return } - err = dbp.cdc.UnmarshalBinary(vsBz, &valset) + err = dbp.cdc.UnmarshalBinaryLengthPrefixed(vsBz, &valset) if err != nil { return } diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 0e33adab9..80fc53ddb 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -337,7 +337,7 @@ FOR_LOOP: } case <-c.pingTimer.Chan(): c.Logger.Debug("Send Ping") - _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPing{}) + _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{}) if err != nil { break SELECTION } @@ -359,7 +359,7 @@ FOR_LOOP: } case <-c.pong: c.Logger.Debug("Send Pong") - _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPong{}) + _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPong{}) if err != nil { break SELECTION } @@ -477,7 +477,7 @@ FOR_LOOP: var packet Packet var _n int64 var err error - _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) + _n, err = cdc.UnmarshalBinaryLengthPrefixedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) c.recvMonitor.Update(int(_n)) if err != nil { if c.IsRunning() { @@ -553,7 +553,7 @@ func (c *MConnection) stopPongTimer() { // maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead // of amino encoding. func (c *MConnection) maxPacketMsgSize() int { - return len(cdc.MustMarshalBinary(PacketMsg{ + return len(cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{ ChannelID: 0x01, EOF: 1, Bytes: make([]byte, c.config.MaxPacketMsgPayloadSize), @@ -723,7 +723,7 @@ func (ch *Channel) nextPacketMsg() PacketMsg { // Not goroutine-safe func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) { var packet = ch.nextPacketMsg() - n, err = cdc.MarshalBinaryWriter(w, packet) + n, err = cdc.MarshalBinaryLengthPrefixedWriter(w, packet) atomic.AddInt64(&ch.recentlySent, n) return } diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 95b5488a4..59fe0d1df 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -140,7 +140,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { go func() { // read ping var pkt PacketPing - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) assert.Nil(t, err) serverGotPing <- struct{}{} }() @@ -176,22 +176,22 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { defer mconn.Stop() // sending 3 pongs in a row (abuse) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) serverGotPing := make(chan struct{}) go func() { // read ping (one byte) var packet, err = Packet(nil), error(nil) - _, err = cdc.UnmarshalBinaryReader(server, &packet, 
maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &packet, maxPingPongPacketSize) require.Nil(t, err) serverGotPing <- struct{}{} // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) }() <-serverGotPing @@ -227,18 +227,18 @@ func TestMConnectionMultiplePings(t *testing.T) { // sending 3 pings in a row (abuse) // see https://github.com/tendermint/tendermint/issues/1190 - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) require.Nil(t, err) var pkt PacketPong - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) require.Nil(t, err) - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) require.Nil(t, err) - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) assert.True(t, mconn.IsRunning()) @@ -270,20 +270,20 @@ func TestMConnectionPingPongs(t *testing.T) { go func() { // read ping var pkt PacketPing - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) serverGotPing <- struct{}{} // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) time.Sleep(mconn.config.PingInterval) // read ping - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) }() <-serverGotPing @@ -380,7 +380,7 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) { client := mconnClient.conn // send badly encoded msgPacket - bz := cdc.MustMarshalBinary(PacketMsg{}) + bz := cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{}) bz[4] += 0x01 // Invalid prefix bytes. // Write it. 
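
The renames in this commit track the go-amino v0.13.0 API, which renamed the old MarshalBinary/UnmarshalBinary helpers (and their Writer/Reader forms) to *BinaryLengthPrefixed*, making it explicit at each call site that a uvarint byte-length prefix is written before the value, in contrast to the *BinaryBare* variants. Below is a minimal sketch of the renamed API — illustrative only, not part of the patch; the Ping struct, its Nonce values, and the 1024-byte read limit are invented for the example.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/tendermint/go-amino"
    )

    // Ping is a stand-in message type; any amino-encodable struct works here.
    type Ping struct {
        Nonce int64
    }

    func main() {
        cdc := amino.NewCodec()

        // Length-prefixed form: a uvarint byte-length precedes the value, so a
        // stream reader knows where one message ends and the next begins.
        bz, err := cdc.MarshalBinaryLengthPrefixed(Ping{Nonce: 1})
        if err != nil {
            panic(err)
        }
        var ping Ping
        if err := cdc.UnmarshalBinaryLengthPrefixed(bz, &ping); err != nil {
            panic(err)
        }

        // Bare form: same encoding but without the prefix (used e.g. for sign bytes).
        bareBz := cdc.MustMarshalBinaryBare(Ping{Nonce: 2})

        // Writer/Reader variants stream the length-prefixed encoding, which is
        // how the MConnection ping/pong and the p2p handshake changes use them.
        var buf bytes.Buffer
        if _, err := cdc.MarshalBinaryLengthPrefixedWriter(&buf, Ping{Nonce: 3}); err != nil {
            panic(err)
        }
        var fromStream Ping
        if _, err := cdc.UnmarshalBinaryLengthPrefixedReader(&buf, &fromStream, 1024); err != nil {
            panic(err)
        }

        fmt.Println(ping.Nonce, len(bareBz), fromStream.Nonce)
    }

As the surrounding diff shows, the change is essentially a mechanical rename of these call sites; the Writer/Reader helpers keep their shape, returning the number of bytes written or read plus an error.
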
@@ -428,7 +428,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { EOF: 1, Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), } - _, err = cdc.MarshalBinaryWriter(buf, packet) + _, err = cdc.MarshalBinaryLengthPrefixedWriter(buf, packet) assert.Nil(t, err) _, err = client.Write(buf.Bytes()) assert.Nil(t, err) @@ -441,7 +441,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { EOF: 1, Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+100), } - _, err = cdc.MarshalBinaryWriter(buf, packet) + _, err = cdc.MarshalBinaryLengthPrefixedWriter(buf, packet) assert.Nil(t, err) _, err = client.Write(buf.Bytes()) assert.NotNil(t, err) diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index acdd96de4..1dc66afff 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -211,7 +211,7 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3 // Send our pubkey and receive theirs in tandem. var trs, _ = cmn.Parallel( func(_ int) (val interface{}, err error, abort bool) { - var _, err1 = cdc.MarshalBinaryWriter(conn, locEphPub) + var _, err1 = cdc.MarshalBinaryLengthPrefixedWriter(conn, locEphPub) if err1 != nil { return nil, err1, true // abort } @@ -219,7 +219,7 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3 }, func(_ int) (val interface{}, err error, abort bool) { var _remEphPub [32]byte - var _, err2 = cdc.UnmarshalBinaryReader(conn, &_remEphPub, 1024*1024) // TODO + var _, err2 = cdc.UnmarshalBinaryLengthPrefixedReader(conn, &_remEphPub, 1024*1024) // TODO if err2 != nil { return nil, err2, true // abort } @@ -305,7 +305,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature [] // Send our info and receive theirs in tandem. var trs, _ = cmn.Parallel( func(_ int) (val interface{}, err error, abort bool) { - var _, err1 = cdc.MarshalBinaryWriter(sc, authSigMessage{pubKey, signature}) + var _, err1 = cdc.MarshalBinaryLengthPrefixedWriter(sc, authSigMessage{pubKey, signature}) if err1 != nil { return nil, err1, true // abort } @@ -313,7 +313,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature [] }, func(_ int) (val interface{}, err error, abort bool) { var _recvMsg authSigMessage - var _, err2 = cdc.UnmarshalBinaryReader(sc, &_recvMsg, 1024*1024) // TODO + var _, err2 = cdc.UnmarshalBinaryLengthPrefixedReader(sc, &_recvMsg, 1024*1024) // TODO if err2 != nil { return nil, err2, true // abort } diff --git a/p2p/metrics.go b/p2p/metrics.go index ed26d1192..b066fb317 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -62,7 +62,7 @@ func PrometheusMetrics(namespace string) *Metrics { // NopMetrics returns no-op Metrics. 
func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), + Peers: discard.NewGauge(), PeerReceiveBytesTotal: discard.NewCounter(), PeerSendBytesTotal: discard.NewCounter(), PeerPendingSendBytes: discard.NewGauge(), diff --git a/p2p/transport.go b/p2p/transport.go index 10565d8a9..0b9b436f0 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -446,11 +446,11 @@ func handshake( ) go func(errc chan<- error, c net.Conn) { - _, err := cdc.MarshalBinaryWriter(c, ourNodeInfo) + _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, ourNodeInfo) errc <- err }(errc, c) go func(errc chan<- error, c net.Conn) { - _, err := cdc.UnmarshalBinaryReader( + _, err := cdc.UnmarshalBinaryLengthPrefixedReader( c, &peerNodeInfo, int64(MaxNodeInfoSize()), diff --git a/p2p/transport_test.go b/p2p/transport_test.go index cce223a3e..8a5c06bc3 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -516,7 +516,7 @@ func TestTransportHandshake(t *testing.T) { } go func(c net.Conn) { - _, err := cdc.MarshalBinaryWriter(c, peerNodeInfo.(DefaultNodeInfo)) + _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, peerNodeInfo.(DefaultNodeInfo)) if err != nil { t.Error(err) } @@ -524,7 +524,7 @@ func TestTransportHandshake(t *testing.T) { go func(c net.Conn) { var ni DefaultNodeInfo - _, err := cdc.UnmarshalBinaryReader( + _, err := cdc.UnmarshalBinaryLengthPrefixedReader( c, &ni, int64(MaxNodeInfoSize()), diff --git a/privval/priv_validator.go b/privval/priv_validator.go index c5fba509f..a13f5426b 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -314,10 +314,10 @@ func (pv *FilePV) String() string { // returns true if the only difference in the votes is their timestamp. func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { var lastVote, newVote types.CanonicalVote - if err := cdc.UnmarshalBinary(lastSignBytes, &lastVote); err != nil { + if err := cdc.UnmarshalBinaryLengthPrefixed(lastSignBytes, &lastVote); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) } - if err := cdc.UnmarshalBinary(newSignBytes, &newVote); err != nil { + if err := cdc.UnmarshalBinaryLengthPrefixed(newSignBytes, &newVote); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) } @@ -337,10 +337,10 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T // returns true if the only difference in the proposals is their timestamp func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { var lastProposal, newProposal types.CanonicalProposal - if err := cdc.UnmarshalBinary(lastSignBytes, &lastProposal); err != nil { + if err := cdc.UnmarshalBinaryLengthPrefixed(lastSignBytes, &lastProposal); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) } - if err := cdc.UnmarshalBinary(newSignBytes, &newProposal); err != nil { + if err := cdc.UnmarshalBinaryLengthPrefixed(newSignBytes, &newProposal); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) } @@ -349,8 +349,8 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (ti now := tmtime.Now() lastProposal.Timestamp = now newProposal.Timestamp = now - lastProposalBytes, _ := cdc.MarshalBinary(lastProposal) - newProposalBytes, _ := cdc.MarshalBinary(newProposal) + lastProposalBytes, _ := cdc.MarshalBinaryLengthPrefixed(lastProposal) + newProposalBytes, _ := 
cdc.MarshalBinaryLengthPrefixed(newProposal) return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) } diff --git a/privval/remote_signer.go b/privval/remote_signer.go index 399ee7905..eacc840c5 100644 --- a/privval/remote_signer.go +++ b/privval/remote_signer.go @@ -248,7 +248,7 @@ func (e *RemoteSignerError) Error() string { func readMsg(r io.Reader) (msg RemoteSignerMsg, err error) { const maxRemoteSignerMsgSize = 1024 * 10 - _, err = cdc.UnmarshalBinaryReader(r, &msg, maxRemoteSignerMsgSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(r, &msg, maxRemoteSignerMsgSize) if _, ok := err.(timeoutError); ok { err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) } @@ -256,7 +256,7 @@ func readMsg(r io.Reader) (msg RemoteSignerMsg, err error) { } func writeMsg(w io.Writer, msg interface{}) (err error) { - _, err = cdc.MarshalBinaryWriter(w, msg) + _, err = cdc.MarshalBinaryLengthPrefixedWriter(w, msg) if _, ok := err.(timeoutError); ok { err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) } diff --git a/types/block.go b/types/block.go index ce605263c..70b840c63 100644 --- a/types/block.go +++ b/types/block.go @@ -149,7 +149,7 @@ func (b *Block) MakePartSet(partSize int) *PartSet { // We prefix the byte length, so that unmarshaling // can easily happen via a reader. - bz, err := cdc.MarshalBinary(b) + bz, err := cdc.MarshalBinaryLengthPrefixed(b) if err != nil { panic(err) } diff --git a/types/block_test.go b/types/block_test.go index e34ba29b8..341073980 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -265,7 +265,7 @@ func TestMaxHeaderBytes(t *testing.T) { ProposerAddress: tmhash.Sum([]byte("proposer_address")), } - bz, err := cdc.MarshalBinary(h) + bz, err := cdc.MarshalBinaryLengthPrefixed(h) require.NoError(t, err) assert.EqualValues(t, MaxHeaderBytes, len(bz)) diff --git a/types/evidence_test.go b/types/evidence_test.go index 79805691c..44276ab18 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -105,7 +105,7 @@ func TestMaxEvidenceBytes(t *testing.T) { VoteB: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2), } - bz, err := cdc.MarshalBinary(ev) + bz, err := cdc.MarshalBinaryLengthPrefixed(ev) require.NoError(t, err) assert.EqualValues(t, MaxEvidenceBytes, len(bz)) diff --git a/types/heartbeat.go b/types/heartbeat.go index de03d5cc4..9dea039e0 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -23,7 +23,7 @@ type Heartbeat struct { // SignBytes returns the Heartbeat bytes for signing. // It panics if the Heartbeat is nil. 
func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, heartbeat)) + bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, heartbeat)) if err != nil { panic(err) } diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go index 550bcc739..e1ffdd6f1 100644 --- a/types/heartbeat_test.go +++ b/types/heartbeat_test.go @@ -39,7 +39,7 @@ func TestHeartbeatWriteSignBytes(t *testing.T) { { testHeartbeat := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} signBytes := testHeartbeat.SignBytes(chainID) - expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat)) + expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, testHeartbeat)) require.NoError(t, err) require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") } @@ -47,7 +47,7 @@ func TestHeartbeatWriteSignBytes(t *testing.T) { { testHeartbeat := &Heartbeat{} signBytes := testHeartbeat.SignBytes(chainID) - expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat)) + expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, testHeartbeat)) require.NoError(t, err) require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") } diff --git a/types/proposal.go b/types/proposal.go index a2bc8e367..5d70a3c84 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -52,7 +52,7 @@ func (p *Proposal) String() string { // SignBytes returns the Proposal bytes for signing func (p *Proposal) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalBinary(CanonicalizeProposal(chainID, p)) + bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeProposal(chainID, p)) if err != nil { panic(err) } diff --git a/types/proposal_test.go b/types/proposal_test.go index 5f9433083..8ae1f3e5a 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -27,7 +27,7 @@ func TestProposalSignable(t *testing.T) { chainID := "test_chain_id" signBytes := testProposal.SignBytes(chainID) - expected, err := cdc.MarshalBinary(CanonicalizeProposal(chainID, testProposal)) + expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeProposal(chainID, testProposal)) require.NoError(t, err) require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Proposal") } @@ -57,9 +57,9 @@ func TestProposalVerifySignature(t *testing.T) { // serialize, deserialize and verify again.... 
newProp := new(Proposal) - bs, err := cdc.MarshalBinary(prop) + bs, err := cdc.MarshalBinaryLengthPrefixed(prop) require.NoError(t, err) - err = cdc.UnmarshalBinary(bs, &newProp) + err = cdc.UnmarshalBinaryLengthPrefixed(bs, &newProp) require.NoError(t, err) // verify the transmitted proposal diff --git a/types/results.go b/types/results.go index 6b5b82d27..db7811684 100644 --- a/types/results.go +++ b/types/results.go @@ -48,7 +48,7 @@ func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult { // Bytes serializes the ABCIResponse using wire func (a ABCIResults) Bytes() []byte { - bz, err := cdc.MarshalBinary(a) + bz, err := cdc.MarshalBinaryLengthPrefixed(a) if err != nil { panic(err) } diff --git a/types/tx_test.go b/types/tx_test.go index 9fb8ff34d..6ce23d6f5 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -79,9 +79,9 @@ func TestValidTxProof(t *testing.T) { // read-write must also work var p2 TxProof - bin, err := cdc.MarshalBinary(proof) + bin, err := cdc.MarshalBinaryLengthPrefixed(proof) assert.Nil(t, err) - err = cdc.UnmarshalBinary(bin, &p2) + err = cdc.UnmarshalBinaryLengthPrefixed(bin, &p2) if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { assert.Nil(t, p2.Validate(root), "%d: %d", h, i) } @@ -105,7 +105,7 @@ func testTxProofUnchangable(t *testing.T) { // make sure it is valid to start with assert.Nil(t, proof.Validate(root)) - bin, err := cdc.MarshalBinary(proof) + bin, err := cdc.MarshalBinaryLengthPrefixed(proof) assert.Nil(t, err) // try mutating the data and make sure nothing breaks @@ -120,7 +120,7 @@ func testTxProofUnchangable(t *testing.T) { // This makes sure that the proof doesn't deserialize into something valid. func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { var proof TxProof - err := cdc.UnmarshalBinary(bad, &proof) + err := cdc.UnmarshalBinaryLengthPrefixed(bad, &proof) if err == nil { err = proof.Validate(root) if err == nil { diff --git a/types/validator_set_test.go b/types/validator_set_test.go index d886b419c..aad9d85a8 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -272,7 +272,7 @@ func randValidatorSet(numValidators int) *ValidatorSet { } func (valSet *ValidatorSet) toBytes() []byte { - bz, err := cdc.MarshalBinary(valSet) + bz, err := cdc.MarshalBinaryLengthPrefixed(valSet) if err != nil { panic(err) } @@ -280,7 +280,7 @@ func (valSet *ValidatorSet) toBytes() []byte { } func (valSet *ValidatorSet) fromBytes(b []byte) { - err := cdc.UnmarshalBinary(b, &valSet) + err := cdc.UnmarshalBinaryLengthPrefixed(b, &valSet) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED panic(err) diff --git a/types/vote.go b/types/vote.go index 2a7133099..e1095bf15 100644 --- a/types/vote.go +++ b/types/vote.go @@ -59,7 +59,7 @@ type Vote struct { } func (vote *Vote) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalBinary(CanonicalizeVote(chainID, vote)) + bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeVote(chainID, vote)) if err != nil { panic(err) } diff --git a/types/vote_test.go b/types/vote_test.go index 3b2f08488..1d7e3daf0 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -46,7 +46,7 @@ func TestVoteSignable(t *testing.T) { vote := examplePrecommit() signBytes := vote.SignBytes("test_chain_id") - expected, err := cdc.MarshalBinary(CanonicalizeVote("test_chain_id", vote)) + expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeVote("test_chain_id", vote)) require.NoError(t, err) require.Equal(t, expected, signBytes, "Got unexpected sign bytes 
for Vote.") @@ -130,9 +130,9 @@ func TestVoteSignableTestVectors(t *testing.T) { func TestVoteProposalNotEq(t *testing.T) { cv := CanonicalizeVote("", &Vote{Height: 1, Round: 1}) p := CanonicalizeProposal("", &Proposal{Height: 1, Round: 1}) - vb, err := cdc.MarshalBinary(cv) + vb, err := cdc.MarshalBinaryLengthPrefixed(cv) require.NoError(t, err) - pb, err := cdc.MarshalBinary(p) + pb, err := cdc.MarshalBinaryLengthPrefixed(p) require.NoError(t, err) require.NotEqual(t, vb, pb) } @@ -154,9 +154,9 @@ func TestVoteVerifySignature(t *testing.T) { // serialize, deserialize and verify again.... precommit := new(Vote) - bs, err := cdc.MarshalBinary(vote) + bs, err := cdc.MarshalBinaryLengthPrefixed(vote) require.NoError(t, err) - err = cdc.UnmarshalBinary(bs, &precommit) + err = cdc.UnmarshalBinaryLengthPrefixed(bs, &precommit) require.NoError(t, err) // verify the transmitted vote @@ -230,7 +230,7 @@ func TestMaxVoteBytes(t *testing.T) { err := privVal.SignVote("test_chain_id", vote) require.NoError(t, err) - bz, err := cdc.MarshalBinary(vote) + bz, err := cdc.MarshalBinaryLengthPrefixed(vote) require.NoError(t, err) assert.EqualValues(t, MaxVoteBytes, len(bz)) From bbf15b3d09ac05ff2cfd58b53313ec612e1a9b07 Mon Sep 17 00:00:00 2001 From: zhangzheng Date: Thu, 25 Oct 2018 18:27:32 +0800 Subject: [PATCH 099/113] tm-monitor: update health after we added / removed node (#2694) Refs #2693 --- tools/tm-monitor/monitor/network.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/tm-monitor/monitor/network.go b/tools/tm-monitor/monitor/network.go index 9b147c06b..bb5dd0baa 100644 --- a/tools/tm-monitor/monitor/network.go +++ b/tools/tm-monitor/monitor/network.go @@ -140,14 +140,22 @@ func (n *Network) NodeIsOnline(name string) { // NewNode is called when the new node is added to the monitor. func (n *Network) NewNode(name string) { + n.mu.Lock() + defer n.mu.Unlock() + n.NumNodesMonitored++ n.NumNodesMonitoredOnline++ + n.updateHealth() } // NodeDeleted is called when the node is deleted from under the monitor. func (n *Network) NodeDeleted(name string) { + n.mu.Lock() + defer n.mu.Unlock() + n.NumNodesMonitored-- n.NumNodesMonitoredOnline-- + n.updateHealth() } func (n *Network) updateHealth() { From b6d5b8b74574269e5c51ed9d14477e8b9e07def1 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Mon, 29 Oct 2018 14:16:50 +0100 Subject: [PATCH 100/113] Update to amino 0.14.0 (#2710) * WIP: update to amino 0.14.0 * update Changelog * Update to latest amino version (v0.14.0) --- CHANGELOG_PENDING.md | 1 + Gopkg.lock | 16 +-- Gopkg.toml | 2 +- Makefile | 4 +- types/block.go | 2 +- types/block_test.go | 12 +- types/evidence.go | 2 +- types/proto3/block.pb.go | 266 ++++++++++++++++++++++++++------------- types/proto3/block.proto | 8 +- types/vote.go | 2 +- 10 files changed, 208 insertions(+), 107 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 163c4649f..a56487ff1 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -50,6 +50,7 @@ BREAKING CHANGES: * [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`. * [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`, `PrecommitType`. + * [types] [\#2682](https://github.com/tendermint/tendermint/issues/2682) Use proto3 `varint` encoding for ints that are usually unsigned (instead of zigzag encoding). 
* Blockchain Protocol * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: diff --git a/Gopkg.lock b/Gopkg.lock index 566fed4a2..513e0bd7a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -286,12 +286,12 @@ version = "v1.1.2" [[projects]] - digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f" + digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc" name = "github.com/spf13/cast" packages = ["."] pruneopts = "UT" - revision = "8965335b8c7107321228e3e3702cab9832751bac" - version = "v1.2.0" + revision = "8c9545af88b134710ab1cd196795e7f2388358d7" + version = "v1.3.0" [[projects]] digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e" @@ -365,12 +365,12 @@ revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df" [[projects]] - digest = "1:5f52e817b6c9d52ddba70dece0ea31134d82a52c05bce98fbc739ab2a832df28" + digest = "1:10b3a599325740c84a7c81f3f3cb2e1fdb70b3ea01b7fa28495567a2519df431" name = "github.com/tendermint/go-amino" packages = ["."] pruneopts = "UT" - revision = "cb07448b240918aa8d8df4505153549b86b77134" - version = "v0.13.0" + revision = "6dcc6ddc143e116455c94b25c1004c99e0d0ca12" + version = "v0.14.0" [[projects]] digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf" @@ -415,14 +415,14 @@ [[projects]] branch = "master" - digest = "1:d1da39c9bac61327dbef1d8ef9f210425e99fd2924b6fb5f0bc587a193353637" + digest = "1:fd98d154bf152ad5a49600ede7d7341851bcdfe358b9b82e5ccdba818618167c" name = "golang.org/x/sys" packages = [ "cpu", "unix", ] pruneopts = "UT" - revision = "8a28ead16f52c8aaeffbf79239b251dfdf6c4f96" + revision = "2772b66316d2c587efeb188dcd5ebc6987656e84" [[projects]] digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" diff --git a/Gopkg.toml b/Gopkg.toml index e24965dc1..955d6c6d1 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -58,7 +58,7 @@ [[constraint]] name = "github.com/tendermint/go-amino" - version = "v0.13.0" + version = "v0.14.0" [[constraint]] name = "google.golang.org/grpc" diff --git a/Makefile b/Makefile index 0b78574b9..4390b1fbb 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ install: ######################################## ### Protobuf -protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc +protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc protoc_proto3types %.pb.go: %.proto ## If you get the following error, @@ -52,6 +52,8 @@ protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc # see protobuf section above protoc_abci: abci/types/types.pb.go +protoc_proto3types: types/proto3/block.pb.go + build_abci: @go build -i ./abci/cmd/... diff --git a/types/block.go b/types/block.go index 70b840c63..477e39997 100644 --- a/types/block.go +++ b/types/block.go @@ -15,7 +15,7 @@ import ( const ( // MaxHeaderBytes is a maximum header size (including amino overhead). - MaxHeaderBytes int64 = 537 + MaxHeaderBytes int64 = 533 // MaxAminoOverheadForBlock - maximum amino overhead to encode a block (up to // MaxBlockSizeBytes in size) not including it's parts except Data. 
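
The smaller size constants in this commit — MaxHeaderBytes going from 537 to 533 here, with MaxEvidenceBytes and MaxVoteBytes shrinking below — line up with the changelog entry for #2682 above: fields that are usually unsigned (heights, rounds, counts) are now encoded as plain proto3 varints instead of zigzag-encoded sints. The sketch below illustrates the size difference using only the standard library (encoding/binary's PutUvarint is the plain varint; PutVarint applies zigzag first); it is illustrative only, not part of the patch, and the exact constants depend on every field of the real structures.

    package main

    import (
        "encoding/binary"
        "fmt"
        "math"
    )

    func main() {
        buf := make([]byte, binary.MaxVarintLen64)

        // Small positive values cost one byte either way, but the wire bytes
        // differ: plain varint writes 3 as 0x03, zigzag writes it as 0x06.
        n := binary.PutUvarint(buf, 3)
        fmt.Printf("plain varint(3):  %x (%d byte)\n", buf[:n], n)
        n = binary.PutVarint(buf, 3)
        fmt.Printf("zigzag varint(3): %x (%d byte)\n", buf[:n], n)

        // Near the top of the int64 range, zigzag needs an extra byte because
        // it doubles the magnitude before encoding. Constants like
        // MaxHeaderBytes and MaxVoteBytes are computed from worst-case values,
        // so each such max-valued field saves a byte under the new encoding.
        n = binary.PutUvarint(buf, math.MaxInt64)
        fmt.Println("plain varint(MaxInt64):", n, "bytes")
        n = binary.PutVarint(buf, math.MaxInt64)
        fmt.Println("zigzag varint(MaxInt64):", n, "bytes")
    }

Negative values go the other way — a plain-varint int64 that is negative always takes ten bytes — which is why zigzag remains appropriate for genuinely signed fields.
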
diff --git a/types/block_test.go b/types/block_test.go index 341073980..28e73f661 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -292,9 +292,9 @@ func TestBlockMaxDataBytes(t *testing.T) { }{ 0: {-10, 1, 0, true, 0}, 1: {10, 1, 0, true, 0}, - 2: {750, 1, 0, true, 0}, - 3: {751, 1, 0, false, 0}, - 4: {752, 1, 0, false, 1}, + 2: {742, 1, 0, true, 0}, + 3: {743, 1, 0, false, 0}, + 4: {744, 1, 0, false, 1}, } for i, tc := range testCases { @@ -320,9 +320,9 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { }{ 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, - 2: {833, 1, true, 0}, - 3: {834, 1, false, 0}, - 4: {835, 1, false, 1}, + 2: {824, 1, true, 0}, + 3: {825, 1, false, 0}, + 4: {826, 1, false, 1}, } for i, tc := range testCases { diff --git a/types/evidence.go b/types/evidence.go index 6d42ed22c..7a808d57b 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -14,7 +14,7 @@ import ( const ( // MaxEvidenceBytes is a maximum size of any evidence (including amino overhead). - MaxEvidenceBytes int64 = 444 + MaxEvidenceBytes int64 = 436 ) // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go index 7efc7ca7d..99dadac16 100644 --- a/types/proto3/block.pb.go +++ b/types/proto3/block.pb.go @@ -1,22 +1,9 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: block.proto - -/* -Package proto3 is a generated protocol buffer package. - -It is generated from these files: - block.proto - -It has these top-level messages: - PartSetHeader - BlockID - Header - Version - Timestamp -*/ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types/proto3/block.proto + package proto3 -import proto "github.com/golang/protobuf/proto" +import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" @@ -29,17 +16,39 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type PartSetHeader struct { - Total int32 `protobuf:"zigzag32,1,opt,name=Total" json:"Total,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` + Total int32 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } -func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } -func (*PartSetHeader) ProtoMessage() {} -func (*PartSetHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{0} +} +func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PartSetHeader.Unmarshal(m, b) +} +func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) +} +func (dst *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(dst, src) +} +func (m *PartSetHeader) XXX_Size() int { + return xxx_messageInfo_PartSetHeader.Size(m) +} +func (m *PartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_PartSetHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo func (m *PartSetHeader) GetTotal() int32 { if m != nil { @@ -56,14 +65,36 @@ func (m *PartSetHeader) GetHash() []byte { } type BlockID struct { - Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` - PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` + Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` + PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{1} +} +func (m *BlockID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockID.Unmarshal(m, b) +} +func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) +} +func (dst *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(dst, src) +} +func (m *BlockID) XXX_Size() int { + return xxx_messageInfo_BlockID.Size(m) +} +func (m *BlockID) XXX_DiscardUnknown() { + xxx_messageInfo_BlockID.DiscardUnknown(m) } -func (m *BlockID) Reset() { *m = BlockID{} } -func (m *BlockID) String() string { return proto.CompactTextString(m) } -func (*BlockID) ProtoMessage() {} -func (*BlockID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_BlockID proto.InternalMessageInfo func (m *BlockID) GetHash() []byte { if m != nil { @@ -82,11 
+113,11 @@ func (m *BlockID) GetPartsHeader() *PartSetHeader { type Header struct { // basic block info Version *Version `protobuf:"bytes,1,opt,name=Version" json:"Version,omitempty"` - ChainID string `protobuf:"bytes,2,opt,name=ChainID" json:"ChainID,omitempty"` - Height int64 `protobuf:"zigzag64,3,opt,name=Height" json:"Height,omitempty"` + ChainID string `protobuf:"bytes,2,opt,name=ChainID,proto3" json:"ChainID,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=Height,proto3" json:"Height,omitempty"` Time *Timestamp `protobuf:"bytes,4,opt,name=Time" json:"Time,omitempty"` - NumTxs int64 `protobuf:"zigzag64,5,opt,name=NumTxs" json:"NumTxs,omitempty"` - TotalTxs int64 `protobuf:"zigzag64,6,opt,name=TotalTxs" json:"TotalTxs,omitempty"` + NumTxs int64 `protobuf:"varint,5,opt,name=NumTxs,proto3" json:"NumTxs,omitempty"` + TotalTxs int64 `protobuf:"varint,6,opt,name=TotalTxs,proto3" json:"TotalTxs,omitempty"` // prev block info LastBlockID *BlockID `protobuf:"bytes,7,opt,name=LastBlockID" json:"LastBlockID,omitempty"` // hashes of block data @@ -99,14 +130,36 @@ type Header struct { AppHash []byte `protobuf:"bytes,13,opt,name=AppHash,proto3" json:"AppHash,omitempty"` LastResultsHash []byte `protobuf:"bytes,14,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` // consensus info - EvidenceHash []byte `protobuf:"bytes,15,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` - ProposerAddress []byte `protobuf:"bytes,16,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` + EvidenceHash []byte `protobuf:"bytes,15,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,16,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{2} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Header.Unmarshal(m, b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) +} +func (dst *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(dst, src) +} +func (m *Header) XXX_Size() int { + return xxx_messageInfo_Header.Size(m) +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) } -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_Header proto.InternalMessageInfo func (m *Header) GetVersion() *Version { if m != nil { @@ -221,14 +274,36 @@ func (m *Header) GetProposerAddress() []byte { } type Version struct { - Block uint64 `protobuf:"varint,1,opt,name=Block" json:"Block,omitempty"` - App uint64 `protobuf:"varint,2,opt,name=App" json:"App,omitempty"` + Block uint64 `protobuf:"varint,1,opt,name=Block,proto3" json:"Block,omitempty"` + App uint64 `protobuf:"varint,2,opt,name=App,proto3" json:"App,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() 
string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{3} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) } -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_Version proto.InternalMessageInfo func (m *Version) GetBlock() uint64 { if m != nil { @@ -250,14 +325,36 @@ func (m *Version) GetApp() uint64 { // https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 // NOTE/XXX: nanos do not get skipped if they are zero in amino. type Timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{4} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -281,36 +378,37 @@ func init() { proto.RegisterType((*Timestamp)(nil), "proto3.Timestamp") } -func init() { proto.RegisterFile("block.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 443 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xcd, 0x6a, 0xdb, 0x40, - 0x10, 0x46, 0xb5, 0x6c, 0xc7, 0x23, 0x3b, 0x4e, 0x86, 0xb6, 0x88, 0x9e, 0x8c, 0x68, 0x8b, 0x7b, - 0x31, 0x24, 0x39, 0x94, 0xd2, 0x93, 0x6b, 0x17, 0x12, 0x28, 0x21, 0x6c, 0x8d, 0xef, 0x1b, 0x6b, - 0xa9, 0x45, 0x2d, 0xad, 0xd0, 0xac, 0x4b, 0xde, 0xb0, 0xaf, 0x55, 0x66, 0x56, 0x52, 0x23, 
0x93, - 0x93, 0xf7, 0xfb, 0x99, 0x6f, 0x76, 0xc7, 0x23, 0x88, 0x1e, 0x0f, 0x76, 0xf7, 0x7b, 0x51, 0x56, - 0xd6, 0x59, 0x1c, 0xc8, 0xcf, 0x4d, 0xf2, 0x05, 0x26, 0x0f, 0xba, 0x72, 0x3f, 0x8d, 0xbb, 0x35, - 0x3a, 0x35, 0x15, 0xbe, 0x86, 0xfe, 0xc6, 0x3a, 0x7d, 0x88, 0x83, 0x59, 0x30, 0xbf, 0x54, 0x1e, - 0x20, 0x42, 0x78, 0xab, 0x69, 0x1f, 0xbf, 0x9a, 0x05, 0xf3, 0xb1, 0x92, 0x73, 0xb2, 0x85, 0xe1, - 0x37, 0x4e, 0xbc, 0x5b, 0xb7, 0x72, 0xf0, 0x5f, 0xc6, 0xcf, 0x10, 0x71, 0x32, 0xf9, 0x5c, 0xa9, - 0x8c, 0xae, 0xdf, 0xf8, 0xf6, 0x37, 0x8b, 0x4e, 0x53, 0xf5, 0xdc, 0x99, 0xfc, 0x0d, 0x61, 0x50, - 0x5f, 0xe6, 0x13, 0x0c, 0xb7, 0xa6, 0xa2, 0xcc, 0x16, 0x12, 0x1d, 0x5d, 0x4f, 0x9b, 0xfa, 0x9a, - 0x56, 0x8d, 0x8e, 0x31, 0x0c, 0x57, 0x7b, 0x9d, 0x15, 0x77, 0x6b, 0x69, 0x35, 0x52, 0x0d, 0xc4, - 0xb7, 0x1c, 0x97, 0xfd, 0xda, 0xbb, 0xb8, 0x37, 0x0b, 0xe6, 0xa8, 0x6a, 0x84, 0x1f, 0x20, 0xdc, - 0x64, 0xb9, 0x89, 0x43, 0x49, 0xbe, 0x6c, 0x92, 0x99, 0x23, 0xa7, 0xf3, 0x52, 0x89, 0xcc, 0xe5, - 0xf7, 0xc7, 0x7c, 0xf3, 0x44, 0x71, 0xdf, 0x97, 0x7b, 0x84, 0xef, 0xe0, 0x4c, 0x66, 0xc3, 0xca, - 0x40, 0x94, 0x16, 0xe3, 0x15, 0x44, 0x3f, 0x34, 0xb9, 0x7a, 0x3c, 0xf1, 0xb0, 0x7b, 0xf7, 0x9a, - 0x56, 0xcf, 0x3d, 0xf8, 0x11, 0xce, 0x19, 0xae, 0x6c, 0x9e, 0x67, 0x4e, 0x86, 0x79, 0x26, 0xc3, - 0x3c, 0x61, 0xb9, 0xed, 0x5a, 0x3b, 0x2d, 0x8e, 0x91, 0x38, 0x5a, 0xcc, 0x19, 0x5b, 0x7d, 0xc8, - 0x52, 0xed, 0x6c, 0x45, 0xe2, 0x00, 0x9f, 0xd1, 0x65, 0x71, 0x01, 0x78, 0x6f, 0x9e, 0xdc, 0x89, - 0x37, 0x12, 0xef, 0x0b, 0x0a, 0xbe, 0x87, 0xc9, 0xca, 0x16, 0x64, 0x0a, 0x3a, 0x7a, 0xeb, 0x58, - 0xac, 0x5d, 0x92, 0xff, 0x81, 0x65, 0x59, 0x8a, 0x3e, 0x11, 0xbd, 0x81, 0x38, 0x87, 0x29, 0xbf, - 0x42, 0x19, 0x3a, 0x1e, 0x9c, 0x4f, 0x38, 0x17, 0xc7, 0x29, 0x8d, 0x09, 0x8c, 0xbf, 0xff, 0xc9, - 0x52, 0x53, 0xec, 0x8c, 0xd8, 0xa6, 0x62, 0xeb, 0x70, 0x9c, 0xf6, 0x50, 0xd9, 0xd2, 0x92, 0xa9, - 0x96, 0x69, 0x5a, 0x19, 0xa2, 0xf8, 0xc2, 0xa7, 0x9d, 0xd0, 0xc9, 0x55, 0xbb, 0x3e, 0xbc, 0xd6, - 0x32, 0x69, 0xd9, 0xa3, 0x50, 0x79, 0x80, 0x17, 0xd0, 0x5b, 0x96, 0xa5, 0x2c, 0x4c, 0xa8, 0xf8, - 0x98, 0x7c, 0x85, 0x51, 0xbb, 0x00, 0xfc, 0x22, 0x32, 0x3b, 0x5b, 0xa4, 0x24, 0x65, 0x3d, 0xd5, - 0x40, 0x8e, 0x2b, 0x74, 0x61, 0x49, 0x4a, 0xfb, 0xca, 0x83, 0xc7, 0xfa, 0xa3, 0xfa, 0x17, 0x00, - 0x00, 0xff, 0xff, 0x8f, 0x82, 0xc0, 0x0c, 0x6a, 0x03, 0x00, 0x00, +func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_block_57c41dfc0fc285b3) } + +var fileDescriptor_block_57c41dfc0fc285b3 = []byte{ + // 451 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5f, 0x6f, 0xd3, 0x30, + 0x10, 0x57, 0x68, 0xda, 0xae, 0x97, 0x76, 0x1d, 0x27, 0x40, 0x16, 0x4f, 0x55, 0x04, 0xa8, 0xbc, + 0x74, 0xda, 0xf6, 0x80, 0x10, 0x4f, 0xa5, 0x45, 0xda, 0x24, 0x34, 0x4d, 0xa6, 0xea, 0xbb, 0xd7, + 0x58, 0x34, 0xa2, 0x89, 0xa3, 0x9c, 0x8b, 0xc6, 0x27, 0xe4, 0x6b, 0x21, 0x9f, 0x93, 0xd0, 0x44, + 0x7b, 0xf3, 0xef, 0xcf, 0xfd, 0xce, 0xbe, 0x5c, 0x40, 0xd8, 0x3f, 0x85, 0xa6, 0xcb, 0xa2, 0x34, + 0xd6, 0xdc, 0x5c, 0x3e, 0x1e, 0xcc, 0xee, 0xd7, 0x82, 0x01, 0x0e, 0x3c, 0x17, 0x7f, 0x86, 0xc9, + 0x83, 0x2a, 0xed, 0x0f, 0x6d, 0x6f, 0xb5, 0x4a, 0x74, 0x89, 0xaf, 0xa0, 0xbf, 0x31, 0x56, 0x1d, + 0x44, 0x30, 0x0b, 0xe6, 0x7d, 0xe9, 0x01, 0x22, 0x84, 0xb7, 0x8a, 0xf6, 0xe2, 0xc5, 0x2c, 0x98, + 0x8f, 0x25, 0x9f, 0xe3, 0x2d, 0x0c, 0xbf, 0xba, 0xc4, 0xbb, 0x75, 0x23, 0x07, 0xff, 0x65, 0xfc, + 0x04, 0x91, 0x4b, 0x26, 0x9f, 0xcb, 0x95, 0xd1, 0xf5, 0x6b, 0xdf, 0xfe, 0x66, 0xd1, 0x6a, 0x2a, + 0x4f, 0x9d, 0xf1, 0xdf, 0x10, 0x06, 
0xd5, 0x65, 0x3e, 0xc2, 0x70, 0xab, 0x4b, 0x4a, 0x4d, 0xce, + 0xd1, 0xd1, 0xf5, 0xb4, 0xae, 0xaf, 0x68, 0x59, 0xeb, 0x28, 0x60, 0xb8, 0xda, 0xab, 0x34, 0xbf, + 0x5b, 0x73, 0xab, 0x91, 0xac, 0x21, 0xbe, 0x71, 0x71, 0xe9, 0xcf, 0xbd, 0x15, 0xbd, 0x59, 0x30, + 0xef, 0xc9, 0x0a, 0xe1, 0x7b, 0x08, 0x37, 0x69, 0xa6, 0x45, 0xc8, 0xc9, 0x2f, 0xeb, 0x64, 0xc7, + 0x91, 0x55, 0x59, 0x21, 0x59, 0x76, 0xe5, 0xf7, 0xc7, 0x6c, 0xf3, 0x44, 0xa2, 0xef, 0xcb, 0x3d, + 0xc2, 0xb7, 0x70, 0xc6, 0xb3, 0x71, 0xca, 0x80, 0x95, 0x06, 0xe3, 0x15, 0x44, 0xdf, 0x15, 0xd9, + 0x6a, 0x3c, 0x62, 0xd8, 0xbe, 0x7b, 0x45, 0xcb, 0x53, 0x0f, 0x7e, 0x80, 0x73, 0x07, 0x57, 0x26, + 0xcb, 0x52, 0xcb, 0xc3, 0x3c, 0xe3, 0x61, 0x76, 0x58, 0xd7, 0x76, 0xad, 0xac, 0x62, 0xc7, 0x88, + 0x1d, 0x0d, 0x76, 0x19, 0x5b, 0x75, 0x48, 0x13, 0x65, 0x4d, 0x49, 0xec, 0x00, 0x9f, 0xd1, 0x66, + 0x71, 0x01, 0x78, 0xaf, 0x9f, 0x6c, 0xc7, 0x1b, 0xb1, 0xf7, 0x19, 0x05, 0xdf, 0xc1, 0x64, 0x65, + 0x72, 0xd2, 0x39, 0x1d, 0xbd, 0x75, 0xcc, 0xd6, 0x36, 0xe9, 0xbe, 0xc0, 0xb2, 0x28, 0x58, 0x9f, + 0xb0, 0x5e, 0x43, 0x9c, 0xc3, 0xd4, 0xbd, 0x42, 0x6a, 0x3a, 0x1e, 0xac, 0x4f, 0x38, 0x67, 0x47, + 0x97, 0xc6, 0x18, 0xc6, 0xdf, 0x7e, 0xa7, 0x89, 0xce, 0x77, 0x9a, 0x6d, 0x53, 0xb6, 0xb5, 0x38, + 0x97, 0xf6, 0x50, 0x9a, 0xc2, 0x90, 0x2e, 0x97, 0x49, 0x52, 0x6a, 0x22, 0x71, 0xe1, 0xd3, 0x3a, + 0x74, 0x7c, 0xd5, 0xac, 0x8f, 0x5b, 0x6b, 0x9e, 0x34, 0xef, 0x51, 0x28, 0x3d, 0xc0, 0x0b, 0xe8, + 0x2d, 0x8b, 0x82, 0x17, 0x26, 0x94, 0xee, 0x18, 0x7f, 0x81, 0x51, 0xb3, 0x00, 0xee, 0x45, 0xa4, + 0x77, 0x26, 0x4f, 0x88, 0xcb, 0x7a, 0xb2, 0x86, 0x2e, 0x2e, 0x57, 0xb9, 0x21, 0x2e, 0xed, 0x4b, + 0x0f, 0x1e, 0xab, 0x9f, 0xea, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4f, 0x84, 0xb5, 0xf8, 0x77, + 0x03, 0x00, 0x00, } diff --git a/types/proto3/block.proto b/types/proto3/block.proto index 1c76746c2..93cf1bc75 100644 --- a/types/proto3/block.proto +++ b/types/proto3/block.proto @@ -4,7 +4,7 @@ package proto3; message PartSetHeader { - sint32 Total = 1; + int32 Total = 1; bytes Hash = 2; } @@ -17,10 +17,10 @@ message Header { // basic block info Version Version = 1; string ChainID = 2; - sint64 Height = 3; + int64 Height = 3; Timestamp Time = 4; - sint64 NumTxs = 5; - sint64 TotalTxs = 6; + int64 NumTxs = 5; + int64 TotalTxs = 6; // prev block info BlockID LastBlockID = 7; diff --git a/types/vote.go b/types/vote.go index e1095bf15..333684fc2 100644 --- a/types/vote.go +++ b/types/vote.go @@ -12,7 +12,7 @@ import ( const ( // MaxVoteBytes is a maximum vote size (including amino overhead). - MaxVoteBytes int64 = 203 + MaxVoteBytes int64 = 199 ) var ( From b24de1c01cc8627cf72b4f5c1295f436a9904aed Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 29 Oct 2018 10:13:11 -0400 Subject: [PATCH 101/113] update changelog pending and readme (#2725) --- CHANGELOG_PENDING.md | 17 +++++++++++------ README.md | 2 +- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index a56487ff1..b8a9102af 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -2,7 +2,7 @@ ## v0.26.0 -*October 19, 2018* +*October 29, 2018* Special thanks to external contributors on this release: @bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu, @@ -17,9 +17,14 @@ It also includes our first take at a generalized merkle proof system. See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new version. +Please note that we are still making breaking changes to the protocols. 
+While the new Version fields should help us to keep the software backwards compatible +even while upgrading the protocols, we cannot guarantee that new releases will +be compatible with old chains just yet. Thanks for bearing with us! + Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). -BREAKING CHANGES: +### BREAKING CHANGES: * CLI/RPC/Config * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints @@ -50,7 +55,6 @@ BREAKING CHANGES: * [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`. * [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`, `PrecommitType`. - * [types] [\#2682](https://github.com/tendermint/tendermint/issues/2682) Use proto3 `varint` encoding for ints that are usually unsigned (instead of zigzag encoding). * Blockchain Protocol * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: @@ -66,17 +70,18 @@ BREAKING CHANGES: * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version * [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same order they appear in the header, instead of sorting by field name + * [types] [\#2682](https://github.com/tendermint/tendermint/issues/2682) Use proto3 `varint` encoding for ints that are usually unsigned (instead of zigzag encoding). * P2P Protocol * [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake -FEATURES: +### FEATURES: - [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` - [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo` - [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together -IMPROVEMENTS: +### IMPROVEMENTS: - Additional Metrics - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) @@ -85,7 +90,7 @@ IMPROVEMENTS: github.com/tendermint/crypto - [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit -BUG FIXES: +### BUG FIXES: - [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile need call Flush() before rename (@goolAdapter) - [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method - [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray) diff --git a/README.md b/README.md index 069f9f13e..328557ae3 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ We are also still making breaking changes to the protocol and the APIs. Thus, we tag the releases as *alpha software*. 
In any case, if you intend to run Tendermint in production, -please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :) +please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org). ## Security From cdc252b8182deb749150c33a2cc985e12e3c8437 Mon Sep 17 00:00:00 2001 From: Zach Date: Tue, 30 Oct 2018 10:34:51 -0400 Subject: [PATCH 102/113] add fail-test file instead of dep, closes #2638 (#2728) original author of this file is @ebuchman: https://github.com/ebuchman/fail-test --- Gopkg.lock | 8 ----- Gopkg.toml | 4 --- consensus/state.go | 2 +- libs/fail/fail.go | 78 ++++++++++++++++++++++++++++++++++++++++++++++ state/execution.go | 2 +- 5 files changed, 80 insertions(+), 14 deletions(-) create mode 100644 libs/fail/fail.go diff --git a/Gopkg.lock b/Gopkg.lock index 513e0bd7a..f4656e6ba 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -35,13 +35,6 @@ revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" version = "v1.1.1" -[[projects]] - digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b" - name = "github.com/ebuchman/fail-test" - packages = ["."] - pruneopts = "UT" - revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" - [[projects]] digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70" name = "github.com/fortytw2/leaktest" @@ -503,7 +496,6 @@ input-imports = [ "github.com/btcsuite/btcutil/base58", "github.com/btcsuite/btcutil/bech32", - "github.com/ebuchman/fail-test", "github.com/fortytw2/leaktest", "github.com/go-kit/kit/log", "github.com/go-kit/kit/log/level", diff --git a/Gopkg.toml b/Gopkg.toml index 955d6c6d1..47418bef3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -81,10 +81,6 @@ name = "github.com/jmhodges/levigo" revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" -[[constraint]] - name = "github.com/ebuchman/fail-test" - revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" - # last revision used by go-crypto [[constraint]] name = "github.com/btcsuite/btcutil" diff --git a/consensus/state.go b/consensus/state.go index 0b079f13d..40aeeb7a4 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -9,8 +9,8 @@ import ( "sync" "time" - fail "github.com/ebuchman/fail-test" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/types/time" diff --git a/libs/fail/fail.go b/libs/fail/fail.go new file mode 100644 index 000000000..edfca13e3 --- /dev/null +++ b/libs/fail/fail.go @@ -0,0 +1,78 @@ +package fail + +import ( + "fmt" + "math/rand" + "os" + "strconv" +) + +var callIndexToFail int + +func init() { + callIndexToFailS := os.Getenv("FAIL_TEST_INDEX") + + if callIndexToFailS == "" { + callIndexToFail = -1 + } else { + var err error + callIndexToFail, err = strconv.Atoi(callIndexToFailS) + if err != nil { + callIndexToFail = -1 + } + } +} + +// Fail when FAIL_TEST_INDEX == callIndex +var ( + callIndex int //indexes Fail calls + + callRandIndex int // indexes a run of FailRand calls + callRandIndexToFail = -1 // the callRandIndex to fail on in FailRand +) + +func Fail() { + if callIndexToFail < 0 { + return + } + + if callIndex == callIndexToFail { + Exit() + } + + callIndex += 1 +} + +// FailRand should be called n successive times. 
+// It will fail on a random one of those calls +// n must be greater than 0 +func FailRand(n int) { + if callIndexToFail < 0 { + return + } + + if callRandIndexToFail < 0 { + // first call in the loop, pick a random index to fail at + callRandIndexToFail = rand.Intn(n) + callRandIndex = 0 + } + + if callIndex == callIndexToFail { + if callRandIndex == callRandIndexToFail { + Exit() + } + } + + callRandIndex += 1 + + if callRandIndex == n { + callIndex += 1 + } +} + +func Exit() { + fmt.Printf("*** fail-test %d ***\n", callIndex) + proc, _ := os.FindProcess(os.Getpid()) + proc.Signal(os.Interrupt) + // panic(fmt.Sprintf("*** fail-test %d ***", callIndex)) +} diff --git a/state/execution.go b/state/execution.go index 68298a8d2..72f6cc978 100644 --- a/state/execution.go +++ b/state/execution.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/ebuchman/fail-test" abci "github.com/tendermint/tendermint/abci/types" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/proxy" From 56d7160606b16dc0a3d56c9b9b9d2b9aeb2c6484 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Tue, 30 Oct 2018 08:36:53 -0700 Subject: [PATCH 103/113] Add ValidatorPubkeyTypes as a consensus param (#2636) * Add ValidatorPubkeyTypes as a consensus param Ref #2414 * update spec * address anton's comment * Switch to Validator and Validator Params * Correct changelog entry * Address bucky's comments! * forgot to update changelog * fix typo * fix Params naming --- CHANGELOG_PENDING.md | 2 + abci/types/types.pb.go | 904 ++++++++++++++++++++++++------------- abci/types/types.proto | 13 +- abci/types/typespb_test.go | 158 ++++++- docs/spec/abci/abci.md | 13 +- evidence/pool.go | 2 +- evidence/pool_test.go | 2 +- evidence/reactor.go | 2 +- state/execution.go | 2 +- state/state_test.go | 8 +- state/store.go | 2 +- state/validation.go | 2 +- types/params.go | 72 ++- types/params_test.go | 69 +-- types/protobuf.go | 24 +- types/protobuf_test.go | 1 - 16 files changed, 888 insertions(+), 388 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index b8a9102af..5c25a4b19 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -57,6 +57,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi `PrecommitType`. * Blockchain Protocol + * [abci] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Add ValidatorParams field to ConsensusParams. + (Used to control which pubkey types validators can use, by abci type) * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: * [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`. * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding. 
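For illustration, a minimal sketch of how an ABCI application might populate the new ValidatorParams (together with the renamed BlockSizeParams and EvidenceParams), assuming only the generated Go types in abci/types shown below; the numeric limits and the "ed25519" key-type string are placeholders for the example, and wiring the value into InitChain or EndBlock responses is left to the application:

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// exampleConsensusParams builds a ConsensusParams value using the new
// three-field layout from this patch: BlockSize, Evidence, Validator.
func exampleConsensusParams() *abci.ConsensusParams {
	return &abci.ConsensusParams{
		BlockSize: &abci.BlockSizeParams{
			MaxBytes: 1048576, // must be greater than 0 (see BlockSizeParams comment)
			MaxGas:   -1,      // must be greater or equal to -1
		},
		Evidence: &abci.EvidenceParams{
			MaxAge: 100000, // placeholder limit on evidence age
		},
		Validator: &abci.ValidatorParams{
			// Restrict validators to a single pubkey type. The accepted type
			// strings are defined by Tendermint; "ed25519" is assumed here.
			PubKeyTypes: []string{"ed25519"},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", exampleConsensusParams())
}
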
diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 6a70bb979..c867dffc8 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -61,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{0} + return fileDescriptor_types_5b877df1938afe10, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{1} + return fileDescriptor_types_5b877df1938afe10, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{2} + return fileDescriptor_types_5b877df1938afe10, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -571,7 +571,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{3} + return fileDescriptor_types_5b877df1938afe10, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{4} + return fileDescriptor_types_5b877df1938afe10, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -692,7 +692,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{5} + return fileDescriptor_types_5b877df1938afe10, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -770,7 +770,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{6} + return fileDescriptor_types_5b877df1938afe10, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -827,7 +827,6 @@ func (m *RequestQuery) GetProve() bool { return false } -// NOTE: validators here have empty pubkeys. 
type RequestBeginBlock struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` Header Header `protobuf:"bytes,2,opt,name=header" json:"header"` @@ -842,7 +841,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{7} + return fileDescriptor_types_5b877df1938afe10, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -910,7 +909,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{8} + return fileDescriptor_types_5b877df1938afe10, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -957,7 +956,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{9} + return fileDescriptor_types_5b877df1938afe10, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1003,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{10} + return fileDescriptor_types_5b877df1938afe10, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1050,7 +1049,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{11} + return fileDescriptor_types_5b877df1938afe10, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1103,7 +1102,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{12} + return fileDescriptor_types_5b877df1938afe10, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1556,7 +1555,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{13} + return fileDescriptor_types_5b877df1938afe10, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1603,7 +1602,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) 
Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{14} + return fileDescriptor_types_5b877df1938afe10, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1649,7 +1648,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{15} + return fileDescriptor_types_5b877df1938afe10, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1693,7 +1692,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{16} + return fileDescriptor_types_5b877df1938afe10, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1772,7 +1771,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{17} + return fileDescriptor_types_5b877df1938afe10, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1834,7 +1833,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{18} + return fileDescriptor_types_5b877df1938afe10, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1897,7 +1896,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{19} + return fileDescriptor_types_5b877df1938afe10, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2000,7 +1999,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{20} + return fileDescriptor_types_5b877df1938afe10, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2054,7 +2053,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{21} + return fileDescriptor_types_5b877df1938afe10, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2157,7 +2156,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) 
String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{22} + return fileDescriptor_types_5b877df1938afe10, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2255,7 +2254,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{23} + return fileDescriptor_types_5b877df1938afe10, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2317,7 +2316,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{24} + return fileDescriptor_types_5b877df1938afe10, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2356,18 +2355,19 @@ func (m *ResponseCommit) GetData() []byte { // ConsensusParams contains all consensus-relevant parameters // that can be adjusted by the abci app type ConsensusParams struct { - BlockSize *BlockSize `protobuf:"bytes,1,opt,name=block_size,json=blockSize" json:"block_size,omitempty"` - EvidenceParams *EvidenceParams `protobuf:"bytes,2,opt,name=evidence_params,json=evidenceParams" json:"evidence_params,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + BlockSize *BlockSizeParams `protobuf:"bytes,1,opt,name=block_size,json=blockSize" json:"block_size,omitempty"` + Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence" json:"evidence,omitempty"` + Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator" json:"validator,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{25} + return fileDescriptor_types_5b877df1938afe10, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2396,22 +2396,29 @@ func (m *ConsensusParams) XXX_DiscardUnknown() { var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo -func (m *ConsensusParams) GetBlockSize() *BlockSize { +func (m *ConsensusParams) GetBlockSize() *BlockSizeParams { if m != nil { return m.BlockSize } return nil } -func (m *ConsensusParams) GetEvidenceParams() *EvidenceParams { +func (m *ConsensusParams) GetEvidence() *EvidenceParams { if m != nil { - return m.EvidenceParams + return m.Evidence + } + return nil +} + +func (m *ConsensusParams) GetValidator() *ValidatorParams { + if m != nil { + return m.Validator } return nil } // BlockSize contains limits on the block size. 
-type BlockSize struct { +type BlockSizeParams struct { // Note: must be greater than 0 MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` // Note: must be greater or equal to -1 @@ -2421,18 +2428,18 @@ type BlockSize struct { XXX_sizecache int32 `json:"-"` } -func (m *BlockSize) Reset() { *m = BlockSize{} } -func (m *BlockSize) String() string { return proto.CompactTextString(m) } -func (*BlockSize) ProtoMessage() {} -func (*BlockSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{26} +func (m *BlockSizeParams) Reset() { *m = BlockSizeParams{} } +func (m *BlockSizeParams) String() string { return proto.CompactTextString(m) } +func (*BlockSizeParams) ProtoMessage() {} +func (*BlockSizeParams) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{26} } -func (m *BlockSize) XXX_Unmarshal(b []byte) error { +func (m *BlockSizeParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *BlockSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *BlockSizeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_BlockSize.Marshal(b, m, deterministic) + return xxx_messageInfo_BlockSizeParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) @@ -2442,26 +2449,26 @@ func (m *BlockSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *BlockSize) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockSize.Merge(dst, src) +func (dst *BlockSizeParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockSizeParams.Merge(dst, src) } -func (m *BlockSize) XXX_Size() int { +func (m *BlockSizeParams) XXX_Size() int { return m.Size() } -func (m *BlockSize) XXX_DiscardUnknown() { - xxx_messageInfo_BlockSize.DiscardUnknown(m) +func (m *BlockSizeParams) XXX_DiscardUnknown() { + xxx_messageInfo_BlockSizeParams.DiscardUnknown(m) } -var xxx_messageInfo_BlockSize proto.InternalMessageInfo +var xxx_messageInfo_BlockSizeParams proto.InternalMessageInfo -func (m *BlockSize) GetMaxBytes() int64 { +func (m *BlockSizeParams) GetMaxBytes() int64 { if m != nil { return m.MaxBytes } return 0 } -func (m *BlockSize) GetMaxGas() int64 { +func (m *BlockSizeParams) GetMaxGas() int64 { if m != nil { return m.MaxGas } @@ -2481,7 +2488,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{27} + return fileDescriptor_types_5b877df1938afe10, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2517,6 +2524,54 @@ func (m *EvidenceParams) GetMaxAge() int64 { return 0 } +// ValidatorParams contains limits on validators. 
+type ValidatorParams struct { + PubKeyTypes []string `protobuf:"bytes,1,rep,name=pub_key_types,json=pubKeyTypes" json:"pub_key_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } +func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } +func (*ValidatorParams) ProtoMessage() {} +func (*ValidatorParams) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{28} +} +func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ValidatorParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorParams.Merge(dst, src) +} +func (m *ValidatorParams) XXX_Size() int { + return m.Size() +} +func (m *ValidatorParams) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorParams proto.InternalMessageInfo + +func (m *ValidatorParams) GetPubKeyTypes() []string { + if m != nil { + return m.PubKeyTypes + } + return nil +} + type LastCommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes" json:"votes"` @@ -2529,7 +2584,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{28} + return fileDescriptor_types_5b877df1938afe10, []int{29} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2603,7 +2658,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{29} + return fileDescriptor_types_5b877df1938afe10, []int{30} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2756,7 +2811,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{30} + return fileDescriptor_types_5b877df1938afe10, []int{31} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2811,7 +2866,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{31} + return fileDescriptor_types_5b877df1938afe10, []int{32} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2866,7 +2921,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return 
fileDescriptor_types_4449c1011851ea19, []int{32} + return fileDescriptor_types_5b877df1938afe10, []int{33} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2923,7 +2978,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{33} + return fileDescriptor_types_5b877df1938afe10, []int{34} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2979,7 +3034,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{34} + return fileDescriptor_types_5b877df1938afe10, []int{35} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3035,7 +3090,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{35} + return fileDescriptor_types_5b877df1938afe10, []int{36} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3090,7 +3145,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{36} + return fileDescriptor_types_5b877df1938afe10, []int{37} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3148,7 +3203,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_4449c1011851ea19, []int{37} + return fileDescriptor_types_5b877df1938afe10, []int{38} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3265,10 +3320,12 @@ func init() { golang_proto.RegisterType((*ResponseCommit)(nil), "types.ResponseCommit") proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") golang_proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") - proto.RegisterType((*BlockSize)(nil), "types.BlockSize") - golang_proto.RegisterType((*BlockSize)(nil), "types.BlockSize") + proto.RegisterType((*BlockSizeParams)(nil), "types.BlockSizeParams") + golang_proto.RegisterType((*BlockSizeParams)(nil), "types.BlockSizeParams") proto.RegisterType((*EvidenceParams)(nil), "types.EvidenceParams") golang_proto.RegisterType((*EvidenceParams)(nil), "types.EvidenceParams") + proto.RegisterType((*ValidatorParams)(nil), "types.ValidatorParams") + golang_proto.RegisterType((*ValidatorParams)(nil), "types.ValidatorParams") proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") golang_proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") proto.RegisterType((*Header)(nil), "types.Header") @@ -4714,7 +4771,10 @@ func (this *ConsensusParams) Equal(that interface{}) bool { if !this.BlockSize.Equal(that1.BlockSize) { return false } - if !this.EvidenceParams.Equal(that1.EvidenceParams) { + if 
!this.Evidence.Equal(that1.Evidence) { + return false + } + if !this.Validator.Equal(that1.Validator) { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { @@ -4722,14 +4782,14 @@ func (this *ConsensusParams) Equal(that interface{}) bool { } return true } -func (this *BlockSize) Equal(that interface{}) bool { +func (this *BlockSizeParams) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*BlockSize) + that1, ok := that.(*BlockSizeParams) if !ok { - that2, ok := that.(BlockSize) + that2, ok := that.(BlockSizeParams) if ok { that1 = &that2 } else { @@ -4779,6 +4839,38 @@ func (this *EvidenceParams) Equal(that interface{}) bool { } return true } +func (this *ValidatorParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ValidatorParams) + if !ok { + that2, ok := that.(ValidatorParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.PubKeyTypes) != len(that1.PubKeyTypes) { + return false + } + for i := range this.PubKeyTypes { + if this.PubKeyTypes[i] != that1.PubKeyTypes[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} func (this *LastCommitInfo) Equal(that interface{}) bool { if that == nil { return this == nil @@ -6868,23 +6960,33 @@ func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { } i += n33 } - if m.EvidenceParams != nil { + if m.Evidence != nil { dAtA[i] = 0x12 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.EvidenceParams.Size())) - n34, err := m.EvidenceParams.MarshalTo(dAtA[i:]) + i = encodeVarintTypes(dAtA, i, uint64(m.Evidence.Size())) + n34, err := m.Evidence.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n34 } + if m.Validator != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n35, err := m.Validator.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func (m *BlockSize) Marshal() (dAtA []byte, err error) { +func (m *BlockSizeParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -6894,7 +6996,7 @@ func (m *BlockSize) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlockSize) MarshalTo(dAtA []byte) (int, error) { +func (m *BlockSizeParams) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -6941,6 +7043,42 @@ func (m *EvidenceParams) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ValidatorParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorParams) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PubKeyTypes) > 0 { + for _, s := range m.PubKeyTypes { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6997,11 +7135,11 @@ func (m *Header) MarshalTo(dAtA []byte) 
(int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Version.Size())) - n35, err := m.Version.MarshalTo(dAtA[i:]) + n36, err := m.Version.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if len(m.ChainID) > 0 { dAtA[i] = 0x12 i++ @@ -7016,11 +7154,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n36, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n37, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 if m.NumTxs != 0 { dAtA[i] = 0x28 i++ @@ -7034,11 +7172,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) - n37, err := m.LastBlockId.MarshalTo(dAtA[i:]) + n38, err := m.LastBlockId.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 if len(m.LastCommitHash) > 0 { dAtA[i] = 0x42 i++ @@ -7156,11 +7294,11 @@ func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) - n38, err := m.PartsHeader.MarshalTo(dAtA[i:]) + n39, err := m.PartsHeader.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -7249,11 +7387,11 @@ func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) - n39, err := m.PubKey.MarshalTo(dAtA[i:]) + n40, err := m.PubKey.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 if m.Power != 0 { dAtA[i] = 0x10 i++ @@ -7283,11 +7421,11 @@ func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n40, err := m.Validator.MarshalTo(dAtA[i:]) + n41, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if m.SignedLastBlock { dAtA[i] = 0x10 i++ @@ -7361,11 +7499,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n41, err := m.Validator.MarshalTo(dAtA[i:]) + n42, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n42 if m.Height != 0 { dAtA[i] = 0x18 i++ @@ -7374,11 +7512,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n42, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n43, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n43 if m.TotalVotingPower != 0 { dAtA[i] = 0x28 i++ @@ -7971,19 +8109,22 @@ func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { func NewPopulatedConsensusParams(r randyTypes, easy bool) *ConsensusParams { this := &ConsensusParams{} if r.Intn(10) != 0 { - this.BlockSize = NewPopulatedBlockSize(r, easy) + this.BlockSize = NewPopulatedBlockSizeParams(r, easy) + } + if r.Intn(10) != 0 { + this.Evidence = NewPopulatedEvidenceParams(r, easy) } if r.Intn(10) != 0 { - this.EvidenceParams = NewPopulatedEvidenceParams(r, easy) + this.Validator = NewPopulatedValidatorParams(r, easy) } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = 
randUnrecognizedTypes(r, 3) + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) } return this } -func NewPopulatedBlockSize(r randyTypes, easy bool) *BlockSize { - this := &BlockSize{} +func NewPopulatedBlockSizeParams(r randyTypes, easy bool) *BlockSizeParams { + this := &BlockSizeParams{} this.MaxBytes = int64(r.Int63()) if r.Intn(2) == 0 { this.MaxBytes *= -1 @@ -8010,6 +8151,19 @@ func NewPopulatedEvidenceParams(r randyTypes, easy bool) *EvidenceParams { return this } +func NewPopulatedValidatorParams(r randyTypes, easy bool) *ValidatorParams { + this := &ValidatorParams{} + v31 := r.Intn(10) + this.PubKeyTypes = make([]string, v31) + for i := 0; i < v31; i++ { + this.PubKeyTypes[i] = string(randStringTypes(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + } + return this +} + func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this := &LastCommitInfo{} this.Round = int32(r.Int31()) @@ -8017,11 +8171,11 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this.Round *= -1 } if r.Intn(10) != 0 { - v31 := r.Intn(5) - this.Votes = make([]VoteInfo, v31) - for i := 0; i < v31; i++ { - v32 := NewPopulatedVoteInfo(r, easy) - this.Votes[i] = *v32 + v32 := r.Intn(5) + this.Votes = make([]VoteInfo, v32) + for i := 0; i < v32; i++ { + v33 := NewPopulatedVoteInfo(r, easy) + this.Votes[i] = *v33 } } if !easy && r.Intn(10) != 0 { @@ -8032,15 +8186,15 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { func NewPopulatedHeader(r randyTypes, easy bool) *Header { this := &Header{} - v33 := NewPopulatedVersion(r, easy) - this.Version = *v33 + v34 := NewPopulatedVersion(r, easy) + this.Version = *v34 this.ChainID = string(randStringTypes(r)) this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v34 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v34 + v35 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v35 this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 @@ -8049,51 +8203,51 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v35 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v35 - v36 := r.Intn(100) - this.LastCommitHash = make([]byte, v36) - for i := 0; i < v36; i++ { - this.LastCommitHash[i] = byte(r.Intn(256)) - } + v36 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v36 v37 := r.Intn(100) - this.DataHash = make([]byte, v37) + this.LastCommitHash = make([]byte, v37) for i := 0; i < v37; i++ { - this.DataHash[i] = byte(r.Intn(256)) + this.LastCommitHash[i] = byte(r.Intn(256)) } v38 := r.Intn(100) - this.ValidatorsHash = make([]byte, v38) + this.DataHash = make([]byte, v38) for i := 0; i < v38; i++ { - this.ValidatorsHash[i] = byte(r.Intn(256)) + this.DataHash[i] = byte(r.Intn(256)) } v39 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v39) + this.ValidatorsHash = make([]byte, v39) for i := 0; i < v39; i++ { - this.NextValidatorsHash[i] = byte(r.Intn(256)) + this.ValidatorsHash[i] = byte(r.Intn(256)) } v40 := r.Intn(100) - this.ConsensusHash = make([]byte, v40) + this.NextValidatorsHash = make([]byte, v40) for i := 0; i < v40; i++ { - this.ConsensusHash[i] = byte(r.Intn(256)) + this.NextValidatorsHash[i] = byte(r.Intn(256)) } v41 := r.Intn(100) - this.AppHash = make([]byte, v41) + this.ConsensusHash = make([]byte, v41) for i := 0; i < v41; i++ { - this.AppHash[i] = byte(r.Intn(256)) + this.ConsensusHash[i] = 
byte(r.Intn(256)) } v42 := r.Intn(100) - this.LastResultsHash = make([]byte, v42) + this.AppHash = make([]byte, v42) for i := 0; i < v42; i++ { - this.LastResultsHash[i] = byte(r.Intn(256)) + this.AppHash[i] = byte(r.Intn(256)) } v43 := r.Intn(100) - this.EvidenceHash = make([]byte, v43) + this.LastResultsHash = make([]byte, v43) for i := 0; i < v43; i++ { - this.EvidenceHash[i] = byte(r.Intn(256)) + this.LastResultsHash[i] = byte(r.Intn(256)) } v44 := r.Intn(100) - this.ProposerAddress = make([]byte, v44) + this.EvidenceHash = make([]byte, v44) for i := 0; i < v44; i++ { + this.EvidenceHash[i] = byte(r.Intn(256)) + } + v45 := r.Intn(100) + this.ProposerAddress = make([]byte, v45) + for i := 0; i < v45; i++ { this.ProposerAddress[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8114,13 +8268,13 @@ func NewPopulatedVersion(r randyTypes, easy bool) *Version { func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { this := &BlockID{} - v45 := r.Intn(100) - this.Hash = make([]byte, v45) - for i := 0; i < v45; i++ { + v46 := r.Intn(100) + this.Hash = make([]byte, v46) + for i := 0; i < v46; i++ { this.Hash[i] = byte(r.Intn(256)) } - v46 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v46 + v47 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v47 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -8133,9 +8287,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { if r.Intn(2) == 0 { this.Total *= -1 } - v47 := r.Intn(100) - this.Hash = make([]byte, v47) - for i := 0; i < v47; i++ { + v48 := r.Intn(100) + this.Hash = make([]byte, v48) + for i := 0; i < v48; i++ { this.Hash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8146,9 +8300,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v48 := r.Intn(100) - this.Address = make([]byte, v48) - for i := 0; i < v48; i++ { + v49 := r.Intn(100) + this.Address = make([]byte, v49) + for i := 0; i < v49; i++ { this.Address[i] = byte(r.Intn(256)) } this.Power = int64(r.Int63()) @@ -8163,8 +8317,8 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { this := &ValidatorUpdate{} - v49 := NewPopulatedPubKey(r, easy) - this.PubKey = *v49 + v50 := NewPopulatedPubKey(r, easy) + this.PubKey = *v50 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -8177,8 +8331,8 @@ func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { this := &VoteInfo{} - v50 := NewPopulatedValidator(r, easy) - this.Validator = *v50 + v51 := NewPopulatedValidator(r, easy) + this.Validator = *v51 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -8189,9 +8343,9 @@ func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v51 := r.Intn(100) - this.Data = make([]byte, v51) - for i := 0; i < v51; i++ { + v52 := r.Intn(100) + this.Data = make([]byte, v52) + for i := 0; i < v52; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8203,14 +8357,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence 
{ this := &Evidence{} this.Type = string(randStringTypes(r)) - v52 := NewPopulatedValidator(r, easy) - this.Validator = *v52 + v53 := NewPopulatedValidator(r, easy) + this.Validator = *v53 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v53 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v53 + v54 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v54 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -8240,9 +8394,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v54 := r.Intn(100) - tmps := make([]rune, v54) - for i := 0; i < v54; i++ { + v55 := r.Intn(100) + tmps := make([]rune, v55) + for i := 0; i < v55; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -8264,11 +8418,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v55 := r.Int63() + v56 := r.Int63() if r.Intn(2) == 0 { - v55 *= -1 + v56 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v55)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v56)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -8987,8 +9141,12 @@ func (m *ConsensusParams) Size() (n int) { l = m.BlockSize.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.EvidenceParams != nil { - l = m.EvidenceParams.Size() + if m.Evidence != nil { + l = m.Evidence.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validator != nil { + l = m.Validator.Size() n += 1 + l + sovTypes(uint64(l)) } if m.XXX_unrecognized != nil { @@ -8997,7 +9155,7 @@ func (m *ConsensusParams) Size() (n int) { return n } -func (m *BlockSize) Size() (n int) { +func (m *BlockSizeParams) Size() (n int) { var l int _ = l if m.MaxBytes != 0 { @@ -9024,6 +9182,21 @@ func (m *EvidenceParams) Size() (n int) { return n } +func (m *ValidatorParams) Size() (n int) { + var l int + _ = l + if len(m.PubKeyTypes) > 0 { + for _, s := range m.PubKeyTypes { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *LastCommitInfo) Size() (n int) { var l int _ = l @@ -13060,7 +13233,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.BlockSize == nil { - m.BlockSize = &BlockSize{} + m.BlockSize = &BlockSizeParams{} } if err := m.BlockSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13068,7 +13241,40 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvidenceParams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Evidence == nil { + m.Evidence = &EvidenceParams{} + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13092,10 +13298,10 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EvidenceParams == nil { - m.EvidenceParams = &EvidenceParams{} + if m.Validator == nil { + m.Validator = &ValidatorParams{} } - if err := m.EvidenceParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13121,7 +13327,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockSize) Unmarshal(dAtA []byte) error { +func (m *BlockSizeParams) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13144,10 +13350,10 @@ func (m *BlockSize) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockSize: wiretype end group for non-group") + return fmt.Errorf("proto: BlockSizeParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockSize: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BlockSizeParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13280,6 +13486,86 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { } return nil } +func (m *ValidatorParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyTypes = append(m.PubKeyTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -14885,148 +15171,150 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4449c1011851ea19) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_5b877df1938afe10) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_4449c1011851ea19) -} - -var fileDescriptor_types_4449c1011851ea19 = []byte{ - // 2177 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x93, 0x1b, 0x47, - 0x15, 0xdf, 0xd1, 0x6a, 0x25, 0xcd, 0xd3, 0xea, 0x23, 0xed, 0xb5, 0x2d, 0x8b, 0xb0, 0xeb, 0x1a, - 0x43, 0xe2, 0x25, 0x8e, 0x36, 0x6c, 0x08, 0xb5, 0x8e, 0x43, 0xaa, 0x56, 0xb6, 0x61, 0xb7, 0x12, - 0x60, 0x19, 0xdb, 0xcb, 0x85, 0xaa, 0xa9, 0x96, 0xa6, 0x2d, 0x4d, 0x59, 0x9a, 0x99, 0xcc, 0xb4, - 0x36, 0x5a, 0x1f, 0x73, 0xce, 0x21, 0x07, 0xfe, 0x08, 0xfe, 0x84, 0x1c, 0x39, 0x51, 0x39, 0x72, - 0xe0, 0x6c, 0x60, 0x29, 0x0e, 0x70, 0xa5, 0xa8, 0xe2, 0x48, 0xf5, 0xeb, 0xee, 0xf9, 0xda, 0x91, - 0x89, 0x03, 0x27, 0x2e, 0x52, 0xf7, 0xfb, 0xe8, 0x8f, 0x37, 0xef, 0xbd, 0xdf, 0x7b, 0x0d, 0xd7, - 0xe8, 0x68, 0xec, 0xed, 0xf1, 0xf3, 0x90, 0xc5, 0xf2, 0x77, 0x10, 0x46, 0x01, 0x0f, 0xc8, 0x06, - 0x4e, 0xfa, 0x6f, 0x4f, 0x3c, 0x3e, 0x5d, 0x8c, 0x06, 0xe3, 0x60, 0xbe, 0x37, 0x09, 0x26, 0xc1, - 0x1e, 0x72, 0x47, 0x8b, 0xa7, 0x38, 0xc3, 0x09, 0x8e, 0xa4, 0x56, 0x7f, 0x67, 0x12, 0x04, 0x93, - 0x19, 0x4b, 0xa5, 0xb8, 0x37, 0x67, 0x31, 0xa7, 0xf3, 0x50, 0x09, 0x1c, 0x64, 0xd6, 0xe3, 0xcc, - 0x77, 0x59, 0x34, 0xf7, 0x7c, 0x9e, 0x1d, 0xce, 0xbc, 0x51, 0xbc, 0x37, 0x0e, 0xe6, 0xf3, 0xc0, - 0xcf, 0x1e, 0xa8, 0x7f, 0xef, 0x3f, 0x6a, 0x8e, 0xa3, 0xf3, 0x90, 0x07, 0x7b, 0x73, 0x16, 0x3d, - 0x9b, 0x31, 0xf5, 0x27, 0x95, 0xad, 0xdf, 0x55, 0xa1, 0x6e, 0xb3, 0x4f, 0x16, 0x2c, 0xe6, 0xe4, - 0x36, 0x54, 0xd9, 0x78, 0x1a, 0xf4, 0x2a, 0x37, 0x8d, 0xdb, 0xcd, 0x7d, 0x32, 0x90, 0x9b, 0x28, - 0xee, 0xc3, 0xf1, 0x34, 0x38, 0x5a, 0xb3, 0x51, 0x82, 0xbc, 0x05, 0x1b, 0x4f, 0x67, 0x8b, 0x78, - 0xda, 0x5b, 0x47, 0xd1, 0x2b, 0x79, 0xd1, 0x1f, 0x0b, 0xd6, 0xd1, 0x9a, 0x2d, 0x65, 0xc4, 0xb2, - 0x9e, 0xff, 0x34, 0xe8, 0x55, 0xcb, 0x96, 0x3d, 0xf6, 0x9f, 0xe2, 0xb2, 0x42, 0x82, 0x1c, 0x00, - 0xc4, 0x8c, 0x3b, 0x41, 0xc8, 0xbd, 0xc0, 0xef, 0x6d, 0xa0, 0xfc, 0xf5, 0xbc, 0xfc, 0x23, 0xc6, - 0x7f, 0x8e, 0xec, 0xa3, 0x35, 0xdb, 0x8c, 0xf5, 0x44, 0x68, 0x7a, 0xbe, 0xc7, 0x9d, 0xf1, 0x94, - 0x7a, 0x7e, 0xaf, 0x56, 0xa6, 0x79, 0xec, 0x7b, 0xfc, 0xbe, 0x60, 0x0b, 0x4d, 0x4f, 0x4f, 0xc4, - 0x55, 0x3e, 0x59, 0xb0, 0xe8, 0xbc, 0x57, 0x2f, 0xbb, 0xca, 0x2f, 0x04, 0x4b, 0x5c, 0x05, 0x65, - 0xc8, 0x3d, 0x68, 0x8e, 0xd8, 0xc4, 0xf3, 0x9d, 0xd1, 0x2c, 0x18, 0x3f, 0xeb, 0x35, 0x50, 0xa5, - 0x97, 0x57, 0x19, 0x0a, 0x81, 0xa1, 0xe0, 0x1f, 0xad, 0xd9, 0x30, 0x4a, 0x66, 0x64, 0x1f, 0x1a, - 0xe3, 0x29, 0x1b, 0x3f, 0x73, 0xf8, 0xb2, 0x67, 0xa2, 0xe6, 0xd5, 0xbc, 0xe6, 0x7d, 0xc1, 0x7d, - 0xbc, 0x3c, 0x5a, 0xb3, 0xeb, 0x63, 0x39, 0x24, 0xef, 0x81, 0xc9, 0x7c, 0x57, 0x6d, 0xd7, 0x44, - 0xa5, 0x6b, 0x85, 0xef, 0xe2, 0xbb, 0x7a, 0xb3, 0x06, 0x53, 0x63, 0x32, 0x80, 0x9a, 0x70, 0x14, - 0x8f, 0xf7, 0x36, 0x51, 0x67, 0xab, 0xb0, 0x11, 0xf2, 0x8e, 0xd6, 0x6c, 0x25, 0x25, 0xcc, 0xe7, - 0xb2, 0x99, 0x77, 0xc6, 0x22, 0x71, 0xb8, 0x2b, 0x65, 0xe6, 0x7b, 0x20, 0xf9, 0x78, 0x3c, 0xd3, - 0xd5, 0x93, 0x61, 
0x1d, 0x36, 0xce, 0xe8, 0x6c, 0xc1, 0xac, 0x37, 0xa1, 0x99, 0xf1, 0x14, 0xd2, - 0x83, 0xfa, 0x9c, 0xc5, 0x31, 0x9d, 0xb0, 0x9e, 0x71, 0xd3, 0xb8, 0x6d, 0xda, 0x7a, 0x6a, 0xb5, - 0x61, 0x33, 0xeb, 0x27, 0xd6, 0x3c, 0x51, 0x14, 0xbe, 0x20, 0x14, 0xcf, 0x58, 0x14, 0x0b, 0x07, - 0x50, 0x8a, 0x6a, 0x4a, 0x6e, 0x41, 0x0b, 0xed, 0xe0, 0x68, 0xbe, 0xf0, 0xd3, 0xaa, 0xbd, 0x89, - 0xc4, 0x53, 0x25, 0xb4, 0x03, 0xcd, 0x70, 0x3f, 0x4c, 0x44, 0xd6, 0x51, 0x04, 0xc2, 0xfd, 0x50, - 0x09, 0x58, 0xef, 0x43, 0xb7, 0xe8, 0x4a, 0xa4, 0x0b, 0xeb, 0xcf, 0xd8, 0xb9, 0xda, 0x4f, 0x0c, - 0xc9, 0x96, 0xba, 0x16, 0xee, 0x61, 0xda, 0xea, 0x8e, 0x5f, 0x54, 0x12, 0xe5, 0xc4, 0x9b, 0xc8, - 0x01, 0x54, 0x45, 0x2c, 0xa3, 0x76, 0x73, 0xbf, 0x3f, 0x90, 0x81, 0x3e, 0xd0, 0x81, 0x3e, 0x78, - 0xac, 0x03, 0x7d, 0xd8, 0xf8, 0xea, 0xc5, 0xce, 0xda, 0x17, 0x7f, 0xdc, 0x31, 0x6c, 0xd4, 0x20, - 0x37, 0x84, 0x43, 0x50, 0xcf, 0x77, 0x3c, 0x57, 0xed, 0x53, 0xc7, 0xf9, 0xb1, 0x4b, 0x0e, 0xa1, - 0x3b, 0x0e, 0xfc, 0x98, 0xf9, 0xf1, 0x22, 0x76, 0x42, 0x1a, 0xd1, 0x79, 0xac, 0x62, 0x4d, 0x7f, - 0xfe, 0xfb, 0x9a, 0x7d, 0x82, 0x5c, 0xbb, 0x33, 0xce, 0x13, 0xc8, 0x07, 0x00, 0x67, 0x74, 0xe6, - 0xb9, 0x94, 0x07, 0x51, 0xdc, 0xab, 0xde, 0x5c, 0xcf, 0x28, 0x9f, 0x6a, 0xc6, 0x93, 0xd0, 0xa5, - 0x9c, 0x0d, 0xab, 0xe2, 0x64, 0x76, 0x46, 0x9e, 0xbc, 0x01, 0x1d, 0x1a, 0x86, 0x4e, 0xcc, 0x29, - 0x67, 0xce, 0xe8, 0x9c, 0xb3, 0x18, 0xe3, 0x71, 0xd3, 0x6e, 0xd1, 0x30, 0x7c, 0x24, 0xa8, 0x43, - 0x41, 0xb4, 0xdc, 0xe4, 0x6b, 0x62, 0xa8, 0x10, 0x02, 0x55, 0x97, 0x72, 0x8a, 0xd6, 0xd8, 0xb4, - 0x71, 0x2c, 0x68, 0x21, 0xe5, 0x53, 0x75, 0x47, 0x1c, 0x93, 0x6b, 0x50, 0x9b, 0x32, 0x6f, 0x32, - 0xe5, 0x78, 0xad, 0x75, 0x5b, 0xcd, 0x84, 0xe1, 0xc3, 0x28, 0x38, 0x63, 0x98, 0x2d, 0x1a, 0xb6, - 0x9c, 0x58, 0x7f, 0x35, 0xe0, 0xb5, 0x4b, 0xe1, 0x25, 0xd6, 0x9d, 0xd2, 0x78, 0xaa, 0xf7, 0x12, - 0x63, 0xf2, 0x96, 0x58, 0x97, 0xba, 0x2c, 0x52, 0x59, 0xac, 0xa5, 0x6e, 0x7c, 0x84, 0x44, 0x75, - 0x51, 0x25, 0x42, 0x1e, 0x42, 0x77, 0x46, 0x63, 0xee, 0xc8, 0x28, 0x70, 0x30, 0x4b, 0xad, 0xe7, - 0x22, 0xf3, 0x63, 0xaa, 0xa3, 0x45, 0x38, 0xa7, 0x52, 0x6f, 0xcf, 0x72, 0x54, 0x72, 0x04, 0x5b, - 0xa3, 0xf3, 0xe7, 0xd4, 0xe7, 0x9e, 0xcf, 0x9c, 0x4b, 0x36, 0xef, 0xa8, 0xa5, 0x1e, 0x9e, 0x79, - 0x2e, 0xf3, 0xc7, 0xda, 0xd8, 0x57, 0x12, 0x95, 0xe4, 0x63, 0xc4, 0xd6, 0x4d, 0x68, 0xe7, 0x73, - 0x01, 0x69, 0x43, 0x85, 0x2f, 0xd5, 0x0d, 0x2b, 0x7c, 0x69, 0x59, 0x89, 0x07, 0x26, 0x01, 0x79, - 0x49, 0x66, 0x17, 0x3a, 0x85, 0xe4, 0x90, 0x31, 0xb7, 0x91, 0x35, 0xb7, 0xd5, 0x81, 0x56, 0x2e, - 0x27, 0x58, 0x9f, 0x6f, 0x40, 0xc3, 0x66, 0x71, 0x28, 0x9c, 0x89, 0x1c, 0x80, 0xc9, 0x96, 0x63, - 0x26, 0xd3, 0xb1, 0x51, 0x48, 0x76, 0x52, 0xe6, 0xa1, 0xe6, 0x8b, 0xb4, 0x90, 0x08, 0x93, 0xdd, - 0x1c, 0x94, 0x5c, 0x29, 0x2a, 0x65, 0xb1, 0xe4, 0x4e, 0x1e, 0x4b, 0xb6, 0x0a, 0xb2, 0x05, 0x30, - 0xd9, 0xcd, 0x81, 0x49, 0x71, 0xe1, 0x1c, 0x9a, 0xdc, 0x2d, 0x41, 0x93, 0xe2, 0xf1, 0x57, 0xc0, - 0xc9, 0xdd, 0x12, 0x38, 0xe9, 0x5d, 0xda, 0xab, 0x14, 0x4f, 0xee, 0xe4, 0xf1, 0xa4, 0x78, 0x9d, - 0x02, 0xa0, 0x7c, 0x50, 0x06, 0x28, 0x37, 0x0a, 0x3a, 0x2b, 0x11, 0xe5, 0xdd, 0x4b, 0x88, 0x72, - 0xad, 0xa0, 0x5a, 0x02, 0x29, 0x77, 0x73, 0xb9, 0x1e, 0x4a, 0xef, 0x56, 0x9e, 0xec, 0xc9, 0x0f, - 0x2f, 0xa3, 0xd1, 0xf5, 0xe2, 0xa7, 0x2d, 0x83, 0xa3, 0xbd, 0x02, 0x1c, 0x5d, 0x2d, 0x9e, 0xb2, - 0x80, 0x47, 0x29, 0xaa, 0xec, 0x8a, 0xb8, 0x2f, 0x78, 0x9a, 0xc8, 0x11, 0x2c, 0x8a, 0x82, 0x48, - 0x25, 0x6c, 0x39, 0xb1, 0x6e, 0x8b, 0x4c, 0x94, 0xfa, 0xd7, 0x4b, 0x10, 0x08, 0x9d, 0x3e, 0xe3, - 0x5d, 0xd6, 0x97, 0x46, 0xaa, 0x8b, 0x11, 
0x9d, 0xcd, 0x62, 0xa6, 0xca, 0x62, 0x19, 0x60, 0xaa, - 0xe4, 0x81, 0x69, 0x07, 0x9a, 0x22, 0x57, 0x16, 0x30, 0x87, 0x86, 0x1a, 0x73, 0xc8, 0xf7, 0xe0, - 0x35, 0xcc, 0x33, 0x12, 0xbe, 0x54, 0x20, 0x56, 0x31, 0x10, 0x3b, 0x82, 0x21, 0x2d, 0x26, 0x13, - 0xe0, 0xdb, 0x70, 0x25, 0x23, 0x2b, 0xd6, 0xc5, 0x1c, 0x27, 0x93, 0x6f, 0x37, 0x91, 0x3e, 0x0c, - 0xc3, 0x23, 0x1a, 0x4f, 0xad, 0x9f, 0xa6, 0x06, 0x4a, 0xf1, 0x8c, 0x40, 0x75, 0x1c, 0xb8, 0xf2, - 0xde, 0x2d, 0x1b, 0xc7, 0x02, 0xe3, 0x66, 0xc1, 0x04, 0x0f, 0x67, 0xda, 0x62, 0x28, 0xa4, 0x92, - 0x50, 0x32, 0x65, 0xcc, 0x58, 0xbf, 0x36, 0xd2, 0xf5, 0x52, 0x88, 0x2b, 0x43, 0x23, 0xe3, 0xbf, - 0x41, 0xa3, 0xca, 0xab, 0xa1, 0x91, 0x75, 0x61, 0xa4, 0x9f, 0x2c, 0xc1, 0x99, 0x6f, 0x76, 0x45, - 0xe1, 0x3d, 0x9e, 0xef, 0xb2, 0x25, 0x9a, 0x74, 0xdd, 0x96, 0x13, 0x5d, 0x02, 0xd4, 0xd0, 0xcc, - 0xf9, 0x12, 0xa0, 0x8e, 0x34, 0x39, 0x21, 0xb7, 0x10, 0x9f, 0x82, 0xa7, 0x2a, 0x54, 0x5b, 0x03, - 0x55, 0x4d, 0x9f, 0x08, 0xa2, 0x2d, 0x79, 0x99, 0x6c, 0x6b, 0xe6, 0xc0, 0xed, 0x75, 0x30, 0xc5, - 0x41, 0xe3, 0x90, 0x8e, 0x19, 0x46, 0x9e, 0x69, 0xa7, 0x04, 0xeb, 0x04, 0xc8, 0xe5, 0x88, 0x27, - 0xef, 0x43, 0x95, 0xd3, 0x89, 0xb0, 0xb7, 0x30, 0x59, 0x7b, 0x20, 0x1b, 0x80, 0xc1, 0x47, 0xa7, - 0x27, 0xd4, 0x8b, 0x86, 0xd7, 0x84, 0xa9, 0xfe, 0xfe, 0x62, 0xa7, 0x2d, 0x64, 0xee, 0x04, 0x73, - 0x8f, 0xb3, 0x79, 0xc8, 0xcf, 0x6d, 0xd4, 0xb1, 0xfe, 0x61, 0x08, 0x24, 0xc8, 0x65, 0x82, 0x52, - 0xc3, 0x69, 0x77, 0xaf, 0x64, 0x40, 0xfb, 0xeb, 0x19, 0xf3, 0xdb, 0x00, 0x13, 0x1a, 0x3b, 0x9f, - 0x52, 0x9f, 0x33, 0x57, 0x59, 0xd4, 0x9c, 0xd0, 0xf8, 0x97, 0x48, 0x10, 0x15, 0x8e, 0x60, 0x2f, - 0x62, 0xe6, 0xa2, 0x69, 0xd7, 0xed, 0xfa, 0x84, 0xc6, 0x4f, 0x62, 0xe6, 0x26, 0xf7, 0xaa, 0xbf, - 0xfa, 0xbd, 0xf2, 0x76, 0x6c, 0x14, 0xed, 0xf8, 0xcf, 0x8c, 0x0f, 0xa7, 0x20, 0xf9, 0xff, 0x7f, - 0xef, 0xbf, 0x19, 0xa2, 0x36, 0xc8, 0xa7, 0x61, 0x72, 0x0c, 0xaf, 0x25, 0x71, 0xe4, 0x2c, 0x30, - 0xbe, 0xb4, 0x2f, 0xbd, 0x3c, 0xfc, 0xba, 0x67, 0x79, 0x72, 0x4c, 0x7e, 0x06, 0xd7, 0x0b, 0x59, - 0x20, 0x59, 0xb0, 0xf2, 0xd2, 0x64, 0x70, 0x35, 0x9f, 0x0c, 0xf4, 0x7a, 0xda, 0x12, 0xeb, 0xdf, - 0xc0, 0xb3, 0xbf, 0x23, 0x0a, 0xa5, 0x2c, 0x78, 0x94, 0x7d, 0x4b, 0xeb, 0x33, 0x03, 0x3a, 0x85, - 0xc3, 0x90, 0x3d, 0x00, 0x99, 0x5a, 0x63, 0xef, 0xb9, 0x2e, 0xda, 0xbb, 0xea, 0xe0, 0x68, 0xb2, - 0x47, 0xde, 0x73, 0x66, 0x9b, 0x23, 0x3d, 0x24, 0x1f, 0x42, 0x87, 0xa9, 0xd2, 0x4d, 0xe7, 0xbe, - 0x4a, 0x0e, 0xc5, 0x74, 0x61, 0xa7, 0x6e, 0xdb, 0x66, 0xb9, 0xb9, 0x75, 0x08, 0x66, 0xb2, 0x2e, - 0xf9, 0x16, 0x98, 0x73, 0xba, 0x54, 0x05, 0xb5, 0x2c, 0xc5, 0x1a, 0x73, 0xba, 0xc4, 0x5a, 0x9a, - 0x5c, 0x87, 0xba, 0x60, 0x4e, 0xa8, 0xdc, 0x61, 0xdd, 0xae, 0xcd, 0xe9, 0xf2, 0x27, 0x34, 0xb6, - 0x76, 0xa1, 0x9d, 0xdf, 0x44, 0x8b, 0x6a, 0x70, 0x93, 0xa2, 0x87, 0x13, 0x66, 0x3d, 0x82, 0x76, - 0xbe, 0x66, 0x15, 0x79, 0x2c, 0x0a, 0x16, 0xbe, 0x8b, 0x82, 0x1b, 0xb6, 0x9c, 0x88, 0xb6, 0xf7, - 0x2c, 0x90, 0x9f, 0x2e, 0x5b, 0xa4, 0x9e, 0x06, 0x9c, 0x65, 0x2a, 0x5d, 0x29, 0x63, 0x7d, 0xb6, - 0x01, 0x35, 0x59, 0x40, 0x93, 0x41, 0xbe, 0x3d, 0x13, 0xdf, 0x4d, 0x69, 0x4a, 0xaa, 0x52, 0x4c, - 0xb0, 0xf1, 0x8d, 0x62, 0x8f, 0x33, 0x6c, 0x5e, 0xbc, 0xd8, 0xa9, 0x23, 0xae, 0x1c, 0x3f, 0x48, - 0x1b, 0x9e, 0x55, 0xfd, 0x80, 0xee, 0xae, 0xaa, 0xaf, 0xdc, 0x5d, 0x5d, 0x87, 0xba, 0xbf, 0x98, - 0x3b, 0x7c, 0x19, 0xab, 0xf8, 0xac, 0xf9, 0x8b, 0xf9, 0xe3, 0x65, 0x2c, 0xbe, 0x01, 0x0f, 0x38, - 0x9d, 0x21, 0x4b, 0x46, 0x67, 0x03, 0x09, 0x82, 0x79, 0x00, 0xad, 0x0c, 0xfc, 0x7a, 0xae, 0x2a, - 0xe3, 0xda, 0x59, 0x0f, 0x39, 0x7e, 0xa0, 0x6e, 0xd9, 0x4c, 0xe0, 
0xf8, 0xd8, 0x25, 0xb7, 0xf3, - 0xcd, 0x04, 0xa2, 0x76, 0x03, 0x9d, 0x31, 0xd3, 0x2f, 0x08, 0xcc, 0x16, 0x07, 0x10, 0xee, 0x29, - 0x45, 0x4c, 0x14, 0x69, 0x08, 0x02, 0x32, 0xdf, 0x84, 0x4e, 0x0a, 0x7c, 0x52, 0x04, 0xe4, 0x2a, - 0x29, 0x19, 0x05, 0xdf, 0x81, 0x2d, 0x9f, 0x2d, 0xb9, 0x53, 0x94, 0x6e, 0xa2, 0x34, 0x11, 0xbc, - 0xd3, 0xbc, 0xc6, 0x77, 0xa1, 0x9d, 0x06, 0x30, 0xca, 0x6e, 0xca, 0x96, 0x2e, 0xa1, 0xa2, 0xd8, - 0x0d, 0x68, 0x24, 0x65, 0x47, 0x0b, 0x05, 0xea, 0x54, 0x56, 0x1b, 0x49, 0x21, 0x13, 0xb1, 0x78, - 0x31, 0xe3, 0x6a, 0x91, 0x36, 0xca, 0x60, 0x21, 0x63, 0x4b, 0x3a, 0xca, 0xde, 0x82, 0x56, 0x12, - 0x37, 0x28, 0xd7, 0x41, 0xb9, 0x4d, 0x4d, 0x44, 0xa1, 0x5d, 0xe8, 0x86, 0x51, 0x10, 0x06, 0x31, - 0x8b, 0x1c, 0xea, 0xba, 0x11, 0x8b, 0xe3, 0x5e, 0x57, 0xae, 0xa7, 0xe9, 0x87, 0x92, 0x6c, 0x7d, - 0x1f, 0xea, 0xba, 0x9e, 0xda, 0x82, 0x0d, 0xb4, 0x3a, 0xba, 0x60, 0xd5, 0x96, 0x13, 0x91, 0xb9, - 0x0f, 0xc3, 0x50, 0xbd, 0x0a, 0x88, 0xa1, 0xf5, 0x2b, 0xa8, 0xab, 0x0f, 0x56, 0xda, 0x2b, 0xfe, - 0x08, 0x36, 0x43, 0x1a, 0x89, 0x6b, 0x64, 0x3b, 0x46, 0x5d, 0xb1, 0x9f, 0xd0, 0x88, 0x3f, 0x62, - 0x3c, 0xd7, 0x38, 0x36, 0x51, 0x5e, 0x92, 0xac, 0xbb, 0xd0, 0xca, 0xc9, 0x88, 0x63, 0xa1, 0x1f, - 0xe9, 0x48, 0xc3, 0x49, 0xb2, 0x73, 0x25, 0xdd, 0xd9, 0xba, 0x07, 0x66, 0xf2, 0x6d, 0x44, 0x61, - 0xa9, 0xaf, 0x6e, 0x28, 0x73, 0xcb, 0x29, 0x36, 0xc3, 0xc1, 0xa7, 0x2c, 0x52, 0x31, 0x21, 0x27, - 0xd6, 0x13, 0xe8, 0x14, 0x52, 0x36, 0xb9, 0x03, 0xf5, 0x70, 0x31, 0x72, 0xf4, 0x23, 0x46, 0xda, - 0xf6, 0x9e, 0x2c, 0x46, 0x1f, 0xb1, 0x73, 0xdd, 0xf6, 0x86, 0x38, 0x4b, 0x97, 0xad, 0x64, 0x97, - 0x9d, 0x41, 0x43, 0x47, 0x3f, 0xf9, 0x01, 0x98, 0x89, 0x5b, 0x15, 0x72, 0x64, 0xb2, 0xb5, 0x5a, - 0x34, 0x15, 0x14, 0xde, 0x11, 0x7b, 0x13, 0x9f, 0xb9, 0x4e, 0x1a, 0x42, 0xb8, 0x47, 0xc3, 0xee, - 0x48, 0xc6, 0xc7, 0x3a, 0x5e, 0xac, 0x77, 0xa0, 0x26, 0xcf, 0x26, 0xec, 0x23, 0x56, 0xd6, 0xb5, - 0xb6, 0x18, 0x97, 0x26, 0xf3, 0x3f, 0x18, 0xd0, 0xd0, 0x59, 0xb0, 0x54, 0x29, 0x77, 0xe8, 0xca, - 0xd7, 0x3d, 0xf4, 0xff, 0x3e, 0xf1, 0xdc, 0x01, 0x22, 0xf3, 0xcb, 0x59, 0xc0, 0x3d, 0x7f, 0xe2, - 0x48, 0x5b, 0xcb, 0x1c, 0xd4, 0x45, 0xce, 0x29, 0x32, 0x4e, 0x04, 0x7d, 0xff, 0xf3, 0x0d, 0xe8, - 0x1c, 0x0e, 0xef, 0x1f, 0x1f, 0x86, 0xe1, 0xcc, 0x1b, 0x53, 0xac, 0xdf, 0xf7, 0xa0, 0x8a, 0x2d, - 0x4c, 0xc9, 0x13, 0x6c, 0xbf, 0xac, 0x97, 0x26, 0xfb, 0xb0, 0x81, 0x9d, 0x0c, 0x29, 0x7b, 0x89, - 0xed, 0x97, 0xb6, 0xd4, 0x62, 0x13, 0xd9, 0xeb, 0x5c, 0x7e, 0x90, 0xed, 0x97, 0xf5, 0xd5, 0xe4, - 0x43, 0x30, 0xd3, 0x16, 0x63, 0xd5, 0xb3, 0x6c, 0x7f, 0x65, 0x87, 0x2d, 0xf4, 0xd3, 0x72, 0x6c, - 0xd5, 0xeb, 0x62, 0x7f, 0x65, 0x2b, 0x4a, 0x0e, 0xa0, 0xae, 0x8b, 0xd8, 0xf2, 0x87, 0xd3, 0xfe, - 0x8a, 0xee, 0x57, 0x98, 0x47, 0x76, 0x0d, 0x65, 0xaf, 0xbb, 0xfd, 0xd2, 0x16, 0x9d, 0xbc, 0x07, - 0x35, 0x55, 0x59, 0x94, 0x3e, 0x9e, 0xf6, 0xcb, 0x7b, 0x58, 0x71, 0xc9, 0xb4, 0x6f, 0x5a, 0xf5, - 0x02, 0xdd, 0x5f, 0xf9, 0x96, 0x40, 0x0e, 0x01, 0x32, 0xc5, 0xff, 0xca, 0xa7, 0xe5, 0xfe, 0xea, - 0x37, 0x02, 0x72, 0x0f, 0x1a, 0xe9, 0xbb, 0x4f, 0xf9, 0x63, 0x71, 0x7f, 0x55, 0xdb, 0x3e, 0x7c, - 0xfd, 0x5f, 0x7f, 0xde, 0x36, 0x7e, 0x73, 0xb1, 0x6d, 0x7c, 0x79, 0xb1, 0x6d, 0x7c, 0x75, 0xb1, - 0x6d, 0xfc, 0xfe, 0x62, 0xdb, 0xf8, 0xd3, 0xc5, 0xb6, 0xf1, 0xdb, 0xbf, 0x6c, 0x1b, 0xa3, 0x1a, - 0xba, 0xff, 0xbb, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x7c, 0xbd, 0x95, 0x1c, 0x19, 0x00, - 0x00, + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_5b877df1938afe10) +} + +var fileDescriptor_types_5b877df1938afe10 = []byte{ + // 2214 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x73, 0x1b, 0xc7, + 0xd1, 0xe7, 0x82, 0x20, 0x81, 0x6d, 0x10, 0x0f, 0x8d, 0x28, 0x09, 0xc2, 0xe7, 0x8f, 0x54, 0xad, + 0x12, 0x5b, 0x8c, 0x65, 0xd0, 0xa6, 0xa3, 0x14, 0x65, 0x39, 0xa9, 0x22, 0x24, 0xc5, 0x64, 0xd9, + 0x49, 0x98, 0x95, 0xc4, 0x5c, 0x52, 0xb5, 0x35, 0xc0, 0x8e, 0x80, 0x2d, 0x02, 0xbb, 0xeb, 0xdd, + 0x01, 0x0d, 0xea, 0x98, 0xb3, 0x0f, 0x3e, 0xe4, 0x8f, 0xc8, 0x35, 0x37, 0x1f, 0x73, 0x4a, 0xf9, + 0x98, 0x43, 0xce, 0x4a, 0xc2, 0x54, 0x0e, 0xc9, 0x35, 0x95, 0xaa, 0x1c, 0x53, 0xd3, 0x33, 0xb3, + 0x2f, 0x2e, 0x14, 0xcb, 0xc9, 0x29, 0x17, 0x60, 0xa6, 0x1f, 0xf3, 0xe8, 0xed, 0xee, 0x5f, 0xf7, + 0xc0, 0x75, 0x3a, 0x1c, 0x79, 0xbb, 0xfc, 0x3c, 0x64, 0xb1, 0xfc, 0xed, 0x87, 0x51, 0xc0, 0x03, + 0xb2, 0x86, 0x93, 0xde, 0x3b, 0x63, 0x8f, 0x4f, 0xe6, 0xc3, 0xfe, 0x28, 0x98, 0xed, 0x8e, 0x83, + 0x71, 0xb0, 0x8b, 0xdc, 0xe1, 0xfc, 0x39, 0xce, 0x70, 0x82, 0x23, 0xa9, 0xd5, 0xdb, 0x1e, 0x07, + 0xc1, 0x78, 0xca, 0x52, 0x29, 0xee, 0xcd, 0x58, 0xcc, 0xe9, 0x2c, 0x54, 0x02, 0xfb, 0x99, 0xf5, + 0x38, 0xf3, 0x5d, 0x16, 0xcd, 0x3c, 0x9f, 0x67, 0x87, 0x53, 0x6f, 0x18, 0xef, 0x8e, 0x82, 0xd9, + 0x2c, 0xf0, 0xb3, 0x07, 0xea, 0x3d, 0xf8, 0xb7, 0x9a, 0xa3, 0xe8, 0x3c, 0xe4, 0xc1, 0xee, 0x8c, + 0x45, 0xa7, 0x53, 0xa6, 0xfe, 0xa4, 0xb2, 0xf5, 0xdb, 0x2a, 0xd4, 0x6c, 0xf6, 0xe9, 0x9c, 0xc5, + 0x9c, 0xdc, 0x81, 0x2a, 0x1b, 0x4d, 0x82, 0x6e, 0xe5, 0x96, 0x71, 0xa7, 0xb1, 0x47, 0xfa, 0x72, + 0x13, 0xc5, 0x7d, 0x3c, 0x9a, 0x04, 0x87, 0x2b, 0x36, 0x4a, 0x90, 0xb7, 0x61, 0xed, 0xf9, 0x74, + 0x1e, 0x4f, 0xba, 0xab, 0x28, 0x7a, 0x35, 0x2f, 0xfa, 0x43, 0xc1, 0x3a, 0x5c, 0xb1, 0xa5, 0x8c, + 0x58, 0xd6, 0xf3, 0x9f, 0x07, 0xdd, 0x6a, 0xd9, 0xb2, 0x47, 0xfe, 0x73, 0x5c, 0x56, 0x48, 0x90, + 0x7d, 0x80, 0x98, 0x71, 0x27, 0x08, 0xb9, 0x17, 0xf8, 0xdd, 0x35, 0x94, 0xbf, 0x91, 0x97, 0x7f, + 0xc2, 0xf8, 0x4f, 0x90, 0x7d, 0xb8, 0x62, 0x9b, 0xb1, 0x9e, 0x08, 0x4d, 0xcf, 0xf7, 0xb8, 0x33, + 0x9a, 0x50, 0xcf, 0xef, 0xae, 0x97, 0x69, 0x1e, 0xf9, 0x1e, 0x7f, 0x28, 0xd8, 0x42, 0xd3, 0xd3, + 0x13, 0x71, 0x95, 0x4f, 0xe7, 0x2c, 0x3a, 0xef, 0xd6, 0xca, 0xae, 0xf2, 0x53, 0xc1, 0x12, 0x57, + 0x41, 0x19, 0xf2, 0x00, 0x1a, 0x43, 0x36, 0xf6, 0x7c, 0x67, 0x38, 0x0d, 0x46, 0xa7, 0xdd, 0x3a, + 0xaa, 0x74, 0xf3, 0x2a, 0x03, 0x21, 0x30, 0x10, 0xfc, 0xc3, 0x15, 0x1b, 0x86, 0xc9, 0x8c, 0xec, + 0x41, 0x7d, 0x34, 0x61, 0xa3, 0x53, 0x87, 0x2f, 0xba, 0x26, 0x6a, 0x5e, 0xcb, 0x6b, 0x3e, 0x14, + 0xdc, 0xa7, 0x8b, 0xc3, 0x15, 0xbb, 0x36, 0x92, 0x43, 0x72, 0x0f, 0x4c, 0xe6, 0xbb, 0x6a, 0xbb, + 0x06, 0x2a, 0x5d, 0x2f, 0x7c, 0x17, 0xdf, 0xd5, 0x9b, 0xd5, 0x99, 0x1a, 0x93, 0x3e, 0xac, 0x0b, + 0x47, 0xf1, 0x78, 0x77, 0x03, 0x75, 0x36, 0x0b, 0x1b, 0x21, 0xef, 0x70, 0xc5, 0x56, 0x52, 0xc2, + 0x7c, 0x2e, 0x9b, 0x7a, 0x67, 0x2c, 0x12, 0x87, 0xbb, 0x5a, 0x66, 0xbe, 0x47, 0x92, 0x8f, 0xc7, + 0x33, 0x5d, 0x3d, 0x19, 0xd4, 0x60, 0xed, 0x8c, 0x4e, 0xe7, 0xcc, 0x7a, 0x0b, 0x1a, 0x19, 0x4f, + 0x21, 0x5d, 0xa8, 0xcd, 0x58, 0x1c, 0xd3, 0x31, 0xeb, 0x1a, 0xb7, 0x8c, 0x3b, 0xa6, 0xad, 0xa7, + 0x56, 0x0b, 0x36, 0xb2, 0x7e, 0x62, 0xcd, 0x12, 0x45, 0xe1, 0x0b, 0x42, 0xf1, 0x8c, 0x45, 0xb1, + 0x70, 0x00, 0xa5, 0xa8, 0xa6, 0xe4, 0x36, 0x34, 0xd1, 0x0e, 0x8e, 0xe6, 0x0b, 0x3f, 0xad, 0xda, + 0x1b, 0x48, 0x3c, 0x51, 0x42, 0xdb, 0xd0, 0x08, 0xf7, 0xc2, 0x44, 0x64, 0x15, 0x45, 0x20, 0xdc, + 0x0b, 0x95, 0x80, 0xf5, 0x01, 0x74, 0x8a, 0xae, 0x44, 0x3a, 0xb0, 0x7a, 0xca, 0xce, 0xd5, 0x7e, + 0x62, 0x48, 0x36, 0xd5, 0xb5, 0x70, 0x0f, 0xd3, 0x56, 0x77, 0xfc, 0xa2, 0x92, 0x28, 0x27, 0xde, + 
0x44, 0xf6, 0xa1, 0x2a, 0x62, 0x19, 0xb5, 0x1b, 0x7b, 0xbd, 0xbe, 0x0c, 0xf4, 0xbe, 0x0e, 0xf4, + 0xfe, 0x53, 0x1d, 0xe8, 0x83, 0xfa, 0x57, 0x2f, 0xb7, 0x57, 0xbe, 0xf8, 0xc3, 0xb6, 0x61, 0xa3, + 0x06, 0xb9, 0x29, 0x1c, 0x82, 0x7a, 0xbe, 0xe3, 0xb9, 0x6a, 0x9f, 0x1a, 0xce, 0x8f, 0x5c, 0x72, + 0x00, 0x9d, 0x51, 0xe0, 0xc7, 0xcc, 0x8f, 0xe7, 0xb1, 0x13, 0xd2, 0x88, 0xce, 0x62, 0x15, 0x6b, + 0xfa, 0xf3, 0x3f, 0xd4, 0xec, 0x63, 0xe4, 0xda, 0xed, 0x51, 0x9e, 0x40, 0x3e, 0x04, 0x38, 0xa3, + 0x53, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x6e, 0xf5, 0xd6, 0x6a, 0x46, 0xf9, 0x44, 0x33, 0x9e, 0x85, + 0x2e, 0xe5, 0x6c, 0x50, 0x15, 0x27, 0xb3, 0x33, 0xf2, 0xe4, 0x4d, 0x68, 0xd3, 0x30, 0x74, 0x62, + 0x4e, 0x39, 0x73, 0x86, 0xe7, 0x9c, 0xc5, 0x18, 0x8f, 0x1b, 0x76, 0x93, 0x86, 0xe1, 0x13, 0x41, + 0x1d, 0x08, 0xa2, 0xe5, 0x26, 0x5f, 0x13, 0x43, 0x85, 0x10, 0xa8, 0xba, 0x94, 0x53, 0xb4, 0xc6, + 0x86, 0x8d, 0x63, 0x41, 0x0b, 0x29, 0x9f, 0xa8, 0x3b, 0xe2, 0x98, 0x5c, 0x87, 0xf5, 0x09, 0xf3, + 0xc6, 0x13, 0x8e, 0xd7, 0x5a, 0xb5, 0xd5, 0x4c, 0x18, 0x3e, 0x8c, 0x82, 0x33, 0x86, 0xd9, 0xa2, + 0x6e, 0xcb, 0x89, 0xf5, 0x17, 0x03, 0xae, 0x5c, 0x0a, 0x2f, 0xb1, 0xee, 0x84, 0xc6, 0x13, 0xbd, + 0x97, 0x18, 0x93, 0xb7, 0xc5, 0xba, 0xd4, 0x65, 0x91, 0xca, 0x62, 0x4d, 0x75, 0xe3, 0x43, 0x24, + 0xaa, 0x8b, 0x2a, 0x11, 0xf2, 0x18, 0x3a, 0x53, 0x1a, 0x73, 0x47, 0x46, 0x81, 0x83, 0x59, 0x6a, + 0x35, 0x17, 0x99, 0x9f, 0x50, 0x1d, 0x2d, 0xc2, 0x39, 0x95, 0x7a, 0x6b, 0x9a, 0xa3, 0x92, 0x43, + 0xd8, 0x1c, 0x9e, 0xbf, 0xa0, 0x3e, 0xf7, 0x7c, 0xe6, 0x5c, 0xb2, 0x79, 0x5b, 0x2d, 0xf5, 0xf8, + 0xcc, 0x73, 0x99, 0x3f, 0xd2, 0xc6, 0xbe, 0x9a, 0xa8, 0x24, 0x1f, 0x23, 0xb6, 0x6e, 0x41, 0x2b, + 0x9f, 0x0b, 0x48, 0x0b, 0x2a, 0x7c, 0xa1, 0x6e, 0x58, 0xe1, 0x0b, 0xcb, 0x4a, 0x3c, 0x30, 0x09, + 0xc8, 0x4b, 0x32, 0x3b, 0xd0, 0x2e, 0x24, 0x87, 0x8c, 0xb9, 0x8d, 0xac, 0xb9, 0xad, 0x36, 0x34, + 0x73, 0x39, 0xc1, 0xfa, 0x7c, 0x0d, 0xea, 0x36, 0x8b, 0x43, 0xe1, 0x4c, 0x64, 0x1f, 0x4c, 0xb6, + 0x18, 0x31, 0x99, 0x8e, 0x8d, 0x42, 0xb2, 0x93, 0x32, 0x8f, 0x35, 0x5f, 0xa4, 0x85, 0x44, 0x98, + 0xec, 0xe4, 0xa0, 0xe4, 0x6a, 0x51, 0x29, 0x8b, 0x25, 0x77, 0xf3, 0x58, 0xb2, 0x59, 0x90, 0x2d, + 0x80, 0xc9, 0x4e, 0x0e, 0x4c, 0x8a, 0x0b, 0xe7, 0xd0, 0xe4, 0x7e, 0x09, 0x9a, 0x14, 0x8f, 0xbf, + 0x04, 0x4e, 0xee, 0x97, 0xc0, 0x49, 0xf7, 0xd2, 0x5e, 0xa5, 0x78, 0x72, 0x37, 0x8f, 0x27, 0xc5, + 0xeb, 0x14, 0x00, 0xe5, 0xc3, 0x32, 0x40, 0xb9, 0x59, 0xd0, 0x59, 0x8a, 0x28, 0xef, 0x5f, 0x42, + 0x94, 0xeb, 0x05, 0xd5, 0x12, 0x48, 0xb9, 0x9f, 0xcb, 0xf5, 0x50, 0x7a, 0xb7, 0xf2, 0x64, 0x4f, + 0xbe, 0x77, 0x19, 0x8d, 0x6e, 0x14, 0x3f, 0x6d, 0x19, 0x1c, 0xed, 0x16, 0xe0, 0xe8, 0x5a, 0xf1, + 0x94, 0x05, 0x3c, 0x4a, 0x51, 0x65, 0x47, 0xc4, 0x7d, 0xc1, 0xd3, 0x44, 0x8e, 0x60, 0x51, 0x14, + 0x44, 0x2a, 0x61, 0xcb, 0x89, 0x75, 0x47, 0x64, 0xa2, 0xd4, 0xbf, 0x5e, 0x81, 0x40, 0xe8, 0xf4, + 0x19, 0xef, 0xb2, 0xbe, 0x34, 0x52, 0x5d, 0x8c, 0xe8, 0x6c, 0x16, 0x33, 0x55, 0x16, 0xcb, 0x00, + 0x53, 0x25, 0x0f, 0x4c, 0xdb, 0xd0, 0x10, 0xb9, 0xb2, 0x80, 0x39, 0x34, 0xd4, 0x98, 0x43, 0xbe, + 0x03, 0x57, 0x30, 0xcf, 0x48, 0xf8, 0x52, 0x81, 0x58, 0xc5, 0x40, 0x6c, 0x0b, 0x86, 0xb4, 0x98, + 0x4c, 0x80, 0xef, 0xc0, 0xd5, 0x8c, 0xac, 0x58, 0x17, 0x73, 0x9c, 0x4c, 0xbe, 0x9d, 0x44, 0xfa, + 0x20, 0x0c, 0x0f, 0x69, 0x3c, 0xb1, 0x7e, 0x94, 0x1a, 0x28, 0xc5, 0x33, 0x02, 0xd5, 0x51, 0xe0, + 0xca, 0x7b, 0x37, 0x6d, 0x1c, 0x0b, 0x8c, 0x9b, 0x06, 0x63, 0x3c, 0x9c, 0x69, 0x8b, 0xa1, 0x90, + 0x4a, 0x42, 0xc9, 0x94, 0x31, 0x63, 0xfd, 0xd2, 0x48, 0xd7, 0x4b, 0x21, 0xae, 0x0c, 0x8d, 0x8c, + 0xff, 0x04, 0x8d, 0x2a, 
0xaf, 0x87, 0x46, 0xd6, 0x85, 0x91, 0x7e, 0xb2, 0x04, 0x67, 0xbe, 0xd9, + 0x15, 0x85, 0xf7, 0x78, 0xbe, 0xcb, 0x16, 0x68, 0xd2, 0x55, 0x5b, 0x4e, 0x74, 0x09, 0xb0, 0x8e, + 0x66, 0xce, 0x97, 0x00, 0x35, 0xa4, 0xc9, 0x09, 0xb9, 0x8d, 0xf8, 0x14, 0x3c, 0x57, 0xa1, 0xda, + 0xec, 0xab, 0x6a, 0xfa, 0x58, 0x10, 0x6d, 0xc9, 0xcb, 0x64, 0x5b, 0x33, 0x07, 0x6e, 0x6f, 0x80, + 0x29, 0x0e, 0x1a, 0x87, 0x74, 0xc4, 0x30, 0xf2, 0x4c, 0x3b, 0x25, 0x58, 0xc7, 0x40, 0x2e, 0x47, + 0x3c, 0xf9, 0x00, 0xaa, 0x9c, 0x8e, 0x85, 0xbd, 0x85, 0xc9, 0x5a, 0x7d, 0xd9, 0x00, 0xf4, 0x3f, + 0x3e, 0x39, 0xa6, 0x5e, 0x34, 0xb8, 0x2e, 0x4c, 0xf5, 0xb7, 0x97, 0xdb, 0x2d, 0x21, 0x73, 0x37, + 0x98, 0x79, 0x9c, 0xcd, 0x42, 0x7e, 0x6e, 0xa3, 0x8e, 0xf5, 0x77, 0x43, 0x20, 0x41, 0x2e, 0x13, + 0x94, 0x1a, 0x4e, 0xbb, 0x7b, 0x25, 0x03, 0xda, 0x5f, 0xcf, 0x98, 0xff, 0x0f, 0x30, 0xa6, 0xb1, + 0xf3, 0x19, 0xf5, 0x39, 0x73, 0x95, 0x45, 0xcd, 0x31, 0x8d, 0x7f, 0x86, 0x04, 0x51, 0xe1, 0x08, + 0xf6, 0x3c, 0x66, 0x2e, 0x9a, 0x76, 0xd5, 0xae, 0x8d, 0x69, 0xfc, 0x2c, 0x66, 0x6e, 0x72, 0xaf, + 0xda, 0xeb, 0xdf, 0x2b, 0x6f, 0xc7, 0x7a, 0xd1, 0x8e, 0xff, 0xc8, 0xf8, 0x70, 0x0a, 0x92, 0xff, + 0xfb, 0xf7, 0xfe, 0xab, 0x21, 0x6a, 0x83, 0x7c, 0x1a, 0x26, 0x47, 0x70, 0x25, 0x89, 0x23, 0x67, + 0x8e, 0xf1, 0xa5, 0x7d, 0xe9, 0xd5, 0xe1, 0xd7, 0x39, 0xcb, 0x93, 0x63, 0xf2, 0x63, 0xb8, 0x51, + 0xc8, 0x02, 0xc9, 0x82, 0x95, 0x57, 0x26, 0x83, 0x6b, 0xf9, 0x64, 0xa0, 0xd7, 0xd3, 0x96, 0x58, + 0xfd, 0x06, 0x9e, 0xfd, 0x2d, 0x51, 0x28, 0x65, 0xc1, 0xa3, 0xec, 0x5b, 0x5a, 0xbf, 0x36, 0xa0, + 0x5d, 0x38, 0x0c, 0xb9, 0x07, 0x20, 0x53, 0x6b, 0xec, 0xbd, 0x60, 0x85, 0x2c, 0x86, 0x26, 0x7b, + 0xe2, 0xbd, 0x60, 0xea, 0xe0, 0xe6, 0x50, 0x13, 0xc8, 0x7b, 0x50, 0x67, 0xaa, 0x80, 0x53, 0xb7, + 0xbd, 0x56, 0xa8, 0xeb, 0x94, 0x4e, 0x22, 0x46, 0xbe, 0x0b, 0x66, 0x62, 0xc3, 0x42, 0xf1, 0x9e, + 0x98, 0x5c, 0x6f, 0x94, 0x08, 0x5a, 0x1f, 0x41, 0xbb, 0x70, 0x0c, 0xf2, 0x7f, 0x60, 0xce, 0xe8, + 0x42, 0x55, 0xe1, 0xb2, 0x7e, 0xab, 0xcf, 0xe8, 0x02, 0x0b, 0x70, 0x72, 0x03, 0x6a, 0x82, 0x39, + 0xa6, 0xf2, 0x2b, 0xac, 0xda, 0xeb, 0x33, 0xba, 0xf8, 0x88, 0xc6, 0xd6, 0x0e, 0xb4, 0xf2, 0x47, + 0xd3, 0xa2, 0x1a, 0x11, 0xa5, 0xe8, 0xc1, 0x98, 0x59, 0xf7, 0xa0, 0x5d, 0x38, 0x11, 0xb1, 0xa0, + 0x19, 0xce, 0x87, 0xce, 0x29, 0x3b, 0x77, 0xf0, 0xc8, 0xe8, 0x33, 0xa6, 0xdd, 0x08, 0xe7, 0xc3, + 0x8f, 0xd9, 0xf9, 0x53, 0x41, 0xb2, 0x9e, 0x40, 0x2b, 0x5f, 0x1f, 0x8b, 0x9c, 0x19, 0x05, 0x73, + 0xdf, 0xc5, 0xf5, 0xd7, 0x6c, 0x39, 0x11, 0x2d, 0xf6, 0x59, 0x20, 0xdd, 0x24, 0x5b, 0x10, 0x9f, + 0x04, 0x9c, 0x65, 0xaa, 0x6a, 0x29, 0x63, 0xfd, 0x62, 0x0d, 0xd6, 0x65, 0xb1, 0x4e, 0xfa, 0xf9, + 0x56, 0x50, 0xf8, 0x88, 0xd2, 0x94, 0x54, 0xa5, 0x98, 0xe0, 0xf0, 0x9b, 0xc5, 0x7e, 0x6a, 0xd0, + 0xb8, 0x78, 0xb9, 0x5d, 0x43, 0x0c, 0x3b, 0x7a, 0x94, 0x36, 0x57, 0xcb, 0x7a, 0x0f, 0xdd, 0xc9, + 0x55, 0x5f, 0xbb, 0x93, 0xbb, 0x01, 0x35, 0x7f, 0x3e, 0x73, 0xf8, 0x22, 0x56, 0xb9, 0x60, 0xdd, + 0x9f, 0xcf, 0x9e, 0x2e, 0xf0, 0xd3, 0xf1, 0x80, 0xd3, 0x29, 0xb2, 0x64, 0x26, 0xa8, 0x23, 0x41, + 0x30, 0xf7, 0xa1, 0x99, 0x81, 0x7a, 0xcf, 0x55, 0x25, 0x63, 0x2b, 0xeb, 0x8d, 0x47, 0x8f, 0xd4, + 0x2d, 0x1b, 0x09, 0xf4, 0x1f, 0xb9, 0xe4, 0x4e, 0xbe, 0x71, 0xc1, 0x0a, 0xa1, 0x8e, 0x8e, 0x9f, + 0xe9, 0x4d, 0x44, 0x7d, 0x20, 0x0e, 0x20, 0x42, 0x41, 0x8a, 0x98, 0x28, 0x52, 0x17, 0x04, 0x64, + 0xbe, 0x05, 0xed, 0x14, 0x64, 0xa5, 0x08, 0xc8, 0x55, 0x52, 0x32, 0x0a, 0xbe, 0x0b, 0x9b, 0x3e, + 0x5b, 0x70, 0xa7, 0x28, 0xdd, 0x40, 0x69, 0x22, 0x78, 0x27, 0x79, 0x8d, 0x6f, 0x43, 0x2b, 0x4d, + 0x16, 0x28, 0xbb, 0x21, 0xdb, 0xc7, 0x84, 0x8a, 
0x62, 0x37, 0xa1, 0x9e, 0x94, 0x38, 0x4d, 0x14, + 0xa8, 0x51, 0x59, 0xd9, 0x24, 0x45, 0x53, 0xc4, 0xe2, 0xf9, 0x94, 0xab, 0x45, 0x5a, 0x28, 0x83, + 0x45, 0x93, 0x2d, 0xe9, 0x28, 0x7b, 0x1b, 0x9a, 0x3a, 0xec, 0xa4, 0x5c, 0x1b, 0xe5, 0x36, 0x34, + 0x11, 0x85, 0x76, 0xa0, 0x13, 0x46, 0x41, 0x18, 0xc4, 0x2c, 0x72, 0xa8, 0xeb, 0x46, 0x2c, 0x8e, + 0xbb, 0x1d, 0xb9, 0x9e, 0xa6, 0x1f, 0x48, 0xb2, 0xf5, 0x1e, 0xd4, 0x74, 0xed, 0xb6, 0x09, 0x6b, + 0x68, 0x75, 0x74, 0xc1, 0xaa, 0x2d, 0x27, 0x02, 0x25, 0x0e, 0xc2, 0x50, 0xbd, 0x40, 0x88, 0xa1, + 0xf5, 0x73, 0xa8, 0xa9, 0x0f, 0x56, 0xda, 0x97, 0x7e, 0x1f, 0x36, 0x42, 0x1a, 0x89, 0x6b, 0x64, + 0xbb, 0x53, 0xdd, 0x1d, 0x1c, 0xd3, 0x88, 0x3f, 0x61, 0x3c, 0xd7, 0xa4, 0x36, 0x50, 0x5e, 0x92, + 0xac, 0xfb, 0xd0, 0xcc, 0xc9, 0x88, 0x63, 0xa1, 0x1f, 0xe9, 0x48, 0xc3, 0x49, 0xb2, 0x73, 0x25, + 0xdd, 0xd9, 0x7a, 0x00, 0x66, 0xf2, 0x6d, 0x44, 0x11, 0xab, 0xaf, 0x6e, 0x28, 0x73, 0xcb, 0x29, + 0x36, 0xde, 0xc1, 0x67, 0x2c, 0x52, 0x31, 0x21, 0x27, 0xd6, 0xb3, 0x4c, 0x66, 0x90, 0x79, 0x9b, + 0xdc, 0x85, 0x9a, 0xca, 0x0c, 0x2a, 0x2a, 0x75, 0x8b, 0x7d, 0x8c, 0xa9, 0x41, 0xb7, 0xd8, 0x32, + 0x51, 0xa4, 0xcb, 0x56, 0xb2, 0xcb, 0x4e, 0xa1, 0xae, 0xa3, 0x3f, 0x9f, 0x26, 0xe5, 0x8a, 0x9d, + 0x62, 0x9a, 0x54, 0x8b, 0xa6, 0x82, 0xc2, 0x3b, 0x62, 0x6f, 0xec, 0x33, 0xd7, 0x49, 0x43, 0x08, + 0xf7, 0xa8, 0xdb, 0x6d, 0xc9, 0xf8, 0x44, 0xc7, 0x8b, 0xf5, 0x2e, 0xac, 0xcb, 0xb3, 0x09, 0xfb, + 0x88, 0x95, 0x75, 0x5d, 0x2f, 0xc6, 0xa5, 0xc0, 0xf1, 0x7b, 0x03, 0xea, 0x3a, 0x79, 0x96, 0x2a, + 0xe5, 0x0e, 0x5d, 0xf9, 0xba, 0x87, 0xfe, 0xef, 0x27, 0x9e, 0xbb, 0x40, 0x64, 0x7e, 0x39, 0x0b, + 0xb8, 0xe7, 0x8f, 0x1d, 0x69, 0x6b, 0x99, 0x83, 0x3a, 0xc8, 0x39, 0x41, 0xc6, 0xb1, 0xa0, 0xef, + 0x7d, 0xbe, 0x06, 0xed, 0x83, 0xc1, 0xc3, 0xa3, 0x83, 0x30, 0x9c, 0x7a, 0x23, 0x8a, 0xbd, 0xc2, + 0x2e, 0x54, 0xb1, 0x5d, 0x2a, 0x79, 0xee, 0xed, 0x95, 0xf5, 0xed, 0x64, 0x0f, 0xd6, 0xb0, 0x6b, + 0x22, 0x65, 0xaf, 0xbe, 0xbd, 0xd2, 0xf6, 0x5d, 0x6c, 0x22, 0xfb, 0xaa, 0xcb, 0x8f, 0xbf, 0xbd, + 0xb2, 0x1e, 0x9e, 0xfc, 0x00, 0xcc, 0xb4, 0x9d, 0x59, 0xf6, 0x04, 0xdc, 0x5b, 0xda, 0xcd, 0x0b, + 0xfd, 0xb4, 0xf4, 0x5b, 0xf6, 0x92, 0xd9, 0x5b, 0xda, 0xf6, 0x92, 0x7d, 0xa8, 0xe9, 0x82, 0xb9, + 0xfc, 0x91, 0xb6, 0xb7, 0xa4, 0xd3, 0x16, 0xe6, 0x91, 0x1d, 0x4a, 0xd9, 0x4b, 0x72, 0xaf, 0xf4, + 0x39, 0x80, 0xdc, 0x83, 0x75, 0x55, 0xc5, 0x94, 0x3e, 0xd4, 0xf6, 0xca, 0xfb, 0x65, 0x71, 0xc9, + 0xb4, 0x47, 0x5b, 0xf6, 0xda, 0xdd, 0x5b, 0xfa, 0x6e, 0x41, 0x0e, 0x00, 0x32, 0x8d, 0xc6, 0xd2, + 0x67, 0xec, 0xde, 0xf2, 0xf7, 0x08, 0xf2, 0x00, 0xea, 0xe9, 0x1b, 0x53, 0xf9, 0xc3, 0x74, 0x6f, + 0xd9, 0x13, 0xc1, 0xe0, 0x8d, 0x7f, 0xfe, 0x69, 0xcb, 0xf8, 0xd5, 0xc5, 0x96, 0xf1, 0xe5, 0xc5, + 0x96, 0xf1, 0xd5, 0xc5, 0x96, 0xf1, 0xbb, 0x8b, 0x2d, 0xe3, 0x8f, 0x17, 0x5b, 0xc6, 0x6f, 0xfe, + 0xbc, 0x65, 0x0c, 0xd7, 0xd1, 0xfd, 0xdf, 0xff, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x8d, + 0xcb, 0x04, 0x88, 0x19, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index ffa321836..b48ff1e8b 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -74,7 +74,6 @@ message RequestQuery { bool prove = 4; } -// NOTE: validators here have empty pubkeys. 
message RequestBeginBlock { bytes hash = 1; Header header = 2 [(gogoproto.nullable)=false]; @@ -208,12 +207,13 @@ message ResponseCommit { // ConsensusParams contains all consensus-relevant parameters // that can be adjusted by the abci app message ConsensusParams { - BlockSize block_size = 1; - EvidenceParams evidence_params = 2; + BlockSizeParams block_size = 1; + EvidenceParams evidence = 2; + ValidatorParams validator = 3; } // BlockSize contains limits on the block size. -message BlockSize { +message BlockSizeParams { // Note: must be greater than 0 int64 max_bytes = 1; // Note: must be greater or equal to -1 @@ -226,6 +226,11 @@ message EvidenceParams { int64 max_age = 1; } +// ValidatorParams contains limits on validators. +message ValidatorParams { + repeated string pub_key_types = 1; +} + message LastCommitInfo { int32 round = 1; repeated VoteInfo votes = 2 [(gogoproto.nullable)=false]; diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index 53c5cd94a..9375cc7f1 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -1479,15 +1479,15 @@ func TestConsensusParamsMarshalTo(t *testing.T) { } } -func TestBlockSizeProto(t *testing.T) { +func TestBlockSizeParamsProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, false) + p := NewPopulatedBlockSizeParams(popr, false) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1510,10 +1510,10 @@ func TestBlockSizeProto(t *testing.T) { } } -func TestBlockSizeMarshalTo(t *testing.T) { +func TestBlockSizeParamsMarshalTo(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, false) + p := NewPopulatedBlockSizeParams(popr, false) size := p.Size() dAtA := make([]byte, size) for i := range dAtA { @@ -1523,7 +1523,7 @@ func TestBlockSizeMarshalTo(t *testing.T) { if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1591,6 +1591,62 @@ func TestEvidenceParamsMarshalTo(t *testing.T) { } } +func TestValidatorParamsProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorParams(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ValidatorParams{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestValidatorParamsMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := 
math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorParams(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ValidatorParams{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestLastCommitInfoProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2619,16 +2675,16 @@ func TestConsensusParamsJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestBlockSizeJSON(t *testing.T) { +func TestBlockSizeParamsJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockSize{} + msg := &BlockSizeParams{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2655,6 +2711,24 @@ func TestEvidenceParamsJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestValidatorParamsJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorParams(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ValidatorParams{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestLastCommitInfoJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3563,12 +3637,12 @@ func TestConsensusParamsProtoCompactText(t *testing.T) { } } -func TestBlockSizeProtoText(t *testing.T) { +func TestBlockSizeParamsProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3577,12 +3651,12 @@ func TestBlockSizeProtoText(t *testing.T) { } } -func TestBlockSizeProtoCompactText(t *testing.T) { +func TestBlockSizeParamsProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3619,6 +3693,34 @@ func TestEvidenceParamsProtoCompactText(t *testing.T) { } } +func 
TestValidatorParamsProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorParams(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ValidatorParams{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestValidatorParamsProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorParams(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ValidatorParams{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestLastCommitInfoProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4471,10 +4573,10 @@ func TestConsensusParamsSize(t *testing.T) { } } -func TestBlockSizeSize(t *testing.T) { +func TestBlockSizeParamsSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) size2 := github_com_gogo_protobuf_proto.Size(p) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { @@ -4515,6 +4617,28 @@ func TestEvidenceParamsSize(t *testing.T) { } } +func TestValidatorParamsSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorParams(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestLastCommitInfoSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index afd726174..f057002ef 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -441,11 +441,12 @@ Commit are included in the header of the next block. ### ConsensusParams - **Fields**: - - `BlockSize (BlockSize)`: Parameters limiting the size of a block. - - `EvidenceParams (EvidenceParams)`: Parameters limiting the validity of + - `BlockSize (BlockSizeParams)`: Parameters limiting the size of a block. + - `Evidence (EvidenceParams)`: Parameters limiting the validity of evidence of byzantine behaviour. + - `Validator (ValidatorParams)`: Parameters limiting the types of pubkeys validators can use. -### BlockSize +### BlockSizeParams - **Fields**: - `MaxBytes (int64)`: Max size of a block, in bytes. @@ -463,6 +464,12 @@ Commit are included in the header of the next block. similar mechanism for handling Nothing-At-Stake attacks. - NOTE: this should change to time (instead of blocks)! 
+### ValidatorParams + +- **Fields**: + - `PubKeyTypes ([]string)`: List of accepted pubkey types. Uses same + naming as `PubKey.Type`. + ### Proof - **Fields**: diff --git a/evidence/pool.go b/evidence/pool.go index 0f3d482af..da00a3481 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -127,7 +127,7 @@ func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []typ } // remove committed evidence from the clist - maxAge := evpool.State().ConsensusParams.EvidenceParams.MaxAge + maxAge := evpool.State().ConsensusParams.Evidence.MaxAge evpool.removeEvidence(height, maxAge, blockEvidenceMap) } diff --git a/evidence/pool_test.go b/evidence/pool_test.go index c3ed569e1..4e69596bf 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -38,7 +38,7 @@ func initializeValidatorState(valAddr []byte, height int64) dbm.DB { NextValidators: valSet.CopyIncrementAccum(1), LastHeightValidatorsChanged: 1, ConsensusParams: types.ConsensusParams{ - EvidenceParams: types.EvidenceParams{ + Evidence: types.EvidenceParams{ MaxAge: 1000000, }, }, diff --git a/evidence/reactor.go b/evidence/reactor.go index cfe47364c..52eb4a56f 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -164,7 +164,7 @@ func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evid // NOTE: We only send evidence to peers where // peerHeight - maxAge < evidenceHeight < peerHeight - maxAge := evR.evpool.State().ConsensusParams.EvidenceParams.MaxAge + maxAge := evR.evpool.State().ConsensusParams.Evidence.MaxAge peerHeight := peerState.GetHeight() if peerHeight < evHeight { // peer is behind. sleep while he catches up diff --git a/state/execution.go b/state/execution.go index 72f6cc978..cc8e7e75f 100644 --- a/state/execution.go +++ b/state/execution.go @@ -186,7 +186,7 @@ func (blockExec *BlockExecutor) Commit( state.Validators.Size(), ), ), - mempool.PostCheckMaxGas(state.ConsensusParams.MaxGas), + mempool.PostCheckMaxGas(state.ConsensusParams.BlockSize.MaxGas), ) return res.Data, err diff --git a/state/state_test.go b/state/state_test.go index b1f24d301..88200e17e 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -390,11 +390,11 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { func makeParams(blockBytes, blockGas, evidenceAge int64) types.ConsensusParams { return types.ConsensusParams{ - BlockSize: types.BlockSize{ + BlockSize: types.BlockSizeParams{ MaxBytes: blockBytes, MaxGas: blockGas, }, - EvidenceParams: types.EvidenceParams{ + Evidence: types.EvidenceParams{ MaxAge: evidenceAge, }, } @@ -416,7 +416,7 @@ func TestApplyUpdates(t *testing.T) { 1: {initParams, abci.ConsensusParams{}, initParams}, 2: {initParams, abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ + BlockSize: &abci.BlockSizeParams{ MaxBytes: 44, MaxGas: 55, }, @@ -424,7 +424,7 @@ func TestApplyUpdates(t *testing.T) { makeParams(44, 55, 3)}, 3: {initParams, abci.ConsensusParams{ - EvidenceParams: &abci.EvidenceParams{ + Evidence: &abci.EvidenceParams{ MaxAge: 66, }, }, diff --git a/state/store.go b/state/store.go index 2f90c747e..7a0ef255a 100644 --- a/state/store.go +++ b/state/store.go @@ -251,7 +251,7 @@ func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error) return empty, ErrNoConsensusParamsForHeight{height} } - if paramsInfo.ConsensusParams == empty { + if paramsInfo.ConsensusParams.Equals(&empty) { paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) if paramsInfo2 == nil { panic( diff --git a/state/validation.go b/state/validation.go index 
ff1791e2b..a12919847 100644 --- a/state/validation.go +++ b/state/validation.go @@ -178,7 +178,7 @@ func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error height := state.LastBlockHeight evidenceAge := height - evidence.Height() - maxAge := state.ConsensusParams.EvidenceParams.MaxAge + maxAge := state.ConsensusParams.Evidence.MaxAge if evidenceAge > maxAge { return fmt.Errorf("Evidence from height %d is too old. Min height is %d", evidence.Height(), height-maxAge) diff --git a/types/params.go b/types/params.go index ed1e7963b..81cf429ff 100644 --- a/types/params.go +++ b/types/params.go @@ -17,12 +17,13 @@ const ( // ConsensusParams contains consensus critical parameters that determine the // validity of blocks. type ConsensusParams struct { - BlockSize `json:"block_size_params"` - EvidenceParams `json:"evidence_params"` + BlockSize BlockSizeParams `json:"block_size"` + Evidence EvidenceParams `json:"evidence"` + Validator ValidatorParams `json:"validator"` } -// BlockSize contain limits on the block size. -type BlockSize struct { +// BlockSizeParams define limits on the block size. +type BlockSizeParams struct { MaxBytes int64 `json:"max_bytes"` MaxGas int64 `json:"max_gas"` } @@ -32,17 +33,24 @@ type EvidenceParams struct { MaxAge int64 `json:"max_age"` // only accept new evidence more recent than this } +// ValidatorParams restrict the public key types validators can use. +// NOTE: uses ABCI pubkey naming, not Amino routes. +type ValidatorParams struct { + PubKeyTypes []string `json:"pub_key_types"` +} + // DefaultConsensusParams returns a default ConsensusParams. func DefaultConsensusParams() *ConsensusParams { return &ConsensusParams{ - DefaultBlockSize(), + DefaultBlockSizeParams(), DefaultEvidenceParams(), + DefaultValidatorParams(), } } -// DefaultBlockSize returns a default BlockSize. -func DefaultBlockSize() BlockSize { - return BlockSize{ +// DefaultBlockSizeParams returns a default BlockSizeParams. +func DefaultBlockSizeParams() BlockSizeParams { + return BlockSizeParams{ MaxBytes: 22020096, // 21MB MaxGas: -1, } @@ -55,6 +63,12 @@ func DefaultEvidenceParams() EvidenceParams { } } +// DefaultValidatorParams returns a default ValidatorParams, which allows +// only ed25519 pubkeys. +func DefaultValidatorParams() ValidatorParams { + return ValidatorParams{[]string{ABCIPubKeyTypeEd25519}} +} + // Validate validates the ConsensusParams to ensure all values are within their // allowed limits, and returns an error if they are not. func (params *ConsensusParams) Validate() error { @@ -72,9 +86,22 @@ func (params *ConsensusParams) Validate() error { params.BlockSize.MaxGas) } - if params.EvidenceParams.MaxAge <= 0 { + if params.Evidence.MaxAge <= 0 { return cmn.NewError("EvidenceParams.MaxAge must be greater than 0. 
Got %d", - params.EvidenceParams.MaxAge) + params.Evidence.MaxAge) + } + + if len(params.Validator.PubKeyTypes) == 0 { + return cmn.NewError("len(Validator.PubKeyTypes) must be greater than 0") + } + + // Check if keyType is a known ABCIPubKeyType + for i := 0; i < len(params.Validator.PubKeyTypes); i++ { + keyType := params.Validator.PubKeyTypes[i] + if _, ok := ABCIPubKeyTypesToAminoRoutes[keyType]; !ok { + return cmn.NewError("params.Validator.PubKeyTypes[%d], %s, is an unknown pubkey type", + i, keyType) + } } return nil @@ -94,6 +121,24 @@ func (params *ConsensusParams) Hash() []byte { return hasher.Sum(nil) } +func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { + return params.BlockSize == params2.BlockSize && + params.Evidence == params2.Evidence && + stringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) +} + +func stringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + // Update returns a copy of the params with updates from the non-zero fields of p2. // NOTE: note: must not modify the original func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusParams { @@ -108,8 +153,11 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar res.BlockSize.MaxBytes = params2.BlockSize.MaxBytes res.BlockSize.MaxGas = params2.BlockSize.MaxGas } - if params2.EvidenceParams != nil { - res.EvidenceParams.MaxAge = params2.EvidenceParams.MaxAge + if params2.Evidence != nil { + res.Evidence.MaxAge = params2.Evidence.MaxAge + } + if params2.Validator != nil { + res.Validator.PubKeyTypes = params2.Validator.PubKeyTypes } return res } diff --git a/types/params_test.go b/types/params_test.go index 2936e5a4e..dc1936fbf 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -9,23 +9,32 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ) +var ( + valEd25519 = []string{ABCIPubKeyTypeEd25519} + valSecp256k1 = []string{ABCIPubKeyTypeSecp256k1} +) + func TestConsensusParamsValidation(t *testing.T) { testCases := []struct { params ConsensusParams valid bool }{ // test block size - 0: {makeParams(1, 0, 1), true}, - 1: {makeParams(0, 0, 1), false}, - 2: {makeParams(47*1024*1024, 0, 1), true}, - 3: {makeParams(10, 0, 1), true}, - 4: {makeParams(100*1024*1024, 0, 1), true}, - 5: {makeParams(101*1024*1024, 0, 1), false}, - 6: {makeParams(1024*1024*1024, 0, 1), false}, - 7: {makeParams(1024*1024*1024, 0, -1), false}, + 0: {makeParams(1, 0, 1, valEd25519), true}, + 1: {makeParams(0, 0, 1, valEd25519), false}, + 2: {makeParams(47*1024*1024, 0, 1, valEd25519), true}, + 3: {makeParams(10, 0, 1, valEd25519), true}, + 4: {makeParams(100*1024*1024, 0, 1, valEd25519), true}, + 5: {makeParams(101*1024*1024, 0, 1, valEd25519), false}, + 6: {makeParams(1024*1024*1024, 0, 1, valEd25519), false}, + 7: {makeParams(1024*1024*1024, 0, -1, valEd25519), false}, // test evidence age - 8: {makeParams(1, 0, 0), false}, - 9: {makeParams(1, 0, -1), false}, + 8: {makeParams(1, 0, 0, valEd25519), false}, + 9: {makeParams(1, 0, -1, valEd25519), false}, + // test no pubkey type provided + 10: {makeParams(1, 0, 1, []string{}), false}, + // test invalid pubkey type provided + 11: {makeParams(1, 0, 1, []string{"potatoes make good pubkeys"}), false}, } for i, tc := range testCases { if tc.valid { @@ -36,28 +45,31 @@ func TestConsensusParamsValidation(t *testing.T) { } } -func makeParams(blockBytes, blockGas, evidenceAge 
int64) ConsensusParams { +func makeParams(blockBytes, blockGas, evidenceAge int64, pubkeyTypes []string) ConsensusParams { return ConsensusParams{ - BlockSize: BlockSize{ + BlockSize: BlockSizeParams{ MaxBytes: blockBytes, MaxGas: blockGas, }, - EvidenceParams: EvidenceParams{ + Evidence: EvidenceParams{ MaxAge: evidenceAge, }, + Validator: ValidatorParams{ + PubKeyTypes: pubkeyTypes, + }, } } func TestConsensusParamsHash(t *testing.T) { params := []ConsensusParams{ - makeParams(4, 2, 3), - makeParams(1, 4, 3), - makeParams(1, 2, 4), - makeParams(2, 5, 7), - makeParams(1, 7, 6), - makeParams(9, 5, 4), - makeParams(7, 8, 9), - makeParams(4, 6, 5), + makeParams(4, 2, 3, valEd25519), + makeParams(1, 4, 3, valEd25519), + makeParams(1, 2, 4, valEd25519), + makeParams(2, 5, 7, valEd25519), + makeParams(1, 7, 6, valEd25519), + makeParams(9, 5, 4, valEd25519), + makeParams(7, 8, 9, valEd25519), + makeParams(4, 6, 5, valEd25519), } hashes := make([][]byte, len(params)) @@ -83,23 +95,26 @@ func TestConsensusParamsUpdate(t *testing.T) { }{ // empty updates { - makeParams(1, 2, 3), + makeParams(1, 2, 3, valEd25519), &abci.ConsensusParams{}, - makeParams(1, 2, 3), + makeParams(1, 2, 3, valEd25519), }, // fine updates { - makeParams(1, 2, 3), + makeParams(1, 2, 3, valEd25519), &abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ + BlockSize: &abci.BlockSizeParams{ MaxBytes: 100, MaxGas: 200, }, - EvidenceParams: &abci.EvidenceParams{ + Evidence: &abci.EvidenceParams{ MaxAge: 300, }, + Validator: &abci.ValidatorParams{ + PubKeyTypes: valSecp256k1, + }, }, - makeParams(100, 200, 300), + makeParams(100, 200, 300, valSecp256k1), }, } for _, tc := range testCases { diff --git a/types/protobuf.go b/types/protobuf.go index e1ec81e82..1535c1e37 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -24,6 +24,12 @@ const ( ABCIPubKeyTypeSecp256k1 = "secp256k1" ) +// TODO: Make non-global by allowing for registration of more pubkey types +var ABCIPubKeyTypesToAminoRoutes = map[string]string{ + ABCIPubKeyTypeEd25519: ed25519.PubKeyAminoRoute, + ABCIPubKeyTypeSecp256k1: secp256k1.PubKeyAminoRoute, +} + //------------------------------------------------------- // TM2PB is used for converting Tendermint ABCI to protobuf ABCI. 
@@ -119,12 +125,15 @@ func (tm2pb) ValidatorUpdates(vals *ValidatorSet) []abci.ValidatorUpdate { func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { return &abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ + BlockSize: &abci.BlockSizeParams{ MaxBytes: params.BlockSize.MaxBytes, MaxGas: params.BlockSize.MaxGas, }, - EvidenceParams: &abci.EvidenceParams{ - MaxAge: params.EvidenceParams.MaxAge, + Evidence: &abci.EvidenceParams{ + MaxAge: params.Evidence.MaxAge, + }, + Validator: &abci.ValidatorParams{ + PubKeyTypes: params.Validator.PubKeyTypes, }, } } @@ -215,12 +224,15 @@ func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams { return ConsensusParams{ - BlockSize: BlockSize{ + BlockSize: BlockSizeParams{ MaxBytes: csp.BlockSize.MaxBytes, MaxGas: csp.BlockSize.MaxGas, }, - EvidenceParams: EvidenceParams{ - MaxAge: csp.EvidenceParams.MaxAge, + Evidence: EvidenceParams{ + MaxAge: csp.Evidence.MaxAge, + }, + Validator: ValidatorParams{ + PubKeyTypes: csp.Validator.PubKeyTypes, }, } } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 7e7f55a1d..f5a2ce5d4 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -64,7 +64,6 @@ func TestABCIValidators(t *testing.T) { func TestABCIConsensusParams(t *testing.T) { cp := DefaultConsensusParams() - cp.EvidenceParams.MaxAge = 0 // TODO add this to ABCI abciCP := TM2PB.ConsensusParams(cp) cp2 := PB2TM.ConsensusParams(abciCP) From 60437953ac0a99cabf6589e3d60fdb5b77b136d1 Mon Sep 17 00:00:00 2001 From: yutianwu Date: Tue, 30 Oct 2018 23:46:55 +0800 Subject: [PATCH 104/113] [R4R] libs/log: add year to log format (#2707) * add year to log format * update documentation --- CHANGELOG_PENDING.md | 1 + docs/architecture/adr-001-logging.md | 8 ++++---- libs/log/tmfmt_logger.go | 6 +++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 5c25a4b19..c8c55f532 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -91,6 +91,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi - [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at github.com/tendermint/crypto - [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit +- [libs/log] [\#2706](https://github.com/tendermint/tendermint/issues/2706) Add year to log format ### BUG FIXES: - [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile need call Flush() before rename (@goolAdapter) diff --git a/docs/architecture/adr-001-logging.md b/docs/architecture/adr-001-logging.md index a11a49e14..77e5d39a8 100644 --- a/docs/architecture/adr-001-logging.md +++ b/docs/architecture/adr-001-logging.md @@ -52,13 +52,13 @@ On top of this interface, we will need to implement a stdout logger, which will Many people say that they like the current output, so let's stick with it. 
``` -NOTE[04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 +NOTE[2017-04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 ``` Couple of minor changes: ``` -I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 +I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 ``` Notice the level is encoded using only one char plus milliseconds. @@ -155,14 +155,14 @@ Important keyvals should go first. Example: ``` correct -I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0 +I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0 ``` not ``` wrong -I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1 +I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1 ``` for that in most cases you'll need to add `instance` field to a logger upon creating, not when u log a particular message: diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go index de155fefa..247ce8fc0 100644 --- a/libs/log/tmfmt_logger.go +++ b/libs/log/tmfmt_logger.go @@ -84,13 +84,13 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { // Form a custom Tendermint line // // Example: - // D[05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) + // D[2016-05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) // // Description: // D - first character of the level, uppercase (ASCII only) - // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) + // [2016-05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) // Stopping ... 
- message
-	enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("01-02|15:04:05.000"), msg))
+	enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("2006-01-02|15:04:05.000"), msg))

 	if module != unknown {
 		enc.buf.WriteString("module=" + module + " ")

From a530352f6165ac4c9dc64921fae2ab7500c9bc39 Mon Sep 17 00:00:00 2001
From: Ismail Khoffi
Date: Tue, 30 Oct 2018 17:16:55 +0100
Subject: [PATCH 105/113] Align Vote/Proposal fields with canonical order and fields (#2730)

* reorder fields

* add TestVoteString & update tests

* remove redundant info from Proposal.String()

* update spec

* revert changes on vote.String() -> more human friendly
---
 Gopkg.lock                         |  6 +++---
 docs/spec/blockchain/blockchain.md | 16 ++++++++--------
 types/proposal.go                  | 14 ++++++++++----
 types/vote.go                      |  9 +++++----
 types/vote_test.go                 | 26 ++++++++++++++++++++------
 5 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index f4656e6ba..59e42f920 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -408,14 +408,14 @@

 [[projects]]
   branch = "master"
-  digest = "1:fd98d154bf152ad5a49600ede7d7341851bcdfe358b9b82e5ccdba818618167c"
+  digest = "1:5207b4bc950fd0e45544263103af3e119c94fba6717f9d61931f7a19a7c0706a"
   name = "golang.org/x/sys"
   packages = [
     "cpu",
     "unix",
   ]
   pruneopts = "UT"
-  revision = "2772b66316d2c587efeb188dcd5ebc6987656e84"
+  revision = "f7626d0b1519d8323581a047ca8b372ebf28de9a"

 [[projects]]
   digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
@@ -446,7 +446,7 @@
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
   pruneopts = "UT"
-  revision = "94acd270e44e65579b9ee3cdab25034d33fed608"
+  revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313"

 [[projects]]
   digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md
index c5291ed45..061685378 100644
--- a/docs/spec/blockchain/blockchain.md
+++ b/docs/spec/blockchain/blockchain.md
@@ -146,14 +146,14 @@ The vote includes information about the validator signing it.

 ```go
 type Vote struct {
-	ValidatorAddress []byte
-	ValidatorIndex   int
-	Height           int64
-	Round            int
-	Timestamp        Time
-	Type             int8
-	BlockID          BlockID
-	Signature        []byte
+	Type             SignedMsgType // byte
+	Height           int64
+	Round            int
+	Timestamp        time.Time
+	BlockID          BlockID
+	ValidatorAddress Address
+	ValidatorIndex   int
+	Signature        []byte
 }
 ```
diff --git a/types/proposal.go b/types/proposal.go
index 5d70a3c84..fa82fdbb5 100644
--- a/types/proposal.go
+++ b/types/proposal.go
@@ -20,11 +20,12 @@ var (
 // to be considered valid. It may depend on votes from a previous round,
 // a so-called Proof-of-Lock (POL) round, as noted in the POLRound and POLBlockID.
 type Proposal struct {
+	Type             SignedMsgType
 	Height           int64         `json:"height"`
 	Round            int           `json:"round"`
+	POLRound         int           `json:"pol_round"` // -1 if null.
 	Timestamp        time.Time     `json:"timestamp"`
 	BlockPartsHeader PartSetHeader `json:"block_parts_header"`
-	POLRound         int           `json:"pol_round"` // -1 if null.
 	POLBlockID       BlockID       `json:"pol_block_id"` // zero if null.
 	Signature        []byte        `json:"signature"`
 }
@@ -33,11 +34,12 @@ type Proposal struct {

 // NewProposal returns a new Proposal.
 // If there is no POLRound, polRound should be -1.
func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal { return &Proposal{ + Type: ProposalType, Height: height, Round: round, + POLRound: polRound, Timestamp: tmtime.Now(), BlockPartsHeader: blockPartsHeader, - POLRound: polRound, POLBlockID: polBlockID, } } @@ -45,9 +47,13 @@ func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRou // String returns a string representation of the Proposal. func (p *Proposal) String() string { return fmt.Sprintf("Proposal{%v/%v %v (%v,%v) %X @ %s}", - p.Height, p.Round, p.BlockPartsHeader, p.POLRound, + p.Height, + p.Round, + p.BlockPartsHeader, + p.POLRound, p.POLBlockID, - cmn.Fingerprint(p.Signature), CanonicalTime(p.Timestamp)) + cmn.Fingerprint(p.Signature), + CanonicalTime(p.Timestamp)) } // SignBytes returns the Proposal bytes for signing diff --git a/types/vote.go b/types/vote.go index 333684fc2..826330d5c 100644 --- a/types/vote.go +++ b/types/vote.go @@ -48,13 +48,13 @@ type Address = crypto.Address // Represents a prevote, precommit, or commit vote from validators for consensus. type Vote struct { - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` + Type SignedMsgType `json:"type"` Height int64 `json:"height"` Round int `json:"round"` Timestamp time.Time `json:"timestamp"` - Type SignedMsgType `json:"type"` BlockID BlockID `json:"block_id"` // zero if vote is nil. + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` Signature []byte `json:"signature"` } @@ -94,7 +94,8 @@ func (vote *Vote) String() string { typeString, cmn.Fingerprint(vote.BlockID.Hash), cmn.Fingerprint(vote.Signature), - CanonicalTime(vote.Timestamp)) + CanonicalTime(vote.Timestamp), + ) } func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { diff --git a/types/vote_test.go b/types/vote_test.go index 1d7e3daf0..572735858 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -26,12 +26,10 @@ func exampleVote(t byte) *Vote { } return &Vote{ - ValidatorAddress: tmhash.Sum([]byte("validator_address")), - ValidatorIndex: 56789, - Height: 12345, - Round: 2, - Timestamp: stamp, - Type: SignedMsgType(t), + Type: SignedMsgType(t), + Height: 12345, + Round: 2, + Timestamp: stamp, BlockID: BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), PartsHeader: PartSetHeader{ @@ -39,6 +37,8 @@ func exampleVote(t byte) *Vote { Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), }, }, + ValidatorAddress: tmhash.Sum([]byte("validator_address")), + ValidatorIndex: 56789, } } @@ -235,3 +235,17 @@ func TestMaxVoteBytes(t *testing.T) { assert.EqualValues(t, MaxVoteBytes, len(bz)) } + +func TestVoteString(t *testing.T) { + str := examplePrecommit().String() + expected := `Vote{56789:6AF1F4111082 12345/02/2(Precommit) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}` + if str != expected { + t.Errorf("Got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str) + } + + str2 := examplePrevote().String() + expected = `Vote{56789:6AF1F4111082 12345/02/1(Prevote) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}` + if str2 != expected { + t.Errorf("Got unexpected string for Vote. 
Expected:\n%v\nGot:\n%v", expected, str2) + } +} From 7a033444800843cdba95a86eb02ab07de1bcf41d Mon Sep 17 00:00:00 2001 From: Zarko Milosevic Date: Wed, 31 Oct 2018 14:20:36 +0100 Subject: [PATCH 106/113] Introduce EventValidBlock for informing peers about wanted block (#2652) * Introduce EventValidBlock for informing peer about wanted block * Merge with develop * Add isCommit flag to NewValidBlock message - Add test for the case of +2/3 Precommit from the previous round --- CHANGELOG_PENDING.md | 2 + consensus/byzantine_test.go | 2 +- consensus/common_test.go | 5 + consensus/reactor.go | 87 ++++---- consensus/state.go | 63 +++--- consensus/state_test.go | 185 ++++++++++++++++++ .../reactors/consensus/consensus-reactor.md | 8 +- p2p/metrics.go | 2 +- types/event_bus.go | 4 + types/events.go | 2 + types/evidence_test.go | 2 +- 11 files changed, 286 insertions(+), 76 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c8c55f532..ce0cf247a 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -102,6 +102,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi timeoutPrecommit before starting next round - [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for Proposal or timeoutProposal before entering prevote +- [consensus] [\#2583](https://github.com/tendermint/tendermint/issues/2583) ensure valid + block property with faulty proposer - [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock - [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1 - [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 60c2b0dbd..ed4cc90cc 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -263,7 +263,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { // Send our state to peer. // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). if !br.reactor.fastSync { - br.reactor.sendNewRoundStepMessages(peer) + br.reactor.sendNewRoundStepMessage(peer) } } func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { diff --git a/consensus/common_test.go b/consensus/common_test.go index ca14a2926..c949922fe 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -420,6 +420,11 @@ func ensureNewProposal(proposalCh <-chan interface{}, height int64, round int) { "Timeout expired while waiting for NewProposal event") } +func ensureNewValidBlock(validBlockCh <-chan interface{}, height int64, round int) { + ensureNewEvent(validBlockCh, height, round, ensureTimeout, + "Timeout expired while waiting for NewValidBlock event") +} + func ensureNewBlock(blockCh <-chan interface{}, height int64) { select { case <-time.After(ensureTimeout): diff --git a/consensus/reactor.go b/consensus/reactor.go index 6643273cb..8d5e726f7 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -174,7 +174,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) { // Send our state to peer. // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). 
if !conR.FastSync() { - conR.sendNewRoundStepMessages(peer) + conR.sendNewRoundStepMessage(peer) } } @@ -215,8 +215,8 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) switch msg := msg.(type) { case *NewRoundStepMessage: ps.ApplyNewRoundStepMessage(msg) - case *CommitStepMessage: - ps.ApplyCommitStepMessage(msg) + case *NewValidBlockMessage: + ps.ApplyNewValidBlockMessage(msg) case *HasVoteMessage: ps.ApplyHasVoteMessage(msg) case *VoteSetMaj23Message: @@ -365,7 +365,12 @@ func (conR *ConsensusReactor) subscribeToBroadcastEvents() { const subscriber = "consensus-reactor" conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, func(data tmevents.EventData) { - conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState)) + conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState)) + }) + + conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock, + func(data tmevents.EventData) { + conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState)) }) conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, @@ -391,14 +396,20 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartb conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg)) } -func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) { - nrsMsg, csMsg := makeRoundStepMessages(rs) - if nrsMsg != nil { - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) - } - if csMsg != nil { - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg)) +func (conR *ConsensusReactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { + nrsMsg := makeRoundStepMessage(rs) + conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) +} + +func (conR *ConsensusReactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { + csMsg := &NewValidBlockMessage{ + Height: rs.Height, + Round: rs.Round, + BlockPartsHeader: rs.ProposalBlockParts.Header(), + BlockParts: rs.ProposalBlockParts.BitArray(), + IsCommit: rs.Step == cstypes.RoundStepCommit, } + conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg)) } // Broadcasts HasVoteMessage to peers that care. 
@@ -427,33 +438,21 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) { */ } -func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) { +func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) { nrsMsg = &NewRoundStepMessage{ - Height: rs.Height, - Round: rs.Round, - Step: rs.Step, + Height: rs.Height, + Round: rs.Round, + Step: rs.Step, SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.Round(), } - if rs.Step == cstypes.RoundStepCommit { - csMsg = &CommitStepMessage{ - Height: rs.Height, - BlockPartsHeader: rs.ProposalBlockParts.Header(), - BlockParts: rs.ProposalBlockParts.BitArray(), - } - } return } -func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) { +func (conR *ConsensusReactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.conS.GetRoundState() - nrsMsg, csMsg := makeRoundStepMessages(rs) - if nrsMsg != nil { - peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) - } - if csMsg != nil { - peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg)) - } + nrsMsg := makeRoundStepMessage(rs) + peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) } func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) { @@ -524,6 +523,7 @@ OUTER_LOOP: msg := &ProposalMessage{Proposal: rs.Proposal} logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { + // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! ps.SetHasProposal(rs.Proposal) } } @@ -964,11 +964,18 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round { return } + if ps.PRS.Proposal { return } ps.PRS.Proposal = true + + // ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage + if ps.PRS.ProposalBlockParts != nil { + return + } + ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total) ps.PRS.ProposalPOLRound = proposal.POLRound @@ -1211,7 +1218,6 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { // Just remember these values. psHeight := ps.PRS.Height psRound := ps.PRS.Round - //psStep := ps.PRS.Step psCatchupCommitRound := ps.PRS.CatchupCommitRound psCatchupCommit := ps.PRS.CatchupCommit @@ -1252,8 +1258,8 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { } } -// ApplyCommitStepMessage updates the peer state for the new commit. -func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) { +// ApplyNewValidBlockMessage updates the peer state for the new valid block. 
+func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -1261,6 +1267,10 @@ func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) { return } + if ps.PRS.Round != msg.Round && !msg.IsCommit { + return + } + ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader ps.PRS.ProposalBlockParts = msg.BlockParts } @@ -1344,7 +1354,7 @@ type ConsensusMessage interface{} func RegisterConsensusMessages(cdc *amino.Codec) { cdc.RegisterInterface((*ConsensusMessage)(nil), nil) cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil) - cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil) + cdc.RegisterConcrete(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage", nil) cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil) cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil) cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil) @@ -1384,15 +1394,18 @@ func (m *NewRoundStepMessage) String() string { //------------------------------------- // CommitStepMessage is sent when a block is committed. -type CommitStepMessage struct { +type NewValidBlockMessage struct { Height int64 + Round int BlockPartsHeader types.PartSetHeader BlockParts *cmn.BitArray + IsCommit bool } // String returns a string representation. -func (m *CommitStepMessage) String() string { - return fmt.Sprintf("[CommitStep H:%v BP:%v BA:%v]", m.Height, m.BlockPartsHeader, m.BlockParts) +func (m *NewValidBlockMessage) String() string { + return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]", + m.Height, m.Round, m.BlockPartsHeader, m.BlockParts, m.IsCommit) } //------------------------------------- diff --git a/consensus/state.go b/consensus/state.go index 40aeeb7a4..3048ee3dd 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -904,13 +904,6 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { polRound, polBlockID := cs.Votes.POLInfo() proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { - // Set fields - /* fields set by setProposal and addBlockPart - cs.Proposal = proposal - cs.ProposalBlock = block - cs.ProposalBlockParts = blockParts - */ - // send proposal and block parts on internal msg queue cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) for i := 0; i < blockParts.Total(); i++ { @@ -994,14 +987,6 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) { cs.newStep() }() - // fire event for how we got here - if cs.isProposalComplete() { - cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent()) - } else { - // we received +2/3 prevotes for a future round - // TODO: catchup event? - } - cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Sign and broadcast vote as necessary @@ -1240,6 +1225,8 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { // Set up ProposalBlockParts and keep waiting. cs.ProposalBlock = nil cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) + cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) + cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) } else { // We just need to keep waiting. 
} @@ -1420,11 +1407,6 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { return nil } - // We don't care about the proposal if we're already in cstypes.RoundStepCommit. - if cstypes.RoundStepCommit <= cs.Step { - return nil - } - // Verify POLRound, which must be -1 or between 0 and proposal.Round exclusive. if proposal.POLRound != -1 && (proposal.POLRound < 0 || proposal.Round <= proposal.POLRound) { @@ -1437,7 +1419,12 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { } cs.Proposal = proposal - cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader) + // We don't update cs.ProposalBlockParts if it is already set. + // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. + // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! + if cs.ProposalBlockParts == nil { + cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader) + } cs.Logger.Info("Received proposal", "proposal", proposal) return nil } @@ -1478,6 +1465,7 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p } // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) + cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent()) // Update Valid* if we can. prevotes := cs.Votes.Prevotes(cs.Round) @@ -1616,16 +1604,26 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, // Update Valid* if we can. // NOTE: our proposal block may be nil or not what received a polka.. - // TODO: we may want to still update the ValidBlock and obtain it via gossipping - if len(blockID.Hash) != 0 && - (cs.ValidRound < vote.Round) && - (vote.Round <= cs.Round) && - cs.ProposalBlock.HashesTo(blockID.Hash) { - - cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) - cs.ValidRound = vote.Round - cs.ValidBlock = cs.ProposalBlock - cs.ValidBlockParts = cs.ProposalBlockParts + if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) { + + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info( + "Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) + cs.ValidRound = vote.Round + cs.ValidBlock = cs.ProposalBlock + cs.ValidBlockParts = cs.ProposalBlockParts + } else { + cs.Logger.Info( + "Valid block we don't know about. Set ProposalBlock=nil", + "proposal", cs.ProposalBlock.Hash(), "blockId", blockID.Hash) + // We're getting the wrong block. 
+ cs.ProposalBlock = nil + } + if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) + } + cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) + cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) } } @@ -1634,7 +1632,8 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, // Round-skip if there is any 2/3+ of votes ahead of us cs.enterNewRound(height, vote.Round) } else if cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step { // current round - if prevotes.HasTwoThirdsMajority() { + blockID, ok := prevotes.TwoThirdsMajority() + if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) { cs.enterPrecommit(height, vote.Round) } else if prevotes.HasTwoThirdsAny() { cs.enterPrevoteWait(height, vote.Round) diff --git a/consensus/state_test.go b/consensus/state_test.go index 83c4bb142..87c8b285a 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -966,6 +966,117 @@ func TestProposeValidBlock(t *testing.T) { assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.ValidBlock.Hash())) } +// What we want: +// P0 miss to lock B but set valid block to B after receiving delayed prevote. +func TestSetValidBlockOnDelayedPrevote(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round and wait for propose and prevote + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + propBlock := rs.ProposalBlock + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) + + // vs2 send prevote for propBlock + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2) + + // vs3 send prevote nil + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs3) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + + ensurePrecommit(voteCh, height, round) + // we should have precommitted + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + + rs = cs1.GetRoundState() + + assert.True(t, rs.ValidBlock == nil) + assert.True(t, rs.ValidBlockParts == nil) + assert.True(t, rs.ValidRound == -1) + + // vs2 send (delayed) prevote for propBlock + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs4) + + ensureNewValidBlock(validBlockCh, height, round) + + rs = cs1.GetRoundState() + + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, rs.ValidRound == round) +} + +// What we want: +// P0 miss to lock B as Proposal Block is missing, but set valid block to B after +// receiving delayed Block Proposal. 
+func TestSetValidBlockOnDelayedProposal(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + + round = round + 1 // move to round in which P0 is not proposer + incrementRound(vs2, vs3, vs4) + + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) + + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + // vs2, vs3 and vs4 send prevote for propBlock + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + ensureNewValidBlock(validBlockCh, height, round) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, rs.ValidRound == round) +} + // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round @@ -1078,6 +1189,80 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) } +// What we want: +// P0 emit NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet +func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, 1 + + incrementRound(vs2, vs3, vs4) + + partSize := types.BlockPartSizeBytes + + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + + _, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + // start round in which PO is not proposer + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + // vs2, vs3 and vs4 send precommit for propBlock + signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + ensureNewValidBlock(validBlockCh, height, round) + + rs := cs1.GetRoundState() + assert.True(t, rs.Step == cstypes.RoundStepCommit) + assert.True(t, rs.ProposalBlock == nil) + assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) + +} + +// What we want: +// P0 receives 2/3+ Precommit for B for round 0, while being in round 1. It emits NewValidBlock event. 
+// After receiving block, it executes block and moves to the next height. +func TestCommitFromPreviousRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, 1 + + partSize := types.BlockPartSizeBytes + + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + // start round in which PO is not proposer + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + // vs2, vs3 and vs4 send precommit for propBlock for the previous round + signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + + ensureNewValidBlock(validBlockCh, height, round) + + rs := cs1.GetRoundState() + assert.True(t, rs.Step == cstypes.RoundStepCommit) + assert.True(t, rs.CommitRound == vs2.Round) + assert.True(t, rs.ProposalBlock == nil) + assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) + + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + + ensureNewProposal(proposalCh, height, round) + ensureNewRound(newRoundCh, height+1, 0) +} + //------------------------------------------------------------------------------------------ // SlashingSuite // TODO: Slashing diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md index 7be350322..5ba03322d 100644 --- a/docs/spec/reactors/consensus/consensus-reactor.md +++ b/docs/spec/reactors/consensus/consensus-reactor.md @@ -129,11 +129,11 @@ handleMessage(msg): Reset prs.CatchupCommitRound and prs.CatchupCommit ``` -### CommitStepMessage handler +### NewValidBlockMessage handler ``` handleMessage(msg): - if prs.Height == msg.Height then + if prs.Height == msg.Height && prs.Round == msg.Round then prs.ProposalBlockPartsHeader = msg.BlockPartsHeader prs.ProposalBlockParts = msg.BlockParts ``` @@ -161,8 +161,8 @@ handleMessage(msg): handleMessage(msg): if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return prs.Proposal = true - prs.ProposalBlockPartsHeader = msg.BlockPartsHeader - prs.ProposalBlockParts = empty set + if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler + prs.ProposalBlockPartsHeader = msg.BlockPartsHeader prs.ProposalPOLRound = msg.POLRound prs.ProposalPOL = nil Send msg through internal peerMsgQueue to ConsensusState service diff --git a/p2p/metrics.go b/p2p/metrics.go index b066fb317..ed26d1192 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -62,7 +62,7 @@ func PrometheusMetrics(namespace string) *Metrics { // NopMetrics returns no-op Metrics. 
func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), + Peers: discard.NewGauge(), PeerReceiveBytesTotal: discard.NewCounter(), PeerSendBytesTotal: discard.NewCounter(), PeerPendingSendBytes: discard.NewGauge(), diff --git a/types/event_bus.go b/types/event_bus.go index 269d5ab1f..65206e938 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -83,6 +83,10 @@ func (b *EventBus) PublishEventVote(data EventDataVote) error { return b.Publish(EventVote, data) } +func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error { + return b.Publish(EventValidBlock, data) +} + // PublishEventTx publishes tx event with tags from Result. Note it will add // predefined tags (EventTypeKey, TxHashKey). Existing tags with the same names // will be overwritten. diff --git a/types/events.go b/types/events.go index 09f7216e9..33aa712ef 100644 --- a/types/events.go +++ b/types/events.go @@ -23,6 +23,7 @@ const ( EventTimeoutWait = "TimeoutWait" EventTx = "Tx" EventUnlock = "Unlock" + EventValidBlock = "ValidBlock" EventValidatorSetUpdates = "ValidatorSetUpdates" EventVote = "Vote" ) @@ -119,6 +120,7 @@ var ( EventQueryTx = QueryForEvent(EventTx) EventQueryUnlock = QueryForEvent(EventUnlock) EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdates) + EventQueryValidBlock = QueryForEvent(EventValidBlock) EventQueryVote = QueryForEvent(EventVote) ) diff --git a/types/evidence_test.go b/types/evidence_test.go index 44276ab18..033b51e5d 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -61,7 +61,7 @@ func TestEvidence(t *testing.T) { {vote1, makeVote(val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round {vote1, makeVote(val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step {vote1, makeVote(val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator - {vote1, badVote, false}, // signed by wrong key + {vote1, badVote, false}, // signed by wrong key } pubKey := val.GetPubKey() From c5905900eb26d0af7297faee97f1861cee840c00 Mon Sep 17 00:00:00 2001 From: Zarko Milosevic Date: Wed, 31 Oct 2018 15:27:11 +0100 Subject: [PATCH 107/113] Simplify proposal msg (#2735) * Align Proposal message with spec * Update spec --- CHANGELOG_PENDING.md | 1 + Gopkg.lock | 4 +-- consensus/byzantine_test.go | 8 ++--- consensus/common_test.go | 4 +-- consensus/reactor.go | 4 +-- consensus/replay.go | 2 +- consensus/replay_test.go | 2 +- consensus/state.go | 7 ++-- consensus/state_test.go | 12 ++++--- consensus/types/round_state_test.go | 7 ++-- docs/spec/reactors/consensus/consensus.md | 14 +++----- privval/priv_validator_test.go | 16 ++++----- types/canonical.go | 30 ++++++++--------- types/proposal.go | 40 +++++++++++------------ types/proposal_test.go | 16 +++++---- 15 files changed, 82 insertions(+), 85 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index ce0cf247a..f28b46089 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -108,6 +108,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi - [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1 - [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a block +- [consensus] [\#2646](https://github.com/tendermint/tendermint/issues/2646) Simplify Proposal message (align with spec) - [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter) - 
[libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter) - [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time diff --git a/Gopkg.lock b/Gopkg.lock index 59e42f920..35542bf62 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -408,14 +408,14 @@ [[projects]] branch = "master" - digest = "1:5207b4bc950fd0e45544263103af3e119c94fba6717f9d61931f7a19a7c0706a" + digest = "1:6f86e2f2e2217cd4d74dec6786163cf80e4d2b99adb341ecc60a45113b844dca" name = "golang.org/x/sys" packages = [ "cpu", "unix", ] pruneopts = "UT" - revision = "f7626d0b1519d8323581a047ca8b372ebf28de9a" + revision = "7e31e0c00fa05cb5fbf4347b585621d6709e19a4" [[projects]] digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index ed4cc90cc..6f46c04d3 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -179,16 +179,16 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons // Create a new proposal block from state/txs from the mempool. block1, blockParts1 := cs.createProposalBlock() - polRound, polBlockID := cs.Votes.POLInfo() - proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID) + polRound, propBlockID := cs.ValidRound, types.BlockID{block1.Hash(), blockParts1.Header()} + proposal1 := types.NewProposal(height, round, polRound, propBlockID) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil { t.Error(err) } // Create a new proposal block from state/txs from the mempool. block2, blockParts2 := cs.createProposalBlock() - polRound, polBlockID = cs.Votes.POLInfo() - proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID) + polRound, propBlockID = cs.ValidRound, types.BlockID{block2.Hash(), blockParts2.Header()} + proposal2 := types.NewProposal(height, round, polRound, propBlockID) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil { t.Error(err) } diff --git a/consensus/common_test.go b/consensus/common_test.go index c949922fe..ae8bb6bfe 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -130,8 +130,8 @@ func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round } // Make proposal - polRound, polBlockID := cs1.Votes.POLInfo() - proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) + polRound, propBlockID := cs1.ValidRound, types.BlockID{block.Hash(), blockParts.Header()} + proposal = types.NewProposal(height, round, polRound, propBlockID) if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil { panic(err) } diff --git a/consensus/reactor.go b/consensus/reactor.go index 8d5e726f7..e8c7adc7f 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -976,8 +976,8 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { return } - ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader - ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total) + ps.PRS.ProposalBlockPartsHeader = proposal.BlockID.PartsHeader + ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockID.PartsHeader.Total) ps.PRS.ProposalPOLRound = proposal.POLRound ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received. 
} diff --git a/consensus/replay.go b/consensus/replay.go index bffab8d28..fcff877fd 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -73,7 +73,7 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan case *ProposalMessage: p := msg.Proposal cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", - p.BlockPartsHeader, "pol", p.POLRound, "peer", peerID) + p.BlockID.PartsHeader, "pol", p.POLRound, "peer", peerID) case *BlockPartMessage: cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) case *VoteMessage: diff --git a/consensus/replay_test.go b/consensus/replay_test.go index d6691103e..70c4ba332 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -575,7 +575,7 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { case msgInfo: switch msg := m.Msg.(type) { case *ProposalMessage: - return &msg.Proposal.BlockPartsHeader + return &msg.Proposal.BlockID.PartsHeader case *BlockPartMessage: return msg.Part case *VoteMessage: diff --git a/consensus/state.go b/consensus/state.go index 3048ee3dd..5b1448983 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -901,9 +901,10 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { } // Make proposal - polRound, polBlockID := cs.Votes.POLInfo() - proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) + propBlockId := types.BlockID{block.Hash(), blockParts.Header()} + proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { + // send proposal and block parts on internal msg queue cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) for i := 0; i < blockParts.Total(); i++ { @@ -1423,7 +1424,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! 
if cs.ProposalBlockParts == nil { - cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader) + cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartsHeader) } cs.Logger.Info("Received proposal", "proposal", proposal) return nil diff --git a/consensus/state_test.go b/consensus/state_test.go index 87c8b285a..9bf4fada5 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -197,7 +197,9 @@ func TestStateBadProposal(t *testing.T) { stateHash[0] = byte((stateHash[0] + 1) % 255) propBlock.AppHash = stateHash propBlockParts := propBlock.MakePartSet(partSize) - proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{}) + proposal := types.NewProposal( + vs2.Height, round, -1, + types.BlockID{propBlock.Hash(), propBlockParts.Header()}) if err := vs2.SignProposal(config.ChainID(), proposal); err != nil { t.Fatal("failed to sign bad proposal", err) } @@ -811,6 +813,7 @@ func TestStateLockPOLSafety2(t *testing.T) { _, propBlock0 := decideProposal(cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() propBlockParts0 := propBlock0.MakePartSet(partSize) + propBlockID0 := types.BlockID{propBlockHash0, propBlockParts0.Header()} // the others sign a polka but we don't see it prevotes := signVotes(types.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) @@ -819,7 +822,6 @@ func TestStateLockPOLSafety2(t *testing.T) { prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockHash1 := propBlock1.Hash() propBlockParts1 := propBlock1.MakePartSet(partSize) - propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()} incrementRound(vs2, vs3, vs4) @@ -854,7 +856,7 @@ func TestStateLockPOLSafety2(t *testing.T) { round = round + 1 // moving to the next round // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, round, propBlockParts0.Header(), 0, propBlockID1) + newProp := types.NewProposal(height, round, 0, propBlockID0) if err := vs3.SignProposal(config.ChainID(), newProp); err != nil { t.Fatal(err) } @@ -909,7 +911,7 @@ func TestProposeValidBlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash) - // the others sign a polka but we don't see it + // the others sign a polka signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) @@ -964,6 +966,8 @@ func TestProposeValidBlock(t *testing.T) { rs = cs1.GetRoundState() assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), propBlockHash)) assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.ValidBlock.Hash())) + assert.True(t, rs.Proposal.POLRound == rs.ValidRound) + assert.True(t, bytes.Equal(rs.Proposal.BlockID.Hash, rs.ValidBlock.Hash())) } // What we want: diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index a330981f6..6a1c4533a 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -63,11 +63,8 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { // Random Proposal proposal := &types.Proposal{ Timestamp: tmtime.Now(), - BlockPartsHeader: types.PartSetHeader{ - Hash: cmn.RandBytes(20), - }, - POLBlockID: blockID, - Signature: sig, + BlockID: blockID, + Signature: sig, } // Random HeightVoteSet // TODO: hvs := diff --git a/docs/spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md index a1cf17bcb..0f1922303 100644 --- 
a/docs/spec/reactors/consensus/consensus.md +++ b/docs/spec/reactors/consensus/consensus.md @@ -47,25 +47,21 @@ type ProposalMessage struct { ### Proposal Proposal contains height and round for which this proposal is made, BlockID as a unique identifier -of proposed block, timestamp, and two fields (POLRound and POLBlockID) that are needed for -termination of the consensus. The message is signed by the validator private key. +of proposed block, timestamp, and POLRound (a so-called Proof-of-Lock (POL) round) that is needed for +termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that +is locked in POLRound. The message is signed by the validator private key. ```go type Proposal struct { Height int64 Round int - Timestamp Time - BlockID BlockID POLRound int - POLBlockID BlockID + BlockID BlockID + Timestamp Time Signature Signature } ``` -NOTE: In the current version of the Tendermint, the consensus value in proposal is represented with -PartSetHeader, and with BlockID in vote message. It should be aligned as suggested in this spec as -BlockID contains PartSetHeader. - ## VoteMessage VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index 90796ddfc..4f4eed97b 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -140,8 +140,8 @@ func TestSignProposal(t *testing.T) { require.Nil(t, err) privVal := GenFilePV(tempFile.Name()) - block1 := types.PartSetHeader{5, []byte{1, 2, 3}} - block2 := types.PartSetHeader{10, []byte{3, 2, 1}} + block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}} + block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{10, []byte{3, 2, 1}}} height, round := int64(10), 1 // sign a proposal for first time @@ -179,7 +179,7 @@ func TestDifferByTimestamp(t *testing.T) { require.Nil(t, err) privVal := GenFilePV(tempFile.Name()) - block1 := types.PartSetHeader{5, []byte{1, 2, 3}} + block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}} height, round := int64(10), 1 chainID := "mychainid" @@ -241,11 +241,11 @@ func newVote(addr types.Address, idx int, height int64, round int, typ byte, blo } } -func newProposal(height int64, round int, partsHeader types.PartSetHeader) *types.Proposal { +func newProposal(height int64, round int, blockID types.BlockID) *types.Proposal { return &types.Proposal{ - Height: height, - Round: round, - BlockPartsHeader: partsHeader, - Timestamp: tmtime.Now(), + Height: height, + Round: round, + BlockID: blockID, + Timestamp: tmtime.Now(), } } diff --git a/types/canonical.go b/types/canonical.go index 632dcb624..a4f6f214d 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -23,14 +23,13 @@ type CanonicalPartSetHeader struct { } type CanonicalProposal struct { - Type SignedMsgType // type alias for byte - Height int64 `binary:"fixed64"` - Round int64 `binary:"fixed64"` - POLRound int64 `binary:"fixed64"` - Timestamp time.Time - BlockPartsHeader CanonicalPartSetHeader - POLBlockID CanonicalBlockID - ChainID string + Type SignedMsgType // type alias for byte + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + POLRound int64 `binary:"fixed64"` + BlockID CanonicalBlockID + Timestamp time.Time + ChainID string } type CanonicalVote struct { @@ -71,14 +70,13 @@ func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { func CanonicalizeProposal(chainID string, proposal 
*Proposal) CanonicalProposal { return CanonicalProposal{ - Type: ProposalType, - Height: proposal.Height, - Round: int64(proposal.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) - POLRound: int64(proposal.POLRound), - Timestamp: proposal.Timestamp, - BlockPartsHeader: CanonicalizePartSetHeader(proposal.BlockPartsHeader), - POLBlockID: CanonicalizeBlockID(proposal.POLBlockID), - ChainID: chainID, + Type: ProposalType, + Height: proposal.Height, + Round: int64(proposal.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) + POLRound: int64(proposal.POLRound), + BlockID: CanonicalizeBlockID(proposal.BlockID), + Timestamp: proposal.Timestamp, + ChainID: chainID, } } diff --git a/types/proposal.go b/types/proposal.go index fa82fdbb5..09cfd1967 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -15,43 +15,41 @@ var ( ) // Proposal defines a block proposal for the consensus. -// It refers to the block only by its PartSetHeader. +// It refers to the block by BlockID field. // It must be signed by the correct proposer for the given Height/Round // to be considered valid. It may depend on votes from a previous round, -// a so-called Proof-of-Lock (POL) round, as noted in the POLRound and POLBlockID. +// a so-called Proof-of-Lock (POL) round, as noted in the POLRound. +// If POLRound >= 0, then BlockID corresponds to the block that is locked in POLRound. type Proposal struct { - Type SignedMsgType - Height int64 `json:"height"` - Round int `json:"round"` - POLRound int `json:"pol_round"` // -1 if null. - Timestamp time.Time `json:"timestamp"` - BlockPartsHeader PartSetHeader `json:"block_parts_header"` - POLBlockID BlockID `json:"pol_block_id"` // zero if null. - Signature []byte `json:"signature"` + Type SignedMsgType + Height int64 `json:"height"` + Round int `json:"round"` + POLRound int `json:"pol_round"` // -1 if null. + BlockID BlockID `json:"block_id"` + Timestamp time.Time `json:"timestamp"` + Signature []byte `json:"signature"` } // NewProposal returns a new Proposal. // If there is no POLRound, polRound should be -1. -func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal { +func NewProposal(height int64, round int, polRound int, blockID BlockID) *Proposal { return &Proposal{ - Type: ProposalType, - Height: height, - Round: round, - POLRound: polRound, - Timestamp: tmtime.Now(), - BlockPartsHeader: blockPartsHeader, - POLBlockID: polBlockID, + Type: ProposalType, + Height: height, + Round: round, + BlockID: blockID, + POLRound: polRound, + Timestamp: tmtime.Now(), } } // String returns a string representation of the Proposal. 
func (p *Proposal) String() string { - return fmt.Sprintf("Proposal{%v/%v %v (%v,%v) %X @ %s}", + return fmt.Sprintf("Proposal{%v/%v (%v, %v) %X @ %s}", p.Height, p.Round, - p.BlockPartsHeader, + p.BlockID, p.POLRound, - p.POLBlockID, cmn.Fingerprint(p.Signature), CanonicalTime(p.Timestamp)) } diff --git a/types/proposal_test.go b/types/proposal_test.go index 8ae1f3e5a..9738db2d2 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -15,11 +15,11 @@ func init() { panic(err) } testProposal = &Proposal{ - Height: 12345, - Round: 23456, - BlockPartsHeader: PartSetHeader{111, []byte("blockparts")}, - POLRound: -1, - Timestamp: stamp, + Height: 12345, + Round: 23456, + BlockID: BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}}, + POLRound: -1, + Timestamp: stamp, } } @@ -34,7 +34,7 @@ func TestProposalSignable(t *testing.T) { func TestProposalString(t *testing.T) { str := testProposal.String() - expected := `Proposal{12345/23456 111:626C6F636B70 (-1,:0:000000000000) 000000000000 @ 2018-02-11T07:09:22.765Z}` + expected := `Proposal{12345/23456 (010203:111:626C6F636B70, -1) 000000000000 @ 2018-02-11T07:09:22.765Z}` if str != expected { t.Errorf("Got unexpected string for Proposal. Expected:\n%v\nGot:\n%v", expected, str) } @@ -44,7 +44,9 @@ func TestProposalVerifySignature(t *testing.T) { privVal := NewMockPV() pubKey := privVal.GetPubKey() - prop := NewProposal(4, 2, PartSetHeader{777, []byte("proper")}, 2, BlockID{}) + prop := NewProposal( + 4, 2, 2, + BlockID{[]byte{1, 2, 3}, PartSetHeader{777, []byte("proper")}}) signBytes := prop.SignBytes("test_chain_id") // sign it From a83c268d7f1e33249c4acff514eef5901efb66e0 Mon Sep 17 00:00:00 2001 From: Zarko Milosevic Date: Wed, 31 Oct 2018 15:59:01 +0100 Subject: [PATCH 108/113] Fix spec (#2736) --- consensus/reactor.go | 4 +++- .../reactors/consensus/consensus-reactor.md | 9 ++++++--- docs/spec/reactors/consensus/consensus.md | 19 +++++++++++-------- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/consensus/reactor.go b/consensus/reactor.go index e8c7adc7f..bf6f7ba77 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -1393,7 +1393,9 @@ func (m *NewRoundStepMessage) String() string { //------------------------------------- -// CommitStepMessage is sent when a block is committed. +// NewValidBlockMessage is sent when a validator observes a valid block B in some round r, +//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// In case the block is also committed, then IsCommit flag is set to true. 
type NewValidBlockMessage struct { Height int64 Round int diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md index 5ba03322d..23275b122 100644 --- a/docs/spec/reactors/consensus/consensus-reactor.md +++ b/docs/spec/reactors/consensus/consensus-reactor.md @@ -133,9 +133,12 @@ handleMessage(msg): ``` handleMessage(msg): - if prs.Height == msg.Height && prs.Round == msg.Round then - prs.ProposalBlockPartsHeader = msg.BlockPartsHeader - prs.ProposalBlockParts = msg.BlockParts + if prs.Height != msg.Height then return + + if prs.Round != msg.Round && !msg.IsCommit then return + + prs.ProposalBlockPartsHeader = msg.BlockPartsHeader + prs.ProposalBlockParts = msg.BlockParts ``` ### HasVoteMessage handler diff --git a/docs/spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md index 0f1922303..e5d1f4cc3 100644 --- a/docs/spec/reactors/consensus/consensus.md +++ b/docs/spec/reactors/consensus/consensus.md @@ -26,7 +26,7 @@ only to a subset of processes called peers. By the gossiping protocol, a validat all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can reach agreement on some block, and also obtain the content of the chosen block (block parts). As part of the gossiping protocol, processes also send auxiliary messages that inform peers about the -executed steps of the core consensus algorithm (`NewRoundStepMessage` and `CommitStepMessage`), and +executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and also messages that inform peers what votes the process has seen (`HasVoteMessage`, `VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping protocol to determine what messages a process should send to its peers. @@ -132,23 +132,26 @@ type NewRoundStepMessage struct { } ``` -## CommitStepMessage +## NewValidBlockMessage -CommitStepMessage is sent when an agreement on some block is reached. It contains height for which -agreement is reached, block parts header that describes the decided block and is used to obtain all +NewValidBlockMessage is sent when a validator observes a valid block B in some round r, +i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +It contains height and round in which valid block is observed, block parts header that describes +the valid block and is used to obtain all block parts, and a bit array of the block parts a process currently has, so its peers can know what parts it is missing so they can send them. +In case the block is also committed, then IsCommit flag is set to true. ```go -type CommitStepMessage struct { +type NewValidBlockMessage struct { Height int64 - BlockID BlockID + Round int + BlockPartsHeader PartSetHeader BlockParts BitArray + IsCommit bool } ``` -TODO: We use BlockID instead of BlockPartsHeader (in current implementation) for symmetry. - ## ProposalPOLMessage ProposalPOLMessage is sent when a previous block is re-proposed. 
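The NewValidBlockMessage handling rule specified above (apply the update only for the peer's current height, and only for the current round unless IsCommit is set) can be illustrated with a small, self-contained sketch. The types below are simplified stand-ins for illustration only (a plain bool slice instead of cmn.BitArray, a trimmed-down PeerRoundState), not the actual tendermint/types definitions; only the gating logic mirrors the handler pseudocode from consensus-reactor.md.

```go
package main

import "fmt"

// Simplified stand-ins for the spec types (illustrative only).
type PartSetHeader struct {
	Total int
	Hash  []byte
}

type NewValidBlockMessage struct {
	Height           int64
	Round            int
	BlockPartsHeader PartSetHeader
	BlockParts       []bool // stand-in for the BitArray of parts the sender has
	IsCommit         bool
}

// PeerRoundState keeps only the fields the handler touches.
type PeerRoundState struct {
	Height                   int64
	Round                    int
	ProposalBlockPartsHeader PartSetHeader
	ProposalBlockParts       []bool
}

// applyNewValidBlock mirrors the spec: ignore messages for a different height,
// and for a different round unless the block was already committed (IsCommit).
func applyNewValidBlock(prs *PeerRoundState, msg NewValidBlockMessage) {
	if prs.Height != msg.Height {
		return
	}
	if prs.Round != msg.Round && !msg.IsCommit {
		return
	}
	prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
	prs.ProposalBlockParts = msg.BlockParts
}

func main() {
	prs := &PeerRoundState{Height: 10, Round: 1}

	// Earlier round, but IsCommit is set: the update is applied anyway,
	// which lets a peer catch up on a block committed in a prior round.
	applyNewValidBlock(prs, NewValidBlockMessage{
		Height:           10,
		Round:            0,
		BlockPartsHeader: PartSetHeader{Total: 3},
		BlockParts:       []bool{true, false, true},
		IsCommit:         true,
	})
	fmt.Println(prs.ProposalBlockPartsHeader.Total, prs.ProposalBlockParts) // 3 [true false true]
}
```

The IsCommit escape hatch is the design point here: once a block is committed, its parts are useful to a peer regardless of the round that peer is currently in, so the round check is relaxed exactly in that case.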
From 1660e30ffe077773014ab11b3745b8b4b6933c8f Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 31 Oct 2018 08:02:13 -0700 Subject: [PATCH 109/113] Fix general merkle keypath to start w/ last op's key (#2733) * Fix general merkle keypath to start w/ last op's key * Update CHANGELOG_PENDING.md --- CHANGELOG_PENDING.md | 1 + crypto/merkle/proof.go | 9 +-- crypto/merkle/proof_test.go | 136 ++++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+), 4 deletions(-) create mode 100644 crypto/merkle/proof_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index f28b46089..2d970e407 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -109,6 +109,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi - [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a block - [consensus] [\#2646](https://github.com/tendermint/tendermint/issues/2646) Simplify Proposal message (align with spec) +- [crypto] [\#2733](https://github.com/tendermint/tendermint/pull/2733) Fix general merkle keypath to start w/ last op's key - [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter) - [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter) - [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 3059ed3b7..5705c96bd 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -43,10 +43,11 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er for i, op := range poz { key := op.GetKey() if len(key) != 0 { - if !bytes.Equal(keys[0], key) { - return cmn.NewError("Key mismatch on operation #%d: expected %+v but %+v", i, []byte(keys[0]), []byte(key)) + lastKey := keys[len(keys)-1] + if !bytes.Equal(lastKey, key) { + return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) } - keys = keys[1:] + keys = keys[:len(keys)-1] } args, err = op.Run(args) if err != nil { @@ -54,7 +55,7 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er } } if !bytes.Equal(root, args[0]) { - return cmn.NewError("Calculated root hash is invalid: expected %+v but %+v", root, args[0]) + return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0]) } if len(keys) != 0 { return cmn.NewError("Keypath not consumed all") diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go new file mode 100644 index 000000000..cc208e9a1 --- /dev/null +++ b/crypto/merkle/proof_test.go @@ -0,0 +1,136 @@ +package merkle + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ProofOpDomino = "test:domino" + +// Expects given input, produces given output. +// Like the game dominos. 
+type DominoOp struct { + key string // unexported, may be empty + Input string + Output string +} + +func NewDominoOp(key, input, output string) DominoOp { + return DominoOp{ + key: key, + Input: input, + Output: output, + } +} + +func DominoOpDecoder(pop ProofOp) (ProofOperator, error) { + if pop.Type != ProofOpDomino { + panic("unexpected proof op type") + } + var op DominoOp // a bit strange as we'll discard this, but it works. + err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + } + return NewDominoOp(string(pop.Key), op.Input, op.Output), nil +} + +func (dop DominoOp) ProofOp() ProofOp { + bz := amino.MustMarshalBinaryLengthPrefixed(dop) + return ProofOp{ + Type: ProofOpDomino, + Key: []byte(dop.key), + Data: bz, + } +} + +func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) { + if len(input) != 1 { + return nil, cmn.NewError("Expected input of length 1") + } + if string(input[0]) != dop.Input { + return nil, cmn.NewError("Expected input %v, got %v", + dop.Input, string(input[0])) + } + return [][]byte{[]byte(dop.Output)}, nil +} + +func (dop DominoOp) GetKey() []byte { + return []byte(dop.key) +} + +//---------------------------------------- + +func TestProofOperators(t *testing.T) { + var err error + + // ProofRuntime setup + // TODO test this somehow. + // prt := NewProofRuntime() + // prt.RegisterOpDecoder(ProofOpDomino, DominoOpDecoder) + + // ProofOperators setup + op1 := NewDominoOp("KEY1", "INPUT1", "INPUT2") + op2 := NewDominoOp("KEY2", "INPUT2", "INPUT3") + op3 := NewDominoOp("", "INPUT3", "INPUT4") + op4 := NewDominoOp("KEY4", "INPUT4", "OUTPUT4") + + // Good + popz := ProofOperators([]ProofOperator{op1, op2, op3, op4}) + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.Nil(t, err) + err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1")) + assert.Nil(t, err) + + // BAD INPUT + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")}) + assert.NotNil(t, err) + err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG")) + assert.NotNil(t, err) + + // BAD KEY 1 + err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD KEY 2 + err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD KEY 3 + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD KEY 4 + err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD OUTPUT 1 + err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD OUTPUT 2 + err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD POPZ 1 + popz = []ProofOperator{op1, op2, op4} + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD POPZ 2 + popz = []ProofOperator{op4, op3, op2, op1} + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD POPZ 3 + popz = []ProofOperator{} + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) +} + +func bz(s string) []byte { + return []byte(s) +} From a22c962e28053f9f51d71d3394e0c62bf551fb99 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 
31 Oct 2018 12:42:05 -0400 Subject: [PATCH 110/113] TMHASH is 32 bytes. Closes #1990 (#2732) * tmhash is fully 32 bytes. closes #1990 * AddressSize * fix tests * fix max sizes --- crypto/crypto.go | 21 +++++++++++++------ crypto/ed25519/ed25519.go | 2 +- crypto/merkle/simple_map_test.go | 12 +++++------ crypto/tmhash/hash.go | 33 ++++++++++++++++++++++-------- crypto/tmhash/hash_test.go | 23 +++++++++++++++++++-- docs/spec/blockchain/blockchain.md | 2 +- docs/spec/blockchain/encoding.md | 7 +++---- docs/spec/blockchain/state.md | 4 ++-- p2p/key.go | 3 +-- state/validation.go | 4 ++-- types/block.go | 2 +- types/block_test.go | 17 +++++++-------- types/evidence.go | 2 +- types/validator.go | 8 ++++---- types/vote.go | 2 +- types/vote_test.go | 5 +++-- 16 files changed, 96 insertions(+), 51 deletions(-) diff --git a/crypto/crypto.go b/crypto/crypto.go index 09c12ff76..2462b0a98 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -1,21 +1,23 @@ package crypto import ( + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) -type PrivKey interface { - Bytes() []byte - Sign(msg []byte) ([]byte, error) - PubKey() PubKey - Equals(PrivKey) bool -} +const ( + AddressSize = tmhash.TruncatedSize +) // An address is a []byte, but hex-encoded even in JSON. // []byte leaves us the option to change the address length. // Use an alias so Unmarshal methods (with ptr receivers) are available too. type Address = cmn.HexBytes +func AddressHash(bz []byte) Address { + return Address(tmhash.SumTruncated(bz)) +} + type PubKey interface { Address() Address Bytes() []byte @@ -23,6 +25,13 @@ type PubKey interface { Equals(PubKey) bool } +type PrivKey interface { + Bytes() []byte + Sign(msg []byte) ([]byte, error) + PubKey() PubKey + Equals(PrivKey) bool +} + type Symmetric interface { Keygen() []byte Encrypt(plaintext []byte, secret []byte) (ciphertext []byte) diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index 61872d98d..e077cbda4 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -136,7 +136,7 @@ type PubKeyEd25519 [PubKeyEd25519Size]byte // Address is the SHA256-20 of the raw pubkey bytes. func (pubKey PubKeyEd25519) Address() crypto.Address { - return crypto.Address(tmhash.Sum(pubKey[:])) + return crypto.Address(tmhash.SumTruncated(pubKey[:])) } // Bytes marshals the PubKey using amino encoding. 
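This patch widens tmhash.Sum to the full 32-byte SHA-256 and moves the previous 20-byte form behind TruncatedSize/SumTruncated, with addresses now derived via crypto.AddressHash. The sketch below, using only the standard library, shows the relationship between the two forms; the local sum/sumTruncated helpers merely mirror the patch's API and are assumptions for illustration.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

const truncatedSize = 20 // matches tmhash.TruncatedSize in this patch

// sum returns the full 32-byte SHA-256, as tmhash.Sum now does.
func sum(bz []byte) []byte {
	h := sha256.Sum256(bz)
	return h[:]
}

// sumTruncated returns the first 20 bytes of the SHA-256, as tmhash.SumTruncated
// does; addresses (crypto.AddressHash) are built from this truncated form.
func sumTruncated(bz []byte) []byte {
	h := sha256.Sum256(bz)
	return h[:truncatedSize]
}

func main() {
	pubkey := []byte("example-pubkey-bytes")
	full := sum(pubkey)
	addr := sumTruncated(pubkey)
	fmt.Printf("tmhash.Sum:          %x (%d bytes)\n", full, len(full))
	fmt.Printf("address (truncated): %x (%d bytes)\n", addr, len(addr))
}
```

Only addresses keep the 20-byte truncation; hashes used in consensus structures now carry the full digest, which is why the simple-map test vectors and the maximum-size constants in the hunks that follow all change.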
diff --git a/crypto/merkle/simple_map_test.go b/crypto/merkle/simple_map_test.go index bc095c003..7abde119d 100644 --- a/crypto/merkle/simple_map_test.go +++ b/crypto/merkle/simple_map_test.go @@ -13,14 +13,14 @@ func TestSimpleMap(t *testing.T) { values []string // each string gets converted to []byte in test want string }{ - {[]string{"key1"}, []string{"value1"}, "fa9bc106ffd932d919bee935ceb6cf2b3dd72d8f"}, - {[]string{"key1"}, []string{"value2"}, "e00e7dcfe54e9fafef5111e813a587f01ba9c3e8"}, + {[]string{"key1"}, []string{"value1"}, "321d150de16dceb51c72981b432b115045383259b1a550adf8dc80f927508967"}, + {[]string{"key1"}, []string{"value2"}, "2a9e4baf321eac99f6eecc3406603c14bc5e85bb7b80483cbfc75b3382d24a2f"}, // swap order with 2 keys - {[]string{"key1", "key2"}, []string{"value1", "value2"}, "eff12d1c703a1022ab509287c0f196130123d786"}, - {[]string{"key2", "key1"}, []string{"value2", "value1"}, "eff12d1c703a1022ab509287c0f196130123d786"}, + {[]string{"key1", "key2"}, []string{"value1", "value2"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"}, + {[]string{"key2", "key1"}, []string{"value2", "value1"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"}, // swap order with 3 keys - {[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26"}, - {[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26"}, + {[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"}, + {[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"}, } for i, tc := range tests { db := newSimpleMap() diff --git a/crypto/tmhash/hash.go b/crypto/tmhash/hash.go index 1b29d8680..f9b958242 100644 --- a/crypto/tmhash/hash.go +++ b/crypto/tmhash/hash.go @@ -6,10 +6,27 @@ import ( ) const ( - Size = 20 + Size = sha256.Size BlockSize = sha256.BlockSize ) +// New returns a new hash.Hash. +func New() hash.Hash { + return sha256.New() +} + +// Sum returns the SHA256 of the bz. +func Sum(bz []byte) []byte { + h := sha256.Sum256(bz) + return h[:] +} + +//------------------------------------------------------------- + +const ( + TruncatedSize = 20 +) + type sha256trunc struct { sha256 hash.Hash } @@ -19,7 +36,7 @@ func (h sha256trunc) Write(p []byte) (n int, err error) { } func (h sha256trunc) Sum(b []byte) []byte { shasum := h.sha256.Sum(b) - return shasum[:Size] + return shasum[:TruncatedSize] } func (h sha256trunc) Reset() { @@ -27,22 +44,22 @@ func (h sha256trunc) Reset() { } func (h sha256trunc) Size() int { - return Size + return TruncatedSize } func (h sha256trunc) BlockSize() int { return h.sha256.BlockSize() } -// New returns a new hash.Hash. -func New() hash.Hash { +// NewTruncated returns a new hash.Hash. +func NewTruncated() hash.Hash { return sha256trunc{ sha256: sha256.New(), } } -// Sum returns the first 20 bytes of SHA256 of the bz. -func Sum(bz []byte) []byte { +// SumTruncated returns the first 20 bytes of SHA256 of the bz. 
+func SumTruncated(bz []byte) []byte { hash := sha256.Sum256(bz) - return hash[:Size] + return hash[:TruncatedSize] } diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go index 27938039a..89a779801 100644 --- a/crypto/tmhash/hash_test.go +++ b/crypto/tmhash/hash_test.go @@ -14,10 +14,29 @@ func TestHash(t *testing.T) { hasher.Write(testVector) bz := hasher.Sum(nil) + bz2 := tmhash.Sum(testVector) + + hasher = sha256.New() + hasher.Write(testVector) + bz3 := hasher.Sum(nil) + + assert.Equal(t, bz, bz2) + assert.Equal(t, bz, bz3) +} + +func TestHashTruncated(t *testing.T) { + testVector := []byte("abc") + hasher := tmhash.NewTruncated() + hasher.Write(testVector) + bz := hasher.Sum(nil) + + bz2 := tmhash.SumTruncated(testVector) + hasher = sha256.New() hasher.Write(testVector) - bz2 := hasher.Sum(nil) - bz2 = bz2[:20] + bz3 := hasher.Sum(nil) + bz3 = bz3[:tmhash.TruncatedSize] assert.Equal(t, bz, bz2) + assert.Equal(t, bz, bz3) } diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index 061685378..d96a3c7b8 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -344,7 +344,7 @@ next validator sets Merkle root. ### ConsensusParamsHash ```go -block.ConsensusParamsHash == tmhash(amino(state.ConsensusParams)) +block.ConsensusParamsHash == TMHASH(amino(state.ConsensusParams)) ``` Hash of the amino-encoded consensus parameters. diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 2f9fcdca1..f5120cdd4 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -176,13 +176,12 @@ greater, for example: h0 h1 h3 h4 h0 h1 h2 h3 h4 h5 ``` -Tendermint always uses the `TMHASH` hash function, which is the first 20-bytes -of the SHA256: +Tendermint always uses the `TMHASH` hash function, which is equivalent to +SHA256: ``` func TMHASH(bz []byte) []byte { - shasum := SHA256(bz) - return shasum[:20] + return SHA256(bz) } ``` diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md index a0badd718..502f9d696 100644 --- a/docs/spec/blockchain/state.md +++ b/docs/spec/blockchain/state.md @@ -56,8 +56,8 @@ type Validator struct { } ``` -When hashing the Validator struct, the pubkey is not hashed, -because the address is already the hash of the pubkey. +When hashing the Validator struct, the address is not included, +because it is redundant with the pubkey. The `state.Validators`, `state.LastValidators`, and `state.NextValidators`, must always by sorted by validator address, so that there is a canonical order for computing the SimpleMerkleRoot. diff --git a/p2p/key.go b/p2p/key.go index 3f38b48a9..fc64f27bb 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -8,7 +8,6 @@ import ( crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -17,7 +16,7 @@ type ID string // IDByteLength is the length of a crypto.Address. Currently only 20. // TODO: support other length addresses ? 
-const IDByteLength = tmhash.Size +const IDByteLength = crypto.AddressSize //------------------------------------------------------------------------------ // Persistent peer ID diff --git a/state/validation.go b/state/validation.go index a12919847..345224843 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/types" ) @@ -158,7 +158,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { // NOTE: We can't actually verify it's the right proposer because we dont // know what round the block was first proposed. So just check that it's // a legit address and a known validator. - if len(block.ProposerAddress) != tmhash.Size || + if len(block.ProposerAddress) != crypto.AddressSize || !state.Validators.HasAddress(block.ProposerAddress) { return fmt.Errorf( "Block.Header.ProposerAddress, %X, is not a validator", diff --git a/types/block.go b/types/block.go index 477e39997..46ad73a71 100644 --- a/types/block.go +++ b/types/block.go @@ -15,7 +15,7 @@ import ( const ( // MaxHeaderBytes is a maximum header size (including amino overhead). - MaxHeaderBytes int64 = 533 + MaxHeaderBytes int64 = 653 // MaxAminoOverheadForBlock - maximum amino overhead to encode a block (up to // MaxBlockSizeBytes in size) not including it's parts except Data. diff --git a/types/block_test.go b/types/block_test.go index 28e73f661..46881a099 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/version" @@ -116,7 +117,7 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(1024) assert.NotNil(t, partSet) - assert.Equal(t, 2, partSet.Total()) + assert.Equal(t, 3, partSet.Total()) } func TestBlockHashesTo(t *testing.T) { @@ -262,7 +263,7 @@ func TestMaxHeaderBytes(t *testing.T) { AppHash: tmhash.Sum([]byte("app_hash")), LastResultsHash: tmhash.Sum([]byte("last_results_hash")), EvidenceHash: tmhash.Sum([]byte("evidence_hash")), - ProposerAddress: tmhash.Sum([]byte("proposer_address")), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), } bz, err := cdc.MarshalBinaryLengthPrefixed(h) @@ -292,9 +293,9 @@ func TestBlockMaxDataBytes(t *testing.T) { }{ 0: {-10, 1, 0, true, 0}, 1: {10, 1, 0, true, 0}, - 2: {742, 1, 0, true, 0}, - 3: {743, 1, 0, false, 0}, - 4: {744, 1, 0, false, 1}, + 2: {886, 1, 0, true, 0}, + 3: {887, 1, 0, false, 0}, + 4: {888, 1, 0, false, 1}, } for i, tc := range testCases { @@ -320,9 +321,9 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { }{ 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, - 2: {824, 1, true, 0}, - 3: {825, 1, false, 0}, - 4: {826, 1, false, 1}, + 2: {984, 1, true, 0}, + 3: {985, 1, false, 0}, + 4: {986, 1, false, 1}, } for i, tc := range testCases { diff --git a/types/evidence.go b/types/evidence.go index 7a808d57b..d1e15c819 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -14,7 +14,7 @@ import ( const ( // MaxEvidenceBytes is a maximum size of any evidence (including amino overhead). 
- MaxEvidenceBytes int64 = 436 + MaxEvidenceBytes int64 = 484 ) // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. diff --git a/types/validator.go b/types/validator.go index af3471848..4bfd78a6d 100644 --- a/types/validator.go +++ b/types/validator.go @@ -77,15 +77,15 @@ func (v *Validator) Hash() []byte { } // Bytes computes the unique encoding of a validator with a given voting power. -// These are the bytes that gets hashed in consensus. It excludes pubkey -// as its redundant with the address. This also excludes accum which changes +// These are the bytes that gets hashed in consensus. It excludes address +// as its redundant with the pubkey. This also excludes accum which changes // every round. func (v *Validator) Bytes() []byte { return cdcEncode((struct { - Address Address + PubKey crypto.PubKey VotingPower int64 }{ - v.Address, + v.PubKey, v.VotingPower, })) } diff --git a/types/vote.go b/types/vote.go index 826330d5c..1d7e9cf6f 100644 --- a/types/vote.go +++ b/types/vote.go @@ -12,7 +12,7 @@ import ( const ( // MaxVoteBytes is a maximum vote size (including amino overhead). - MaxVoteBytes int64 = 199 + MaxVoteBytes int64 = 223 ) var ( diff --git a/types/vote_test.go b/types/vote_test.go index 572735858..cda54f898 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -37,7 +38,7 @@ func exampleVote(t byte) *Vote { Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), }, }, - ValidatorAddress: tmhash.Sum([]byte("validator_address")), + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), ValidatorIndex: 56789, } } @@ -211,7 +212,7 @@ func TestMaxVoteBytes(t *testing.T) { timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) vote := &Vote{ - ValidatorAddress: tmhash.Sum([]byte("validator_address")), + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), ValidatorIndex: math.MaxInt64, Height: math.MaxInt64, Round: math.MaxInt64, From fb91ef7462b421349a56c32733724d920fce3ad4 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 1 Nov 2018 07:07:18 +0100 Subject: [PATCH 111/113] validate reactor messages (#2711) * validate reactor messages Refs #2683 * validate blockchain messages Refs #2683 * validate evidence messages Refs #2683 * todo * check ProposalPOL and signature sizes * add a changelog entry * check addr is valid when we add it to the addrbook * validate incoming netAddr (not just nil check!) 
* fixes after Bucky's review * check timestamps * beef up block#ValidateBasic * move some checks into bcBlockResponseMessage * update Gopkg.lock Fix ``` grouped write of manifest, lock and vendor: failed to export github.com/tendermint/go-amino: fatal: failed to unpack tree object 6dcc6ddc143e116455c94b25c1004c99e0d0ca12 ``` by running `dep ensure -update` * bump year since now we check it * generate test/p2p/data on the fly using tendermint testnet * allow sync chains older than 1 year * use full path when creating a testnet * move testnet gen to test/docker/Dockerfile * relax LastCommitRound check Refs #2737 * fix conflicts after merge * add small comment * some ValidateBasic updates * fixes * AppHash length is not fixed --- CHANGELOG_PENDING.md | 4 + blockchain/reactor.go | 53 +++++- config/toml.go | 2 +- consensus/common_test.go | 2 - consensus/mempool_test.go | 1 - consensus/reactor.go | 161 ++++++++++++++++-- consensus/replay.go | 8 +- consensus/types/round_state.go | 7 + crypto/crypto.go | 1 + evidence/reactor.go | 23 ++- p2p/pex/addrbook.go | 4 + p2p/pex/errors.go | 8 + p2p/pex/pex_reactor.go | 28 ++- state/validation.go | 50 +++--- test/docker/Dockerfile | 11 +- test/p2p/README.md | 9 +- test/p2p/data/mach1/core/config/genesis.json | 39 ----- test/p2p/data/mach1/core/config/node_key.json | 6 - .../mach1/core/config/priv_validator.json | 14 -- test/p2p/data/mach2/core/config/genesis.json | 39 ----- test/p2p/data/mach2/core/config/node_key.json | 6 - .../mach2/core/config/priv_validator.json | 14 -- test/p2p/data/mach3/core/config/genesis.json | 39 ----- test/p2p/data/mach3/core/config/node_key.json | 6 - .../mach3/core/config/priv_validator.json | 14 -- test/p2p/data/mach4/core/config/genesis.json | 39 ----- test/p2p/data/mach4/core/config/node_key.json | 6 - .../mach4/core/config/priv_validator.json | 14 -- test/p2p/ip_plus_id.sh | 2 +- test/p2p/peer.sh | 6 +- test/p2p/pex/test_addrbook.sh | 6 +- types/block.go | 115 ++++++++++--- types/block_test.go | 6 +- types/evidence.go | 21 +++ types/heartbeat.go | 31 ++++ types/part_set.go | 26 ++- types/proposal.go | 29 ++++ types/signable.go | 12 ++ types/signed_msg_type.go | 9 +- types/validation.go | 40 +++++ types/vote.go | 38 ++++- 41 files changed, 613 insertions(+), 336 deletions(-) delete mode 100644 test/p2p/data/mach1/core/config/genesis.json delete mode 100644 test/p2p/data/mach1/core/config/node_key.json delete mode 100644 test/p2p/data/mach1/core/config/priv_validator.json delete mode 100644 test/p2p/data/mach2/core/config/genesis.json delete mode 100644 test/p2p/data/mach2/core/config/node_key.json delete mode 100644 test/p2p/data/mach2/core/config/priv_validator.json delete mode 100644 test/p2p/data/mach3/core/config/genesis.json delete mode 100644 test/p2p/data/mach3/core/config/node_key.json delete mode 100644 test/p2p/data/mach3/core/config/priv_validator.json delete mode 100644 test/p2p/data/mach4/core/config/genesis.json delete mode 100644 test/p2p/data/mach4/core/config/node_key.json delete mode 100644 test/p2p/data/mach4/core/config/priv_validator.json create mode 100644 types/validation.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 2d970e407..cad2f444a 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -92,6 +92,10 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi github.com/tendermint/crypto - [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit - [libs/log] 
[\#2706](https://github.com/tendermint/tendermint/issues/2706) Add year to log format +- [consensus] [\#2683] validate all incoming messages +- [evidence] [\#2683] validate all incoming messages +- [blockchain] [\#2683] validate all incoming messages +- [p2p/pex] [\#2683] validate pexAddrsMessage addresses ### BUG FIXES: - [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile need call Flush() before rename (@goolAdapter) diff --git a/blockchain/reactor.go b/blockchain/reactor.go index fc1b1f4d3..59318dcc5 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -1,6 +1,7 @@ package blockchain import ( + "errors" "fmt" "reflect" "time" @@ -180,6 +181,12 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) return } + if err = msg.ValidateBasic(); err != nil { + bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + bcR.Switch.StopPeerForError(src, err) + return + } + bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) switch msg := msg.(type) { @@ -188,7 +195,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // Unfortunately not queued since the queue is full. } case *bcBlockResponseMessage: - // Got a block. bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) case *bcStatusRequestMessage: // Send peer our state. @@ -352,7 +358,9 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error { // Messages // BlockchainMessage is a generic message for this reactor. -type BlockchainMessage interface{} +type BlockchainMessage interface { + ValidateBasic() error +} func RegisterBlockchainMessages(cdc *amino.Codec) { cdc.RegisterInterface((*BlockchainMessage)(nil), nil) @@ -377,6 +385,14 @@ type bcBlockRequestMessage struct { Height int64 } +// ValidateBasic performs basic validation. +func (m *bcBlockRequestMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (m *bcBlockRequestMessage) String() string { return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height) } @@ -385,6 +401,14 @@ type bcNoBlockResponseMessage struct { Height int64 } +// ValidateBasic performs basic validation. +func (m *bcNoBlockResponseMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (brm *bcNoBlockResponseMessage) String() string { return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height) } @@ -395,6 +419,15 @@ type bcBlockResponseMessage struct { Block *types.Block } +// ValidateBasic performs basic validation. +func (m *bcBlockResponseMessage) ValidateBasic() error { + if err := m.Block.ValidateBasic(); err != nil { + return err + } + + return nil +} + func (m *bcBlockResponseMessage) String() string { return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height) } @@ -405,6 +438,14 @@ type bcStatusRequestMessage struct { Height int64 } +// ValidateBasic performs basic validation. +func (m *bcStatusRequestMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (m *bcStatusRequestMessage) String() string { return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) } @@ -415,6 +456,14 @@ type bcStatusResponseMessage struct { Height int64 } +// ValidateBasic performs basic validation. 
+func (m *bcStatusResponseMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (m *bcStatusResponseMessage) String() string { return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) } diff --git a/config/toml.go b/config/toml.go index 62e5fa978..d73b9c81d 100644 --- a/config/toml.go +++ b/config/toml.go @@ -342,7 +342,7 @@ func ResetTestRoot(testName string) *Config { } var testGenesis = `{ - "genesis_time": "2017-10-10T08:20:13.695936996Z", + "genesis_time": "2018-10-10T08:20:13.695936996Z", "chain_id": "tendermint_test", "validators": [ { diff --git a/consensus/common_test.go b/consensus/common_test.go index ae8bb6bfe..4f48f4424 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -615,8 +615,6 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) { genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) s0, _ := sm.MakeGenesisState(genDoc) - db := dbm.NewMemDB() // remove this ? - sm.SaveState(db, s0) return s0, privValidators } diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index ed97ae681..3dc1cd5ff 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -10,7 +10,6 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/types" ) diff --git a/consensus/reactor.go b/consensus/reactor.go index bf6f7ba77..fc41e5734 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -8,8 +8,7 @@ import ( "github.com/pkg/errors" - "github.com/tendermint/go-amino" - + amino "github.com/tendermint/go-amino" cstypes "github.com/tendermint/tendermint/consensus/types" cmn "github.com/tendermint/tendermint/libs/common" tmevents "github.com/tendermint/tendermint/libs/events" @@ -205,6 +204,13 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) conR.Switch.StopPeerForError(src, err) return } + + if err = msg.ValidateBasic(); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(src, err) + return + } + conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) // Get peer states @@ -242,8 +248,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) case types.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: - conR.Logger.Error("Bad VoteSetBitsMessage field Type") - return + panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?") } src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{ Height: msg.Height, @@ -322,8 +327,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) case types.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: - conR.Logger.Error("Bad VoteSetBitsMessage field Type") - return + panic("Bad VoteSetBitsMessage field Type. 
Forgot to add a check in ValidateBasic?") } ps.ApplyVoteSetBitsMessage(msg, ourVotes) } else { @@ -440,9 +444,9 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) { func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) { nrsMsg = &NewRoundStepMessage{ - Height: rs.Height, - Round: rs.Round, - Step: rs.Step, + Height: rs.Height, + Round: rs.Round, + Step: rs.Step, SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.Round(), } @@ -1349,7 +1353,9 @@ func (ps *PeerState) StringIndented(indent string) string { // Messages // ConsensusMessage is a message that can be sent and received on the ConsensusReactor -type ConsensusMessage interface{} +type ConsensusMessage interface { + ValidateBasic() error +} func RegisterConsensusMessages(cdc *amino.Codec) { cdc.RegisterInterface((*ConsensusMessage)(nil), nil) @@ -1385,6 +1391,27 @@ type NewRoundStepMessage struct { LastCommitRound int } +// ValidateBasic performs basic validation. +func (m *NewRoundStepMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + if m.Round < 0 { + return errors.New("Negative Round") + } + if !m.Step.IsValid() { + return errors.New("Invalid Step") + } + + // NOTE: SecondsSinceStartTime may be negative + + if (m.Height == 1 && m.LastCommitRound != -1) || + (m.Height > 1 && m.LastCommitRound < -1) { // TODO: #2737 LastCommitRound should always be >= 0 for heights > 1 + return errors.New("Invalid LastCommitRound (for 1st block: -1, for others: >= 0)") + } + return nil +} + // String returns a string representation. func (m *NewRoundStepMessage) String() string { return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]", @@ -1404,6 +1431,25 @@ type NewValidBlockMessage struct { IsCommit bool } +// ValidateBasic performs basic validation. +func (m *NewValidBlockMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + if m.Round < 0 { + return errors.New("Negative Round") + } + if err := m.BlockPartsHeader.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong BlockPartsHeader: %v", err) + } + if m.BlockParts.Size() != m.BlockPartsHeader.Total { + return fmt.Errorf("BlockParts bit array size %d not equal to BlockPartsHeader.Total %d", + m.BlockParts.Size(), + m.BlockPartsHeader.Total) + } + return nil +} + // String returns a string representation. func (m *NewValidBlockMessage) String() string { return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]", @@ -1417,6 +1463,11 @@ type ProposalMessage struct { Proposal *types.Proposal } +// ValidateBasic performs basic validation. +func (m *ProposalMessage) ValidateBasic() error { + return m.Proposal.ValidateBasic() +} + // String returns a string representation. func (m *ProposalMessage) String() string { return fmt.Sprintf("[Proposal %v]", m.Proposal) @@ -1431,6 +1482,20 @@ type ProposalPOLMessage struct { ProposalPOL *cmn.BitArray } +// ValidateBasic performs basic validation. +func (m *ProposalPOLMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + if m.ProposalPOLRound < 0 { + return errors.New("Negative ProposalPOLRound") + } + if m.ProposalPOL.Size() == 0 { + return errors.New("Empty ProposalPOL bit array") + } + return nil +} + // String returns a string representation. 
func (m *ProposalPOLMessage) String() string { return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL) @@ -1445,6 +1510,20 @@ type BlockPartMessage struct { Part *types.Part } +// ValidateBasic performs basic validation. +func (m *BlockPartMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + if m.Round < 0 { + return errors.New("Negative Round") + } + if err := m.Part.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong Part: %v", err) + } + return nil +} + // String returns a string representation. func (m *BlockPartMessage) String() string { return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part) @@ -1457,6 +1536,11 @@ type VoteMessage struct { Vote *types.Vote } +// ValidateBasic performs basic validation. +func (m *VoteMessage) ValidateBasic() error { + return m.Vote.ValidateBasic() +} + // String returns a string representation. func (m *VoteMessage) String() string { return fmt.Sprintf("[Vote %v]", m.Vote) @@ -1472,6 +1556,23 @@ type HasVoteMessage struct { Index int } +// ValidateBasic performs basic validation. +func (m *HasVoteMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + if m.Round < 0 { + return errors.New("Negative Round") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("Invalid Type") + } + if m.Index < 0 { + return errors.New("Negative Index") + } + return nil +} + // String returns a string representation. func (m *HasVoteMessage) String() string { return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type) @@ -1487,6 +1588,23 @@ type VoteSetMaj23Message struct { BlockID types.BlockID } +// ValidateBasic performs basic validation. +func (m *VoteSetMaj23Message) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + if m.Round < 0 { + return errors.New("Negative Round") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("Invalid Type") + } + if err := m.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong BlockID: %v", err) + } + return nil +} + // String returns a string representation. func (m *VoteSetMaj23Message) String() string { return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID) @@ -1503,6 +1621,24 @@ type VoteSetBitsMessage struct { Votes *cmn.BitArray } +// ValidateBasic performs basic validation. +func (m *VoteSetBitsMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + if m.Round < 0 { + return errors.New("Negative Round") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("Invalid Type") + } + if err := m.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong BlockID: %v", err) + } + // NOTE: Votes.Size() can be zero if the node does not have any + return nil +} + // String returns a string representation. func (m *VoteSetBitsMessage) String() string { return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes) @@ -1515,6 +1651,11 @@ type ProposalHeartbeatMessage struct { Heartbeat *types.Heartbeat } +// ValidateBasic performs basic validation. +func (m *ProposalHeartbeatMessage) ValidateBasic() error { + return m.Heartbeat.ValidateBasic() +} + // String returns a string representation. 
func (m *ProposalHeartbeatMessage) String() string { return fmt.Sprintf("[HEARTBEAT %v]", m.Heartbeat) diff --git a/consensus/replay.go b/consensus/replay.go index fcff877fd..abc43eb57 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -264,8 +264,12 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // Replay all blocks since appBlockHeight and ensure the result matches the current state. // Returns the final AppHash or an error. -func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) { - +func (h *Handshaker) ReplayBlocks( + state sm.State, + appHash []byte, + appBlockHeight int64, + proxyApp proxy.AppConns, +) ([]byte, error) { storeBlockHeight := h.store.Height() stateBlockHeight := state.LastBlockHeight h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight) diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index d3f6468bf..ef4236118 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -26,8 +26,15 @@ const ( RoundStepPrecommitWait = RoundStepType(0x07) // Did receive any +2/3 precommits, start timeout RoundStepCommit = RoundStepType(0x08) // Entered commit state machine // NOTE: RoundStepNewHeight acts as RoundStepCommitWait. + + // NOTE: Update IsValid method if you change this! ) +// IsValid returns true if the step is valid, false if unknown/undefined. +func (rs RoundStepType) IsValid() bool { + return uint8(rs) >= 0x01 && uint8(rs) <= 0x08 +} + // String returns a string func (rs RoundStepType) String() string { switch rs { diff --git a/crypto/crypto.go b/crypto/crypto.go index 2462b0a98..b3526f881 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -6,6 +6,7 @@ import ( ) const ( + // AddressSize is the size of a pubkey address. AddressSize = tmhash.TruncatedSize ) diff --git a/evidence/reactor.go b/evidence/reactor.go index 52eb4a56f..32753b2b9 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -74,6 +74,13 @@ func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { evR.Switch.StopPeerForError(src, err) return } + + if err = msg.ValidateBasic(); err != nil { + evR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + evR.Switch.StopPeerForError(src, err) + return + } + evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) switch msg := msg.(type) { @@ -191,7 +198,9 @@ type PeerState interface { // Messages // EvidenceMessage is a message sent or received by the EvidenceReactor. -type EvidenceMessage interface{} +type EvidenceMessage interface { + ValidateBasic() error +} func RegisterEvidenceMessages(cdc *amino.Codec) { cdc.RegisterInterface((*EvidenceMessage)(nil), nil) @@ -209,11 +218,21 @@ func decodeMsg(bz []byte) (msg EvidenceMessage, err error) { //------------------------------------- -// EvidenceMessage contains a list of evidence. +// EvidenceListMessage contains a list of evidence. type EvidenceListMessage struct { Evidence []types.Evidence } +// ValidateBasic performs basic validation. +func (m *EvidenceListMessage) ValidateBasic() error { + for i, ev := range m.Evidence { + if err := ev.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid evidence (#%d): %v", i, err) + } + } + return nil +} + // String returns a string representation of the EvidenceListMessage. 
func (m *EvidenceListMessage) String() string { return fmt.Sprintf("[EvidenceListMessage %v]", m.Evidence) diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index e0c0e0b9c..61710bbf2 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -648,6 +648,10 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookNonRoutable{addr} } + if !addr.Valid() { + return ErrAddrBookInvalidAddr{addr} + } + // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. if _, ok := a.ourAddrs[addr.String()]; ok { return ErrAddrBookSelf{addr} diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 7f660bdc5..fbee748ac 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -46,3 +46,11 @@ type ErrAddrBookNilAddr struct { func (err ErrAddrBookNilAddr) Error() string { return fmt.Sprintf("Cannot add a nil address. Got (addr, src) = (%v, %v)", err.Addr, err.Src) } + +type ErrAddrBookInvalidAddr struct { + Addr *p2p.NetAddress +} + +func (err ErrAddrBookInvalidAddr) Error() string { + return fmt.Sprintf("Cannot add invalid address %v", err.Addr) +} diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index c919794ab..46a12c488 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -288,21 +288,37 @@ func (r *PEXReactor) RequestAddrs(p Peer) { func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { id := string(src.ID()) if !r.requestsSent.Has(id) { - return cmn.NewError("Received unsolicited pexAddrsMessage") + return errors.New("Unsolicited pexAddrsMessage") } r.requestsSent.Delete(id) srcAddr := src.NodeInfo().NetAddress() for _, netAddr := range addrs { - // NOTE: GetSelection methods should never return nil addrs + // Validate netAddr. Disconnect from a peer if it sends us invalid data. if netAddr == nil { - return cmn.NewError("received nil addr") + return errors.New("nil address in pexAddrsMessage") + } + // TODO: extract validating logic from NewNetAddressStringWithOptionalID + // and put it in netAddr#Valid (#2722) + na, err := p2p.NewNetAddressString(netAddr.String()) + if err != nil { + return fmt.Errorf("%s address in pexAddrsMessage is invalid: %v", + netAddr.String(), + err, + ) } - err := r.book.AddAddress(netAddr, srcAddr) - r.logErrAddrBook(err) + // NOTE: we check netAddr validity and routability in book#AddAddress. + err = r.book.AddAddress(na, srcAddr) + if err != nil { + r.logErrAddrBook(err) + // XXX: should we be strict about incoming data and disconnect from a + // peer here too? + continue + } - // If this address came from a seed node, try to connect to it without waiting. + // If this address came from a seed node, try to connect to it without + // waiting. for _, seedAddr := range r.seedAddrs { if seedAddr.Equals(srcAddr) { r.ensurePeers() diff --git a/state/validation.go b/state/validation.go index 345224843..e28d40e8b 100644 --- a/state/validation.go +++ b/state/validation.go @@ -21,22 +21,19 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { // Validate basic info. if block.Version != state.Version.Consensus { - return fmt.Errorf( - "Wrong Block.Header.Version. Expected %v, got %v", + return fmt.Errorf("Wrong Block.Header.Version. Expected %v, got %v", state.Version.Consensus, block.Version, ) } if block.ChainID != state.ChainID { - return fmt.Errorf( - "Wrong Block.Header.ChainID. Expected %v, got %v", + return fmt.Errorf("Wrong Block.Header.ChainID. 
Expected %v, got %v", state.ChainID, block.ChainID, ) } if block.Height != state.LastBlockHeight+1 { - return fmt.Errorf( - "Wrong Block.Header.Height. Expected %v, got %v", + return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", state.LastBlockHeight+1, block.Height, ) @@ -44,16 +41,15 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { // Validate prev block info. if !block.LastBlockID.Equals(state.LastBlockID) { - return fmt.Errorf( - "Wrong Block.Header.LastBlockID. Expected %v, got %v", + return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", state.LastBlockID, block.LastBlockID, ) } + newTxs := int64(len(block.Data.Txs)) if block.TotalTxs != state.LastBlockTotalTx+newTxs { - return fmt.Errorf( - "Wrong Block.Header.TotalTxs. Expected %v, got %v", + return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", state.LastBlockTotalTx+newTxs, block.TotalTxs, ) @@ -61,46 +57,44 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { // Validate app info if !bytes.Equal(block.AppHash, state.AppHash) { - return fmt.Errorf( - "Wrong Block.Header.AppHash. Expected %X, got %v", + return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", state.AppHash, block.AppHash, ) } if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) { - return fmt.Errorf( - "Wrong Block.Header.ConsensusHash. Expected %X, got %v", + return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", state.ConsensusParams.Hash(), block.ConsensusHash, ) } if !bytes.Equal(block.LastResultsHash, state.LastResultsHash) { - return fmt.Errorf( - "Wrong Block.Header.LastResultsHash. Expected %X, got %v", + return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", state.LastResultsHash, block.LastResultsHash, ) } if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) { - return fmt.Errorf( - "Wrong Block.Header.ValidatorsHash. Expected %X, got %v", + return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", state.Validators.Hash(), block.ValidatorsHash, ) } if !bytes.Equal(block.NextValidatorsHash, state.NextValidators.Hash()) { - return fmt.Errorf("Wrong Block.Header.NextValidatorsHash. Expected %X, got %v", state.NextValidators.Hash(), block.NextValidatorsHash) + return fmt.Errorf("Wrong Block.Header.NextValidatorsHash. Expected %X, got %v", + state.NextValidators.Hash(), + block.NextValidatorsHash, + ) } // Validate block LastCommit. if block.Height == 1 { if len(block.LastCommit.Precommits) != 0 { - return errors.New("Block at height 1 (first block) should have no LastCommit precommits") + return errors.New("Block at height 1 can't have LastCommit precommits") } } else { if len(block.LastCommit.Precommits) != state.LastValidators.Size() { - return fmt.Errorf( - "Invalid block commit size. Expected %v, got %v", + return fmt.Errorf("Invalid block commit size. 
Expected %v, got %v", state.LastValidators.Size(), len(block.LastCommit.Precommits), ) @@ -115,8 +109,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { // Validate block Time if block.Height > 1 { if !block.Time.After(state.LastBlockTime) { - return fmt.Errorf( - "Block time %v not greater than last block time %v", + return fmt.Errorf("Block time %v not greater than last block time %v", block.Time, state.LastBlockTime, ) @@ -124,8 +117,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { medianTime := MedianTime(block.LastCommit, state.LastValidators) if !block.Time.Equal(medianTime) { - return fmt.Errorf( - "Invalid block time. Expected %v, got %v", + return fmt.Errorf("Invalid block time. Expected %v, got %v", medianTime, block.Time, ) @@ -133,8 +125,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } else if block.Height == 1 { genesisTime := state.LastBlockTime if !block.Time.Equal(genesisTime) { - return fmt.Errorf( - "Block time %v is not equal to genesis time %v", + return fmt.Errorf("Block time %v is not equal to genesis time %v", block.Time, genesisTime, ) @@ -160,8 +151,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { // a legit address and a known validator. if len(block.ProposerAddress) != crypto.AddressSize || !state.Validators.HasAddress(block.ProposerAddress) { - return fmt.Errorf( - "Block.Header.ProposerAddress, %X, is not a validator", + return fmt.Errorf("Block.Header.ProposerAddress, %X, is not a validator", block.ProposerAddress, ) } diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 6bb320be8..1a64d4173 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -14,6 +14,7 @@ ENV GOBIN $GOPATH/bin WORKDIR $REPO # Copy in the code +# TODO: rewrite to only copy Makefile & other files? COPY . $REPO # Install the vendored dependencies @@ -21,16 +22,18 @@ COPY . $REPO RUN make get_tools RUN make get_vendor_deps -# Now copy in the code -# NOTE: this will overwrite whatever is in vendor/ -COPY . $REPO - # install ABCI CLI RUN make install_abci # install Tendermint RUN make install +RUN tendermint testnet --node-dir-prefix="mach" --v=4 --populate-persistent-peers=false --o=$REPO/test/p2p/data + +# Now copy in the code +# NOTE: this will overwrite whatever is in vendor/ +COPY . $REPO + # expose the volume for debugging VOLUME $REPO diff --git a/test/p2p/README.md b/test/p2p/README.md index 4ee3690af..956ce906c 100644 --- a/test/p2p/README.md +++ b/test/p2p/README.md @@ -19,7 +19,7 @@ docker network create --driver bridge --subnet 172.57.0.0/16 my_testnet ``` This gives us a new network with IP addresses in the rage `172.57.0.0 - 172.57.255.255`. -Peers on the network can have any IP address in this range. +Peers on the network can have any IP address in this range. For our four node network, let's pick `172.57.0.101 - 172.57.0.104`. Since we use Tendermint's default listening port of 26656, our list of seed nodes will look like: @@ -37,7 +37,7 @@ for i in $(seq 1 4); do --ip="172.57.0.$((100 + $i))" \ --name local_testnet_$i \ --entrypoint tendermint \ - -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \ + -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((i-1)) \ tendermint_tester node --p2p.persistent_peers 172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656 --proxy_app=kvstore done ``` @@ -47,8 +47,5 @@ If you now run `docker ps`, you'll see your containers! 
We can confirm they are making blocks by checking the `/status` message using `curl` and `jq` to pretty print the output json: ``` -curl 172.57.0.101:26657/status | jq . +curl 172.57.0.101:26657/status | jq . ``` - - - diff --git a/test/p2p/data/mach1/core/config/genesis.json b/test/p2p/data/mach1/core/config/genesis.json deleted file mode 100644 index 515c10714..000000000 --- a/test/p2p/data/mach1/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach1/core/config/node_key.json b/test/p2p/data/mach1/core/config/node_key.json deleted file mode 100644 index 4fa960850..000000000 --- a/test/p2p/data/mach1/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "BpYtFp8xSrudBa5aBLRuSPD72PGDAUm0dJORDL3Kd5YJbluUzRefVFrjwoHZv1yeDj2P9xkEi2L3hJCUz/qFkQ==" - } -} diff --git a/test/p2p/data/mach1/core/config/priv_validator.json b/test/p2p/data/mach1/core/config/priv_validator.json deleted file mode 100644 index ea2a01f5c..000000000 --- a/test/p2p/data/mach1/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "AE47BBD4B3ACD80BFE17F6E0C66C5B8492A81AE4", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "VHqgfHqM4WxcsqQMbCbRWwoylgQQqfHqblC2NvGrOJq+iTPf8WAMAm40cY8XhaTN6rkMNWmLOU44tpR66R3hFg==" - } -} diff --git a/test/p2p/data/mach2/core/config/genesis.json b/test/p2p/data/mach2/core/config/genesis.json deleted file mode 100644 index 515c10714..000000000 --- a/test/p2p/data/mach2/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach2/core/config/node_key.json b/test/p2p/data/mach2/core/config/node_key.json deleted file mode 100644 index 6eb151106..000000000 --- a/test/p2p/data/mach2/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": 
"tendermint/PrivKeyEd25519", - "value": "uM6LDVE4wQIIUmq9rc6RxzX8zEGG4G4Jcuw15klzQopF68YfJM4bkbPSavurEcJ4nvBMusKBg2GcARFrZqnFKA==" - } -} diff --git a/test/p2p/data/mach2/core/config/priv_validator.json b/test/p2p/data/mach2/core/config/priv_validator.json deleted file mode 100644 index 6e0cd7f8f..000000000 --- a/test/p2p/data/mach2/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "5D61EE46CCE91F579086522D7FD8CEC3F854E946", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "0EeInmBQL8MSnQq38zSxg47Z7R7Nmcu5a3GtWr9agUNtxTRGUyMSZYfSoqk7WdaJtxcHOx3paKJabvE9WVMYrQ==" - } -} diff --git a/test/p2p/data/mach3/core/config/genesis.json b/test/p2p/data/mach3/core/config/genesis.json deleted file mode 100644 index 515c10714..000000000 --- a/test/p2p/data/mach3/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach3/core/config/node_key.json b/test/p2p/data/mach3/core/config/node_key.json deleted file mode 100644 index 0885bcf9c..000000000 --- a/test/p2p/data/mach3/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "kT3orG0YkipT9rAZbvAjtGk/7Pu1ZeCE8LSUF2jz2uiSs1rdlUVi/gccRlvCRLKvrtSicOyEkmk0FHPOGS3mgg==" - } -} diff --git a/test/p2p/data/mach3/core/config/priv_validator.json b/test/p2p/data/mach3/core/config/priv_validator.json deleted file mode 100644 index ec68ca7bb..000000000 --- a/test/p2p/data/mach3/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "705F9DA2CC7D7AF5F4519455ED99622E40E439A1", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "waTkfzSfxfVW9Kmie6d2uUQkwxK6ps9u5EuGc0jXw/KuZ6xpfRNaoLRgHqV+qrP+v0uqTyKcRaWYwphbEvzRoQ==" - } -} diff --git a/test/p2p/data/mach4/core/config/genesis.json b/test/p2p/data/mach4/core/config/genesis.json deleted file mode 100644 index 515c10714..000000000 --- a/test/p2p/data/mach4/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": 
"tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach4/core/config/node_key.json b/test/p2p/data/mach4/core/config/node_key.json deleted file mode 100644 index d6a5d79c2..000000000 --- a/test/p2p/data/mach4/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "QIIm8/QEEawiJi3Zozv+J9b+1CufCEkGs3lxGMlRy4L4FVIXCoXJTwYIrotZtwoMqLYEqQV1hbKKJmFA3GFelw==" - } -} diff --git a/test/p2p/data/mach4/core/config/priv_validator.json b/test/p2p/data/mach4/core/config/priv_validator.json deleted file mode 100644 index 468550ea8..000000000 --- a/test/p2p/data/mach4/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "D1054266EC9EEA511ED9A76DEFD520BBE1B5E850", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "xMw+0o8CDC29qYvNvwjDztNwRw508l6TjV0pXo49KwyevI9YztS0bc1auKulkd0lPNfLUDcnP9oyvAtkYcTv2Q==" - } -} diff --git a/test/p2p/ip_plus_id.sh b/test/p2p/ip_plus_id.sh index 0d2248fe0..95871d3f1 100755 --- a/test/p2p/ip_plus_id.sh +++ b/test/p2p/ip_plus_id.sh @@ -3,5 +3,5 @@ set -eu ID=$1 DOCKER_IMAGE=$2 -NODEID="$(docker run --rm -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core $DOCKER_IMAGE tendermint show_node_id)" +NODEID="$(docker run --rm -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1)) $DOCKER_IMAGE tendermint show_node_id)" echo "$NODEID@172.57.0.$((100+$ID))" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index ad04d000f..63d46f8d5 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -15,13 +15,15 @@ echo "starting tendermint peer ID=$ID" # NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be # treated as one flag. +# test/p2p/data/mach$((ID-1)) data is generated in test/docker/Dockerfile using +# the tendermint testnet command. if [[ "$ID" == "x" ]]; then # Set "x" to "1" to print to console. docker run \ --net="$NETWORK_NAME" \ --ip=$(test/p2p/ip.sh "$ID") \ --name "local_testnet_$ID" \ --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1))" \ -e GOMAXPROCS=1 \ --log-driver=syslog \ --log-opt syslog-address=udp://127.0.0.1:5514 \ @@ -34,7 +36,7 @@ else --ip=$(test/p2p/ip.sh "$ID") \ --name "local_testnet_$ID" \ --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1))" \ -e GOMAXPROCS=1 \ --log-driver=syslog \ --log-opt syslog-address=udp://127.0.0.1:5514 \ diff --git a/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh index 9c58db30c..06f9212fd 100644 --- a/test/p2p/pex/test_addrbook.sh +++ b/test/p2p/pex/test_addrbook.sh @@ -18,7 +18,7 @@ echo "1. 
restart peer $ID" docker stop "local_testnet_$ID" echo "stopped local_testnet_$ID" # preserve addrbook.json -docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" "/tmp/addrbook.json" +docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" "/tmp/addrbook.json" set +e #CIRCLE docker rm -vf "local_testnet_$ID" set -e @@ -32,11 +32,11 @@ bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p # Now we know that the node is up. -docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" +docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" echo "with the following addrbook:" cat /tmp/addrbook.json # exec doesn't work on circle -# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" +# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" echo "" echo "----------------------------------------------------------------------" diff --git a/types/block.go b/types/block.go index 46ad73a71..4ae51d4df 100644 --- a/types/block.go +++ b/types/block.go @@ -2,12 +2,14 @@ package types import ( "bytes" - "errors" "fmt" "strings" "sync" "time" + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/version" @@ -57,54 +59,117 @@ func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) // ValidateBasic performs basic validation that doesn't involve state data. // It checks the internal consistency of the block. +// Further validation is done using state#ValidateBlock. func (b *Block) ValidateBasic() error { if b == nil { - return errors.New("Nil blocks are invalid") + return errors.New("nil block") } b.mtx.Lock() defer b.mtx.Unlock() + if len(b.ChainID) > MaxChainIDLen { + return fmt.Errorf("ChainID is too long. Max is %d, got %d", MaxChainIDLen, len(b.ChainID)) + } + if b.Height < 0 { - return fmt.Errorf( - "Negative Block.Header.Height: %v", - b.Height, - ) + return errors.New("Negative Header.Height") + } else if b.Height == 0 { + return errors.New("Zero Header.Height") } + // NOTE: Timestamp validation is subtle and handled elsewhere. + newTxs := int64(len(b.Data.Txs)) if b.NumTxs != newTxs { - return fmt.Errorf( - "Wrong Block.Header.NumTxs. Expected %v, got %v", + return fmt.Errorf("Wrong Header.NumTxs. Expected %v, got %v", newTxs, b.NumTxs, ) } + + // TODO: fix tests so we can do this + /*if b.TotalTxs < b.NumTxs { + return fmt.Errorf("Header.TotalTxs (%d) is less than Header.NumTxs (%d)", b.TotalTxs, b.NumTxs) + }*/ + if b.TotalTxs < 0 { + return errors.New("Negative Header.TotalTxs") + } + + if err := b.LastBlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong Header.LastBlockID: %v", err) + } + + // Validate the last commit and its hash. 
+ if b.Header.Height > 1 { + if b.LastCommit == nil { + return errors.New("nil LastCommit") + } + if err := b.LastCommit.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong LastCommit") + } + } + if err := ValidateHash(b.LastCommitHash); err != nil { + return fmt.Errorf("Wrong Header.LastCommitHash: %v", err) + } if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - return fmt.Errorf( - "Wrong Block.Header.LastCommitHash. Expected %v, got %v", - b.LastCommitHash, + return fmt.Errorf("Wrong Header.LastCommitHash. Expected %v, got %v", b.LastCommit.Hash(), + b.LastCommitHash, ) } - if b.Header.Height != 1 { - if err := b.LastCommit.ValidateBasic(); err != nil { - return err - } + + // Validate the hash of the transactions. + // NOTE: b.Data.Txs may be nil, but b.Data.Hash() + // still works fine + if err := ValidateHash(b.DataHash); err != nil { + return fmt.Errorf("Wrong Header.DataHash: %v", err) } if !bytes.Equal(b.DataHash, b.Data.Hash()) { return fmt.Errorf( - "Wrong Block.Header.DataHash. Expected %v, got %v", - b.DataHash, + "Wrong Header.DataHash. Expected %v, got %v", b.Data.Hash(), + b.DataHash, ) } + + // Basic validation of hashes related to application data. + // Will validate fully against state in state#ValidateBlock. + if err := ValidateHash(b.ValidatorsHash); err != nil { + return fmt.Errorf("Wrong Header.ValidatorsHash: %v", err) + } + if err := ValidateHash(b.NextValidatorsHash); err != nil { + return fmt.Errorf("Wrong Header.NextValidatorsHash: %v", err) + } + if err := ValidateHash(b.ConsensusHash); err != nil { + return fmt.Errorf("Wrong Header.ConsensusHash: %v", err) + } + // NOTE: AppHash is arbitrary length + if err := ValidateHash(b.LastResultsHash); err != nil { + return fmt.Errorf("Wrong Header.LastResultsHash: %v", err) + } + + // Validate evidence and its hash. + if err := ValidateHash(b.EvidenceHash); err != nil { + return fmt.Errorf("Wrong Header.EvidenceHash: %v", err) + } + // NOTE: b.Evidence.Evidence may be nil, but we're just looping. + for i, ev := range b.Evidence.Evidence { + if err := ev.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid evidence (#%d): %v", i, err) + } + } if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - return fmt.Errorf( - "Wrong Block.Header.EvidenceHash. Expected %v, got %v", + return fmt.Errorf("Wrong Header.EvidenceHash. Expected %v, got %v", b.EvidenceHash, b.Evidence.Hash(), ) } + + if len(b.ProposerAddress) != crypto.AddressSize { + return fmt.Errorf("Expected len(Header.ProposerAddress) to be %d, got %d", + crypto.AddressSize, len(b.ProposerAddress)) + } + return nil } @@ -719,6 +784,18 @@ func (blockID BlockID) Key() string { return string(blockID.Hash) + string(bz) } +// ValidateBasic performs basic validation. +func (blockID BlockID) ValidateBasic() error { + // Hash can be empty in case of POLBlockID in Proposal. 
+ if err := ValidateHash(blockID.Hash); err != nil { + return fmt.Errorf("Wrong Hash") + } + if err := blockID.PartsHeader.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong PartsHeader: %v", err) + } + return nil +} + // String returns a human readable string representation of the BlockID func (blockID BlockID) String() string { return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader) diff --git a/types/block_test.go b/types/block_test.go index 46881a099..cdea293f0 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -80,11 +80,13 @@ func TestBlockValidateBasic(t *testing.T) { blk.EvidenceHash = []byte("something else") }, true}, } - for _, tc := range testCases { + for i, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { block := MakeBlock(h, txs, commit, evList) + block.ProposerAddress = valSet.GetProposer().Address tc.malleateBlock(block) - assert.Equal(t, tc.expErr, block.ValidateBasic() != nil, "ValidateBasic had an unexpected result") + err = block.ValidateBasic() + assert.Equal(t, tc.expErr, err != nil, "#%d: %v", i, err) }) } } diff --git a/types/evidence.go b/types/evidence.go index d1e15c819..fb2423458 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" + "github.com/pkg/errors" "github.com/tendermint/tendermint/crypto/tmhash" amino "github.com/tendermint/go-amino" @@ -60,6 +61,7 @@ type Evidence interface { Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence Equal(Evidence) bool // check equality of evidence + ValidateBasic() error String() string } @@ -172,6 +174,23 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool { return bytes.Equal(dveHash, evHash) } +// ValidateBasic performs basic validation. +func (dve *DuplicateVoteEvidence) ValidateBasic() error { + if len(dve.PubKey.Bytes()) == 0 { + return errors.New("Empty PubKey") + } + if dve.VoteA == nil || dve.VoteB == nil { + return fmt.Errorf("One or both of the votes are empty %v, %v", dve.VoteA, dve.VoteB) + } + if err := dve.VoteA.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid VoteA: %v", err) + } + if err := dve.VoteB.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid VoteB: %v", err) + } + return nil +} + //----------------------------------------------------------------- // UNSTABLE @@ -201,6 +220,7 @@ func (e MockGoodEvidence) Equal(ev Evidence) bool { return e.Height_ == e2.Height_ && bytes.Equal(e.Address_, e2.Address_) } +func (e MockGoodEvidence) ValidateBasic() error { return nil } func (e MockGoodEvidence) String() string { return fmt.Sprintf("GoodEvidence: %d/%s", e.Height_, e.Address_) } @@ -218,6 +238,7 @@ func (e MockBadEvidence) Equal(ev Evidence) bool { return e.Height_ == e2.Height_ && bytes.Equal(e.Address_, e2.Address_) } +func (e MockBadEvidence) ValidateBasic() error { return nil } func (e MockBadEvidence) String() string { return fmt.Sprintf("BadEvidence: %d/%s", e.Height_, e.Address_) } diff --git a/types/heartbeat.go b/types/heartbeat.go index 9dea039e0..986e9384f 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -3,6 +3,8 @@ package types import ( "fmt" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -50,3 +52,32 @@ func (heartbeat *Heartbeat) String() string { heartbeat.Height, heartbeat.Round, heartbeat.Sequence, fmt.Sprintf("/%X.../", cmn.Fingerprint(heartbeat.Signature[:]))) } + +// ValidateBasic performs basic validation. 
+func (heartbeat *Heartbeat) ValidateBasic() error { + if len(heartbeat.ValidatorAddress) != crypto.AddressSize { + return fmt.Errorf("Expected ValidatorAddress size to be %d bytes, got %d bytes", + crypto.AddressSize, + len(heartbeat.ValidatorAddress), + ) + } + if heartbeat.ValidatorIndex < 0 { + return errors.New("Negative ValidatorIndex") + } + if heartbeat.Height < 0 { + return errors.New("Negative Height") + } + if heartbeat.Round < 0 { + return errors.New("Negative Round") + } + if heartbeat.Sequence < 0 { + return errors.New("Negative Sequence") + } + if len(heartbeat.Signature) == 0 { + return errors.New("Signature is missing") + } + if len(heartbeat.Signature) > MaxSignatureSize { + return fmt.Errorf("Signature is too big (max: %d)", MaxSignatureSize) + } + return nil +} diff --git a/types/part_set.go b/types/part_set.go index 812b1c2fd..af59851c9 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -2,11 +2,12 @@ package types import ( "bytes" - "errors" "fmt" "io" "sync" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" @@ -36,6 +37,17 @@ func (part *Part) Hash() []byte { return part.hash } +// ValidateBasic performs basic validation. +func (part *Part) ValidateBasic() error { + if part.Index < 0 { + return errors.New("Negative Index") + } + if len(part.Bytes) > BlockPartSizeBytes { + return fmt.Errorf("Too big (max: %d)", BlockPartSizeBytes) + } + return nil +} + func (part *Part) String() string { return part.StringIndented("") } @@ -70,6 +82,18 @@ func (psh PartSetHeader) Equals(other PartSetHeader) bool { return psh.Total == other.Total && bytes.Equal(psh.Hash, other.Hash) } +// ValidateBasic performs basic validation. +func (psh PartSetHeader) ValidateBasic() error { + if psh.Total < 0 { + return errors.New("Negative Total") + } + // Hash can be empty in case of POLBlockID.PartsHeader in Proposal. + if err := ValidateHash(psh.Hash); err != nil { + return errors.Wrap(err, "Wrong Hash") + } + return nil +} + //------------------------------------- type PartSet struct { diff --git a/types/proposal.go b/types/proposal.go index 09cfd1967..f3b62aae7 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -43,6 +43,35 @@ func NewProposal(height int64, round int, polRound int, blockID BlockID) *Propos } } +// ValidateBasic performs basic validation. +func (p *Proposal) ValidateBasic() error { + if p.Type != ProposalType { + return errors.New("Invalid Type") + } + if p.Height < 0 { + return errors.New("Negative Height") + } + if p.Round < 0 { + return errors.New("Negative Round") + } + if p.POLRound < -1 { + return errors.New("Negative POLRound (exception: -1)") + } + if err := p.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong BlockID: %v", err) + } + + // NOTE: Timestamp validation is subtle and handled elsewhere. + + if len(p.Signature) == 0 { + return errors.New("Signature is missing") + } + if len(p.Signature) > MaxSignatureSize { + return fmt.Errorf("Signature is too big (max: %d)", MaxSignatureSize) + } + return nil +} + // String returns a string representation of the Proposal. 
func (p *Proposal) String() string { return fmt.Sprintf("Proposal{%v/%v (%v, %v) %X @ %s}", diff --git a/types/signable.go b/types/signable.go index cc6498882..baabdff08 100644 --- a/types/signable.go +++ b/types/signable.go @@ -1,5 +1,17 @@ package types +import ( + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var ( + // MaxSignatureSize is a maximum allowed signature size for the Heartbeat, + // Proposal and Vote. + // XXX: secp256k1 does not have Size nor MaxSize defined. + MaxSignatureSize = cmn.MaxInt(ed25519.SignatureSize, 64) +) + // Signable is an interface for all signable things. // It typically removes signatures before serializing. // SignBytes returns the bytes to be signed diff --git a/types/signed_msg_type.go b/types/signed_msg_type.go index cc3ddbdc1..10e7c70c0 100644 --- a/types/signed_msg_type.go +++ b/types/signed_msg_type.go @@ -15,11 +15,10 @@ const ( HeartbeatType SignedMsgType = 0x30 ) -func IsVoteTypeValid(type_ SignedMsgType) bool { - switch type_ { - case PrevoteType: - return true - case PrecommitType: +// IsVoteTypeValid returns true if t is a valid vote type. +func IsVoteTypeValid(t SignedMsgType) bool { + switch t { + case PrevoteType, PrecommitType: return true default: return false diff --git a/types/validation.go b/types/validation.go new file mode 100644 index 000000000..1271cfd94 --- /dev/null +++ b/types/validation.go @@ -0,0 +1,40 @@ +package types + +import ( + "fmt" + "time" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmtime "github.com/tendermint/tendermint/types/time" +) + +// ValidateTime does a basic time validation ensuring time does not drift too +// much: +/- one year. +// TODO: reduce this to eg 1 day +// NOTE: DO NOT USE in ValidateBasic methods in this package. This function +// can only be used for real time validation, like on proposals and votes +// in the consensus. If consensus is stuck, and rounds increase for more than a day, +// having only a 1-day band here could break things... +// Can't use for validating blocks because we may be syncing years worth of history. +func ValidateTime(t time.Time) error { + var ( + now = tmtime.Now() + oneYear = 8766 * time.Hour + ) + if t.Before(now.Add(-oneYear)) || t.After(now.Add(oneYear)) { + return fmt.Errorf("Time drifted too much. Expected: -1 < %v < 1 year", now) + } + return nil +} + +// ValidateHash returns an error if the hash is not empty, but its +// size != tmhash.Size. +func ValidateHash(h []byte) error { + if len(h) > 0 && len(h) != tmhash.Size { + return fmt.Errorf("Expected size to be %d bytes, got %d bytes", + tmhash.Size, + len(h), + ) + } + return nil +} diff --git a/types/vote.go b/types/vote.go index 1d7e9cf6f..bf14d403b 100644 --- a/types/vote.go +++ b/types/vote.go @@ -46,7 +46,8 @@ func NewConflictingVoteError(val *Validator, voteA, voteB *Vote) *ErrVoteConflic // Address is hex bytes. type Address = crypto.Address -// Represents a prevote, precommit, or commit vote from validators for consensus. +// Vote represents a prevote, precommit, or commit vote from validators for +// consensus. type Vote struct { Type SignedMsgType `json:"type"` Height int64 `json:"height"` @@ -108,3 +109,38 @@ func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { } return nil } + +// ValidateBasic performs basic validation. 
+func (vote *Vote) ValidateBasic() error { + if !IsVoteTypeValid(vote.Type) { + return errors.New("Invalid Type") + } + if vote.Height < 0 { + return errors.New("Negative Height") + } + if vote.Round < 0 { + return errors.New("Negative Round") + } + + // NOTE: Timestamp validation is subtle and handled elsewhere. + + if err := vote.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong BlockID: %v", err) + } + if len(vote.ValidatorAddress) != crypto.AddressSize { + return fmt.Errorf("Expected ValidatorAddress size to be %d bytes, got %d bytes", + crypto.AddressSize, + len(vote.ValidatorAddress), + ) + } + if vote.ValidatorIndex < 0 { + return errors.New("Negative ValidatorIndex") + } + if len(vote.Signature) == 0 { + return errors.New("Signature is missing") + } + if len(vote.Signature) > MaxSignatureSize { + return fmt.Errorf("Signature is too big (max: %d)", MaxSignatureSize) + } + return nil +} From 80e4fe6c0d72df0a41ff6909cd0d626d5980894b Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 2 Nov 2018 10:16:29 +0100 Subject: [PATCH 112/113] [ADR] [DRAFT] pubsub 2.0 (#2532) * pubsub adr Refs #951, #1879, #1880 * highlight question * fix typos after Ismail's review --- docs/architecture/adr-033-pubsub.md | 122 ++++++++++++++++++++++++++++ docs/architecture/adr-template.md | 4 + 2 files changed, 126 insertions(+) create mode 100644 docs/architecture/adr-033-pubsub.md diff --git a/docs/architecture/adr-033-pubsub.md b/docs/architecture/adr-033-pubsub.md new file mode 100644 index 000000000..0ef0cae62 --- /dev/null +++ b/docs/architecture/adr-033-pubsub.md @@ -0,0 +1,122 @@ +# ADR 033: pubsub 2.0 + +Author: Anton Kaliaev (@melekes) + +## Changelog + +02-10-2018: Initial draft + +## Context + +Since the initial version of the pubsub, there's been a number of issues +raised: #951, #1879, #1880. Some of them are high-level issues questioning the +core design choices made. Others are minor and mostly about the interface of +`Subscribe()` / `Publish()` functions. + +### Sync vs Async + +Now, when publishing a message to subscribers, we can do it in a goroutine: + +_using channels for data transmission_ +```go +for each subscriber { + out := subscriber.outc + go func() { + out <- msg + } +} +``` + +_by invoking callback functions_ +```go +for each subscriber { + go subscriber.callbackFn() +} +``` + +This gives us greater performance and allows us to avoid "slow client problem" +(when other subscribers have to wait for a slow subscriber). A pool of +goroutines can be used to avoid uncontrolled memory growth. + +In certain cases, this is what you want. But in our case, because we need +strict ordering of events (if event A was published before B, the guaranteed +delivery order will be A -> B), we can't use goroutines. + +There is also a question whenever we should have a non-blocking send: + +```go +for each subscriber { + out := subscriber.outc + select { + case out <- msg: + default: + log("subscriber %v buffer is full, skipping...") + } +} +``` + +This fixes the "slow client problem", but there is no way for a slow client to +know if it had missed a message. On the other hand, if we're going to stick +with blocking send, **devs must always ensure subscriber's handling code does not +block**. As you can see, there is an implicit choice between ordering guarantees +and using goroutines. + +The interim option is to run goroutines pool for a single message, wait for all +goroutines to finish. 
This will solve "slow client problem", but we'd still +have to wait `max(goroutine_X_time)` before we can publish the next message. +My opinion: not worth doing. + +### Channels vs Callbacks + +Yet another question is whether we should use channels for message transmission or +call subscriber-defined callback functions. Callback functions give subscribers +more flexibility - you can use mutexes in there, channels, spawn goroutines, +anything you really want. But they also carry local scope, which can result in +memory leaks and/or memory usage increase. + +Go channels are de-facto standard for carrying data between goroutines. + +**Question: Is it worth switching to callback functions?** + +### Why `Subscribe()` accepts an `out` channel? + +Because in our tests, we create buffered channels (cap: 1). Alternatively, we +can make capacity an argument. + +## Decision + +Change Subscribe() function to return out channel: + +```go +// outCap can be used to set capacity of out channel (unbuffered by default). +Subscribe(ctx context.Context, clientID string, query Query, outCap... int) (out <-chan interface{}, err error) { +``` + +It's more idiomatic since we're closing it during Unsubscribe/UnsubscribeAll calls. + +Also, we should make tags available to subscribers: + +```go +type MsgAndTags struct { + Msg interface{} + Tags TagMap +} + +// outCap can be used to set capacity of out channel (unbuffered by default). +Subscribe(ctx context.Context, clientID string, query Query, outCap... int) (out <-chan MsgAndTags, err error) { +``` + +## Status + +In review + +## Consequences + +### Positive + +- more idiomatic interface +- subscribers know what tags msg was published with + +### Negative + +### Neutral diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md index d47c7f558..4879afc40 100644 --- a/docs/architecture/adr-template.md +++ b/docs/architecture/adr-template.md @@ -1,5 +1,9 @@ # ADR 000: Template for an ADR +Author: + +## Changelog + ## Context ## Decision From 322cee9156a4b99980f5c7d256877eaa3f4dc85e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 2 Nov 2018 13:55:09 -0400 Subject: [PATCH 113/113] Release/v0.26.0 (#2726) * changelog_pending -> changelog * update changelog * update changelog * update changelog and upgrading --- CHANGELOG.md | 143 +++++++++++++++++++++++++++++++++++++++++++ CHANGELOG_PENDING.md | 101 +----------------------------- UPGRADING.md | 10 ++- 3 files changed, 152 insertions(+), 102 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6032fc204..792386e5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,148 @@ # Changelog +## v0.26.0 + +*November 2, 2018* + +Special thanks to external contributors on this release: +@bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu, +@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu. + +Special thanks to @Slamper for a series of bug reports in our [bug bounty +program](https://hackerone.com/tendermint) which are fixed in this release. + +This release is primarily about adding Version fields to various data structures, +optimizing consensus messages for signing and verification in +restricted environments (like HSMs and the Ethereum Virtual Machine), and +aligning the consensus code with the [specification](https://arxiv.org/abs/1807.04938). +It also includes our first take at a generalized merkle proof system, and +changes the length of hashes used for hashing data structures from 20 to 32 +bytes. 
+ +See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new +version. + +Please note that we are still making breaking changes to the protocols. +While the new Version fields should help us to keep the software backwards compatible +even while upgrading the protocols, we cannot guarantee that new releases will +be compatible with old chains just yet. We expect there will be another breaking +release or two before the Cosmos Hub launch, but we will otherwise be paying +increasing attention to backwards compatibility. Thanks for bearing with us! + +### BREAKING CHANGES: + +* CLI/RPC/Config + * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Timeouts are now strings like "3s" and "100ms", not ints + * [config] [\#2505](https://github.com/tendermint/tendermint/issues/2505) Remove Mempool.RecheckEmpty (it was effectively useless anyways) + * [config] [\#2490](https://github.com/tendermint/tendermint/issues/2490) `mempool.wal` is disabled by default + * [privval] [\#2459](https://github.com/tendermint/tendermint/issues/2459) Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) + * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as + encoded on disk. + * [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default + behaviour to `prove=false` + * [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and + `/net_info` + * [rpc] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Remove + `_params` suffix from fields in `consensus_params`. + +* Apps + * [abci] [\#2298](https://github.com/tendermint/tendermint/issues/2298) ResponseQuery.Proof is now a structured merkle.Proof, not just + arbitrary bytes + * [abci] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version to Header and shift all fields by one + * [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Bump the field numbers for some `ResponseInfo` fields to make room for + `AppVersion` + * [abci] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Updates to ConsensusParams + * Remove `Params` suffix from field names + * Add `Params` suffix to message types + * Add new field and type, `Validator ValidatorParams`, to control what types of validator keys are allowed. + +* Go API + * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Timeouts are time.Duration, not ints + * [crypto/merkle & lite] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Various changes to accomodate General Merkle trees + * [crypto/merkle] [\#2595](https://github.com/tendermint/tendermint/issues/2595) Remove all Hasher objects in favor of byte slices + * [crypto/merkle] [\#2635](https://github.com/tendermint/tendermint/issues/2635) merkle.SimpleHashFromTwoHashes is no longer exported + * [node] [\#2479](https://github.com/tendermint/tendermint/issues/2479) Remove node.RunForever + * [rpc/client] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` + * [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`. 
+ * [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) + `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. + `PrevoteType`, `PrecommitType`. + * [types] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Rename fields in ConsensusParams to remove `Params` suffixes + * [types] [\#2735](https://github.com/tendermint/tendermint/issues/2735) Simplify Proposal message to align with spec + +* Blockchain Protocol + * [crypto/tmhash] [\#2732](https://github.com/tendermint/tendermint/issues/2732) TMHASH is now full 32-byte SHA256 + * All hashes in the block header and Merkle trees are now 32-bytes + * PubKey Addresses are still only 20-bytes + * [state] [\#2587](https://github.com/tendermint/tendermint/issues/2587) Require block.Time of the fist block to be genesis time + * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version + * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: + * [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`. + * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding. + * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Change `Type` field from `string` to `byte` and use new + `SignedMsgType` to enumerate. + * [types] [\#2730](https://github.com/tendermint/tendermint/issues/2730) Use + same order for fields in `Vote` as in the SignBytes + * [types] [\#2732](https://github.com/tendermint/tendermint/issues/2732) Remove the address field from the validator hash + * [types] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version struct to Header + * [types] [\#2609](https://github.com/tendermint/tendermint/issues/2609) ConsensusParams.Hash() is the hash of the amino encoded + struct instead of the Merkle tree of the fields + * [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same + order they appear in the header, instead of sorting by field name + * [types] [\#2682](https://github.com/tendermint/tendermint/issues/2682) Use proto3 `varint` encoding for ints that are usually unsigned (instead of zigzag encoding). + * [types] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Add Validator field to ConsensusParams + (Used to control which pubkey types validators can use, by abci type). 
+ +* P2P Protocol + * [consensus] [\#2652](https://github.com/tendermint/tendermint/issues/2652) + Replace `CommitStepMessage` with `NewValidBlockMessage` + * [consensus] [\#2735](https://github.com/tendermint/tendermint/issues/2735) Simplify `Proposal` message to align with spec + * [consensus] [\#2730](https://github.com/tendermint/tendermint/issues/2730) + Add `Type` field to `Proposal` and use same order of fields as in the + SignBytes for both `Proposal` and `Vote` + * [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of + DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake + + +### FEATURES: +- [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` +- [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo` +- [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together + +### IMPROVEMENTS: +- Additional Metrics + - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) + - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) +- [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Added ValidateBasic method, which performs basic checks +- [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at + github.com/tendermint/crypto +- [libs/log] [\#2707](https://github.com/tendermint/tendermint/issues/2707) Add year to log format (@yutianwu) +- [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit + +### BUG FIXES: +- [\#2711](https://github.com/tendermint/tendermint/issues/2711) Validate all incoming reactor messages. Fixes various bugs due to negative ints. 
+- [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile need call Flush() before rename (@goolAdapter) +- [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method +- [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray) +- [common] [\#2534](https://github.com/tendermint/tendermint/issues/2534) Fix `BitArray.PickRandom` to choose uniformly from true bits +- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for + timeoutPrecommit before starting next round +- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for + Proposal or timeoutProposal before entering prevote +- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock +- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1 +- [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a + block +- [consensus] [\#2652](https://github.com/tendermint/tendermint/issues/2652) Ensure valid block property with faulty proposer +- [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter) +- [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter) +- [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time +- [state] [\#2616](https://github.com/tendermint/tendermint/issues/2616) Pass nil to NewValidatorSet() when genesis file's Validators field is nil +- [p2p] [\#2555](https://github.com/tendermint/tendermint/issues/2555) Fix p2p switch FlushThrottle value (@goolAdapter) +- [p2p] [\#2668](https://github.com/tendermint/tendermint/issues/2668) Reconnect to originally dialed address (not self-reported + address) for persistent peers + + ## v0.25.0 *September 22, 2018* diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index cad2f444a..f5e56a123 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,124 +1,27 @@ # Pending -## v0.26.0 +## v0.26.1 -*October 29, 2018* +*TBA* Special thanks to external contributors on this release: -@bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu, -@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995 - -This release is primarily about adding Version fields to various data structures, -optimizing consensus messages for signing and verification in -restricted environments (like HSMs and the Ethereum Virtual Machine), and -aligning the consensus code with the [specification](https://arxiv.org/abs/1807.04938). -It also includes our first take at a generalized merkle proof system. - -See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new -version. - -Please note that we are still making breaking changes to the protocols. -While the new Version fields should help us to keep the software backwards compatible -even while upgrading the protocols, we cannot guarantee that new releases will -be compatible with old chains just yet. Thanks for bearing with us! Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). 
### BREAKING CHANGES: * CLI/RPC/Config - * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints - * [config] [\#2505](https://github.com/tendermint/tendermint/issues/2505) Remove Mempool.RecheckEmpty (it was effectively useless anyways) - * [config] [\#2490](https://github.com/tendermint/tendermint/issues/2490) `mempool.wal` is disabled by default - * [privval] [\#2459](https://github.com/tendermint/tendermint/issues/2459) Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) - * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as - encoded on disk. - * [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default - behaviour to `prove=false` - * [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and - `/net_info` * Apps - * [abci] [\#2298](https://github.com/tendermint/tendermint/issues/2298) ResponseQuery.Proof is now a structured merkle.Proof, not just - arbitrary bytes - * [abci] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version to Header and shift all fields by one - * [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Bump the field numbers for some `ResponseInfo` fields to make room for - `AppVersion` * Go API - * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints - * [crypto/merkle & lite] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Various changes to accomodate General Merkle trees - * [crypto/merkle] [\#2595](https://github.com/tendermint/tendermint/issues/2595) Remove all Hasher objects in favor of byte slices - * [crypto/merkle] [\#2635](https://github.com/tendermint/tendermint/issues/2635) merkle.SimpleHashFromTwoHashes is no longer exported - * [node] [\#2479](https://github.com/tendermint/tendermint/issues/2479) Remove node.RunForever - * [rpc/client] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` - * [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`. - * [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`, - `PrecommitType`. * Blockchain Protocol - * [abci] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Add ValidatorParams field to ConsensusParams. - (Used to control which pubkey types validators can use, by abci type) - * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: - * [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`. - * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding. - * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Change `Type` field fromt `string` to `byte` and use new - `SignedMsgType` to enumerate. 
- * [types] [\#2512](https://github.com/tendermint/tendermint/issues/2512) Remove the pubkey field from the validator hash - * [types] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version struct to Header - * [types] [\#2609](https://github.com/tendermint/tendermint/issues/2609) ConsensusParams.Hash() is the hash of the amino encoded - struct instead of the Merkle tree of the fields - * [state] [\#2587](https://github.com/tendermint/tendermint/issues/2587) Require block.Time of the fist block to be genesis time - * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version - * [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same - order they appear in the header, instead of sorting by field name - * [types] [\#2682](https://github.com/tendermint/tendermint/issues/2682) Use proto3 `varint` encoding for ints that are usually unsigned (instead of zigzag encoding). * P2P Protocol - * [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of - DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake ### FEATURES: -- [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` -- [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo` -- [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together ### IMPROVEMENTS: -- Additional Metrics - - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) - - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) -- [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Added ValidateBasic method, which performs basic checks -- [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at - github.com/tendermint/crypto -- [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit -- [libs/log] [\#2706](https://github.com/tendermint/tendermint/issues/2706) Add year to log format -- [consensus] [\#2683] validate all incoming messages -- [evidence] [\#2683] validate all incoming messages -- [blockchain] [\#2683] validate all incoming messages -- [p2p/pex] [\#2683] validate pexAddrsMessage addresses ### BUG FIXES: -- [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile need call Flush() before rename (@goolAdapter) -- [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method -- [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray) -- [common] [\#2534](https://github.com/tendermint/tendermint/issues/2534) Fix `BitArray.PickRandom` to choose uniformly from true bits -- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for - timeoutPrecommit before starting next round -- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for - Proposal or timeoutProposal before entering prevote -- [consensus] [\#2583](https://github.com/tendermint/tendermint/issues/2583) ensure valid - 
block property with faulty proposer -- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock -- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1 -- [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a - block -- [consensus] [\#2646](https://github.com/tendermint/tendermint/issues/2646) Simplify Proposal message (align with spec) -- [crypto] [\#2733](https://github.com/tendermint/tendermint/pull/2733) Fix general merkle keypath to start w/ last op's key -- [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter) -- [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter) -- [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time -- [state] [\#2616](https://github.com/tendermint/tendermint/issues/2616) Pass nil to NewValidatorSet() when genesis file's Validators field is nil -- [p2p] [\#2555](https://github.com/tendermint/tendermint/issues/2555) Fix p2p switch FlushThrottle value (@goolAdapter) -- [p2p] [\#2668](https://github.com/tendermint/tendermint/issues/2668) Reconnect to originally dialed address (not self-reported - address) for persistent peers - diff --git a/UPGRADING.md b/UPGRADING.md index cb0830a45..055dbec47 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -5,7 +5,7 @@ a newer version of Tendermint Core. ## v0.26.0 -New 0.26.0 release contains a lot of changes to core data types. It is not +New 0.26.0 release contains a lot of changes to core data types and protocols. It is not compatible to the old versions and there is no straight forward way to update old data to be compatible with the new version. @@ -33,7 +33,7 @@ to `prove`. To get proofs with your queries, ensure you set `prove=true`. Various version fields like `amino_version`, `p2p_version`, `consensus_version`, and `rpc_version` have been removed from the `node_info.other` and are consolidated under the tendermint semantic version (ie. `node_info.version`) and -the new `block` and `p2p` protocol versions under `node_info.protocol_version`.. +the new `block` and `p2p` protocol versions under `node_info.protocol_version`. ### ABCI Changes @@ -45,7 +45,7 @@ protobuf file for these changes. The `ResponseQuery.Proof` field is now structured as a `[]ProofOp` to support generalized Merkle tree constructions where the leaves of one Merkle tree are -the root of another. If you don't need this functionaluty, and you used to +the root of another. If you don't need this functionality, and you used to return `` here, you should instead return a single `ProofOp` with just the `Data` field set: @@ -79,6 +79,10 @@ The `node.RunForever` function was removed. Signal handling and running forever should instead be explicitly configured by the caller. See how we do it [here](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/cmd/tendermint/commands/run_node.go#L60). +### Other + +All hashes, except for public key addresses, are now 32-bytes. + ## v0.25.0 This release has minimal impact.
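As a quick illustration of the `/abci_query` change described in the v0.26.0 notes above: proofs are now requested with `prove` (default `false`) rather than `trusted`. Below is a hedged example against a local node, assuming it runs an application (such as the example kvstore) that stores the queried key and produces proofs; the address and key are placeholders:

```
curl -s 'localhost:26657/abci_query?data="mykey"&prove=true' | jq .
```

With `prove=true`, the `proof` field of the response is the structured `merkle.Proof` described in the ABCI changes above, rather than raw bytes.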