Browse Source

lite2: light client with weak subjectivity (#3989)

Refs #1771

ADR: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md

## Commits:

* add Verifier and VerifyCommitTrusting

* add two more checks

make trustLevel an option

* float32 for trustLevel

* check newHeader time

* started writing lite Client

* unify Verify methods

* ensure h2.Header.bfttime < h1.Header.bfttime + tp

* move trust checks into Verify function

* add more comments

* more docs

* started writing tests

* unbonding period failures

* tests are green

* export ErrNewHeaderTooFarIntoFuture

* make golangci happy

* test for non-adjusted headers

* more precision

* providers and stores

* VerifyHeader and VerifyHeaderAtHeight funcs

* fix compile errors

* remove lastVerifiedHeight, persist new trusted header

* sequential verification

* remove TrustedStore option

* started writing tests for light client

* cover basic cases for linear verification

* bisection tests PASS

* rename BisectingVerification to SkippingVerification

* refactor the code

* add TrustedHeader method

* consolidate sequential verification tests

* consolidate skipping verification tests

* rename trustedVals to trustedNextVals

* start writing docs

* ValidateTrustLevel func and ErrOldHeaderExpired error

* AutoClient and example tests

* fix errors

* update doc

* remove ErrNewHeaderTooFarIntoFuture

This check is unnecessary given existing a) ErrOldHeaderExpired b)
h2.Time > now checks.

* return an error if we're at a more recent height

* add comments

* add LastSignedHeaderHeight method to Store

I think it's fine if Store tracks last height

* copy over proxy from old lite package

* make TrustedHeader return latest if height=0

* modify LastSignedHeaderHeight to return an error if no headers exist

* copy over proxy impl

* refactor proxy and start http lite client

* Tx and BlockchainInfo methods

* Block method

* commit method

* code compiles again

* lite client compiles

* extract updateLiteClientIfNeededTo func

* move final parts

* add placeholder for tests

* force usage of lite http client in proxy

* comment out query tests for now

* explicitly mention tp: trusting period

* verify nextVals in VerifyHeader

* refactor bisection

* move the NextValidatorsHash check into updateTrustedHeaderAndVals

+ update the comment

* add ConsensusParams method to RPC client

* add ConsensusParams to rpc/mock/client

* change trustLevel type to a new cmn.Fraction type

+ update SkippingVerification comment

* stress that trustLevel is only used for non-adjacent headers

* fixes after Fede's review

Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>

* compare newHeader with a header from an alternative provider

* save pivot header

Refs https://github.com/tendermint/tendermint/pull/3989#discussion_r349122824

* check header can still be trusted in TrustedHeader

Refs https://github.com/tendermint/tendermint/pull/3989#discussion_r349101424

* lite: update Validators and Block endpoints

- Block no longer contains BlockMeta
- Validators now accept two additional params: page and perPage

* make linter happy
pull/4200/head
Anton Kaliaev 5 years ago
committed by GitHub
parent
commit
fb8b00f1d8
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 2934 additions and 25 deletions
  1. +17
    -0
      libs/common/fraction.go
  2. +76
    -0
      lite2/auto_client.go
  3. +443
    -0
      lite2/client.go
  4. +243
    -0
      lite2/client_test.go
  5. +35
    -0
      lite2/doc.go
  6. +18
    -0
      lite2/errors.go
  7. +95
    -0
      lite2/example_test.go
  8. +103
    -0
      lite2/provider/http/http.go
  9. +57
    -0
      lite2/provider/http/http_test.go
  10. +43
    -0
      lite2/provider/mock/mock.go
  11. +33
    -0
      lite2/provider/provider.go
  12. +102
    -0
      lite2/proxy/proxy.go
  13. +229
    -0
      lite2/proxy/routes.go
  14. +418
    -0
      lite2/rpc/client.go
  15. +14
    -0
      lite2/rpc/proof.go
  16. +162
    -0
      lite2/rpc/query_test.go
  17. +132
    -0
      lite2/store/db/db.go
  18. +38
    -0
      lite2/store/store.go
  19. +140
    -0
      lite2/test_helpers.go
  20. +116
    -0
      lite2/verifier.go
  21. +279
    -0
      lite2/verifier_test.go
  22. +9
    -0
      rpc/client/httpclient.go
  23. +1
    -0
      rpc/client/interface.go
  24. +4
    -0
      rpc/client/localclient.go
  25. +4
    -0
      rpc/client/mock/client.go
  26. +1
    -1
      types/block.go
  27. +18
    -0
      types/block_meta.go
  28. +7
    -0
      types/block_meta_test.go
  29. +97
    -24
      types/validator_set.go

+ 17
- 0
libs/common/fraction.go View File

@ -0,0 +1,17 @@
package common
import "fmt"
// Fraction defined in terms of a numerator divided by a denominator in int64
// format.
type Fraction struct {
	// The numerator of the fraction, e.g. 2 in 2/3.
	Numerator int64
	// The value by which the numerator is divided, e.g. 3 in 2/3. Must be
	// positive.
	Denominator int64
}

// String returns the fraction formatted as "numerator/denominator".
func (fr Fraction) String() string {
	return fmt.Sprintf("%d/%d", fr.Numerator, fr.Denominator)
}

+ 76
- 0
lite2/auto_client.go View File

@ -0,0 +1,76 @@
package lite
import (
"time"
"github.com/tendermint/tendermint/types"
)
// AutoClient can auto update itself by fetching headers every N seconds.
type AutoClient struct {
	base         *Client
	updatePeriod time.Duration
	quit         chan struct{}

	trustedHeaders chan *types.SignedHeader
	err            chan error
}

// NewAutoClient creates a new client and starts a polling goroutine.
func NewAutoClient(base *Client, updatePeriod time.Duration) *AutoClient {
	c := &AutoClient{
		base:           base,
		updatePeriod:   updatePeriod,
		quit:           make(chan struct{}),
		trustedHeaders: make(chan *types.SignedHeader),
		err:            make(chan error),
	}
	go c.autoUpdate()
	return c
}

// TrustedHeaders returns a channel onto which new trusted headers are posted.
func (c *AutoClient) TrustedHeaders() <-chan *types.SignedHeader {
	return c.trustedHeaders
}

// Err returns a channel onto which errors are posted.
func (c *AutoClient) Err() <-chan error {
	return c.err
}

// Stop stops the client. It also unblocks the polling goroutine if it is
// currently waiting to deliver a header or an error, so the goroutine does
// not leak when nobody is reading from the output channels.
func (c *AutoClient) Stop() {
	close(c.quit)
}

// autoUpdate verifies and delivers one new header per tick until Stop is
// called.
func (c *AutoClient) autoUpdate() {
	lastTrustedHeight, err := c.base.LastTrustedHeight()
	if err != nil {
		// BUGFIX: a bare `c.err <- err` here would block forever when nobody
		// reads the error channel; deliverErr also watches quit.
		c.deliverErr(err)
		return
	}

	ticker := time.NewTicker(c.updatePeriod)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			err := c.base.VerifyHeaderAtHeight(lastTrustedHeight+1, time.Now())
			if err != nil {
				if !c.deliverErr(err) {
					return
				}
				continue
			}
			h, err := c.base.TrustedHeader(lastTrustedHeight+1, time.Now())
			if err != nil {
				if !c.deliverErr(err) {
					return
				}
				continue
			}
			// Deliver the header, but never block past Stop().
			select {
			case c.trustedHeaders <- h:
			case <-c.quit:
				return
			}
			lastTrustedHeight = h.Height
		case <-c.quit:
			return
		}
	}
}

// deliverErr posts err to the error channel unless the client was stopped.
// It reports whether the client is still running.
func (c *AutoClient) deliverErr(err error) bool {
	select {
	case c.err <- err:
		return true
	case <-c.quit:
		return false
	}
}

+ 443
- 0
lite2/client.go View File

@ -0,0 +1,443 @@
package lite
import (
"bytes"
"fmt"
"time"
"github.com/pkg/errors"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/lite2/provider"
"github.com/tendermint/tendermint/lite2/store"
"github.com/tendermint/tendermint/types"
)
// TrustOptions are the trust parameters needed when a new light client
// connects to the network or when an existing light client that has been
// offline for longer than the trusting period connects to the network.
//
// The expectation is the user will get this information from a trusted source
// like a validator, a friend, or a secure website. A more user friendly
// solution with trust tradeoffs is that we establish an https based protocol
// with a default end point that populates this information. Also an on-chain
// registry of roots-of-trust (e.g. on the Cosmos Hub) seems likely in the
// future.
type TrustOptions struct {
	// tp: trusting period.
	//
	// Should be significantly less than the unbonding period (e.g. unbonding
	// period = 3 weeks, trusting period = 2 weeks).
	//
	// More specifically, trusting period + time needed to check headers + time
	// needed to report and punish misbehavior should be less than the unbonding
	// period.
	Period time.Duration

	// Header's Height and Hash must both be provided to force the trusting of a
	// particular header.
	Height int64
	Hash   []byte
}

// mode enumerates the supported verification strategies.
type mode byte

const (
	// sequential: verify every header between the trusted one and the target,
	// one by one.
	sequential mode = iota + 1
	// skipping: jump over headers via bisection as long as enough of the
	// trusted validator set signed the new header.
	skipping
)
// Option sets a parameter for the light client.
type Option func(*Client)

// SequentialVerification option configures the light client to sequentially
// check the headers. Note this is much slower than SkippingVerification,
// albeit more secure.
func SequentialVerification() Option {
	return func(c *Client) {
		c.verificationMode = sequential
	}
}

// SkippingVerification option configures the light client to skip headers as
// long as {trustLevel} of the old validator set signed the new header. The
// bisection algorithm from the specification is used for finding the minimal
// "trust path".
//
// trustLevel - fraction of the old validator set (in terms of voting power),
// which must sign the new header in order for us to trust it. NOTE this only
// applies to non-adjacent headers. For adjacent headers, sequential
// verification is used.
//
// Panics if trustLevel is invalid (see ValidateTrustLevel) — a programmer
// error, surfaced at client construction time rather than during verification.
func SkippingVerification(trustLevel cmn.Fraction) Option {
	if err := ValidateTrustLevel(trustLevel); err != nil {
		panic(err)
	}
	return func(c *Client) {
		c.verificationMode = skipping
		c.trustLevel = trustLevel
	}
}

// AlternativeSources option can be used to supply alternative providers, which
// will be used for cross-checking the primary provider.
func AlternativeSources(providers []provider.Provider) Option {
	return func(c *Client) {
		c.alternatives = providers
	}
}
// Client represents a light client, connected to a single chain, which gets
// headers from a primary provider, verifies them either sequentially or by
// skipping some and stores them in a trusted store (usually, a local FS).
//
// Default verification: SkippingVerification(DefaultTrustLevel)
type Client struct {
	chainID          string
	trustingPeriod   time.Duration // see TrustOptions.Period
	verificationMode mode
	trustLevel       cmn.Fraction // only used in skipping mode

	// Primary provider of new headers.
	primary provider.Provider

	// Alternative providers for checking the primary for misbehavior by
	// comparing data.
	alternatives []provider.Provider

	// Where trusted headers are stored.
	trustedStore store.Store
	// Highest trusted header from the store (height=H).
	trustedHeader *types.SignedHeader
	// Highest next validator set from the store (height=H+1).
	trustedNextVals *types.ValidatorSet

	logger log.Logger
}
// NewClient returns a new light client. It returns an error if it fails to
// obtain the header & vals from the primary or they are invalid (e.g. trust
// hash does not match with the one from the header).
//
// See all Option(s) for the additional configuration.
func NewClient(
	chainID string,
	trustOptions TrustOptions,
	primary provider.Provider,
	trustedStore store.Store,
	options ...Option) (*Client, error) {

	// Defaults: skipping verification with DefaultTrustLevel. Options applied
	// below may override both.
	c := &Client{
		chainID:          chainID,
		trustingPeriod:   trustOptions.Period,
		verificationMode: skipping,
		trustLevel:       DefaultTrustLevel,
		primary:          primary,
		trustedStore:     trustedStore,
		logger:           log.NewNopLogger(),
	}

	for _, o := range options {
		o(c)
	}

	// Fetch the header/vals referenced by trustOptions and persist them as
	// the initial trusted state.
	if err := c.initializeWithTrustOptions(trustOptions); err != nil {
		return nil, err
	}

	return c, nil
}
// initializeWithTrustOptions fetches the header and next validator set
// referenced by options from the primary provider, validates the header
// against the user-supplied trust hash, and persists both as the initial
// trusted state.
func (c *Client) initializeWithTrustOptions(options TrustOptions) error {
	// 1) Fetch and verify the header.
	h, err := c.primary.SignedHeader(options.Height)
	if err != nil {
		return err
	}

	// NOTE: Verify func will check if it's expired or not.
	if err := h.ValidateBasic(c.chainID); err != nil {
		return errors.Wrap(err, "ValidateBasic failed")
	}

	// The fetched header must match the user-supplied trust hash exactly.
	if !bytes.Equal(h.Hash(), options.Hash) {
		return errors.Errorf("expected header's hash %X, but got %X", options.Hash, h.Hash())
	}

	// 2) Fetch and verify the next vals. (Consistency with
	// h.NextValidatorsHash is checked inside updateTrustedHeaderAndVals.)
	vals, err := c.primary.ValidatorSet(options.Height + 1)
	if err != nil {
		return err
	}

	// 3) Persist both of them and continue.
	return c.updateTrustedHeaderAndVals(h, vals)
}
// SetLogger sets a logger.
func (c *Client) SetLogger(l log.Logger) {
	c.logger = l
}

// TrustedHeader returns a trusted header at the given height (0 - the latest).
//
// height must be >= 0.
//
// It returns an error if:
//   - the header expired (ErrOldHeaderExpired). In that case, update your
//     client to more recent height;
//   - there are some issues with the trusted store, although that should not
//     happen normally.
//
// TODO: mention how many headers will be kept by the light client.
func (c *Client) TrustedHeader(height int64, now time.Time) (*types.SignedHeader, error) {
	if height < 0 {
		return nil, errors.New("negative height")
	}

	// Height 0 means "the latest trusted header".
	if height == 0 {
		var err error
		height, err = c.LastTrustedHeight()
		if err != nil {
			return nil, err
		}
	}

	h, err := c.trustedStore.SignedHeader(height)
	if err != nil {
		return nil, err
	}

	// Ensure header can still be trusted.
	expirationTime := h.Time.Add(c.trustingPeriod)
	if !expirationTime.After(now) {
		return nil, ErrOldHeaderExpired{expirationTime, now}
	}

	return h, nil
}

// LastTrustedHeight returns the last trusted height.
func (c *Client) LastTrustedHeight() (int64, error) {
	return c.trustedStore.LastSignedHeaderHeight()
}

// ChainID returns the chain ID.
func (c *Client) ChainID() string {
	return c.chainID
}
// VerifyHeaderAtHeight fetches the header and validators at the given height
// and calls VerifyHeader.
//
// If the trusted header is more recent than one here, an error is returned.
func (c *Client) VerifyHeaderAtHeight(height int64, now time.Time) error {
	// Refuse to go backwards: we only ever verify forward of the trusted head.
	if c.trustedHeader.Height >= height {
		return errors.Errorf("height #%d is already trusted (last: #%d)", height, c.trustedHeader.Height)
	}

	// Fetch both the header and its validator set from the primary provider.
	h, vals, err := c.fetchHeaderAndValsAtHeight(height)
	if err != nil {
		return err
	}

	return c.VerifyHeader(h, vals, now)
}
// VerifyHeader verifies new header against the trusted state.
//
// SequentialVerification: verifies that 2/3 of the trusted validator set has
// signed the new header. If the headers are not adjacent, **all** intermediate
// headers will be requested.
//
// SkippingVerification(trustLevel): verifies that {trustLevel} of the trusted
// validator set has signed the new header. If it's not the case and the
// headers are not adjacent, bisection is performed and necessary (not all)
// intermediate headers will be requested. See the specification for the
// algorithm.
//
// If the trusted header is more recent than one here, an error is returned.
func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error {
	if c.trustedHeader.Height >= newHeader.Height {
		return errors.Errorf("height #%d is already trusted (last: #%d)", newHeader.Height, c.trustedHeader.Height)
	}

	// Cross-check the new header against a random alternative provider, if
	// any were configured.
	if len(c.alternatives) > 0 {
		if err := c.compareNewHeaderWithRandomAlternative(newHeader); err != nil {
			return err
		}
	}

	var err error
	switch c.verificationMode {
	case sequential:
		err = c.sequence(newHeader, newVals, now)
	case skipping:
		err = c.bisection(c.trustedHeader, c.trustedNextVals, newHeader, newVals, now)
	default:
		panic(fmt.Sprintf("Unknown verification mode: %b", c.verificationMode))
	}
	if err != nil {
		return err
	}

	// Update trusted header and vals. The next validator set (height+1) is
	// fetched so the new trusted state is self-consistent.
	nextVals, err := c.primary.ValidatorSet(newHeader.Height + 1)
	if err != nil {
		return err
	}

	return c.updateTrustedHeaderAndVals(newHeader, nextVals)
}
// sequence performs sequential verification: every header between the current
// trusted header and newHeader is fetched and verified one by one, with the
// trusted state advanced after each successful step.
func (c *Client) sequence(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error {
	// 1) Verify any intermediate headers.
	var (
		interimHeader *types.SignedHeader
		nextVals      *types.ValidatorSet
		err           error
	)
	for height := c.trustedHeader.Height + 1; height < newHeader.Height; height++ {
		interimHeader, err = c.primary.SignedHeader(height)
		if err != nil {
			return errors.Wrapf(err, "failed to obtain the header #%d", height)
		}

		// Adjacent step: the untrusted vals are the trusted next vals.
		err = Verify(c.chainID, c.trustedHeader, c.trustedNextVals, interimHeader, c.trustedNextVals,
			c.trustingPeriod, now, c.trustLevel)
		if err != nil {
			return errors.Wrapf(err, "failed to verify the header #%d", height)
		}

		// Update trusted header and vals. For the penultimate height we can
		// reuse newVals instead of fetching them again.
		if height == newHeader.Height-1 {
			nextVals = newVals
		} else {
			nextVals, err = c.primary.ValidatorSet(height + 1)
			if err != nil {
				return errors.Wrapf(err, "failed to obtain the vals #%d", height+1)
			}
		}
		err = c.updateTrustedHeaderAndVals(interimHeader, nextVals)
		if err != nil {
			return errors.Wrapf(err, "failed to update trusted state #%d", height)
		}
	}

	// 2) Verify the new header.
	return Verify(c.chainID, c.trustedHeader, c.trustedNextVals, newHeader, newVals, c.trustingPeriod, now, c.trustLevel)
}
// bisection implements the skipping verification algorithm: it first tries to
// verify newHeader directly against lastHeader/lastVals and, when too much of
// the validator set has changed, recursively verifies via a pivot header in
// the middle of the range.
func (c *Client) bisection(
	lastHeader *types.SignedHeader,
	lastVals *types.ValidatorSet,
	newHeader *types.SignedHeader,
	newVals *types.ValidatorSet,
	now time.Time) error {

	err := Verify(c.chainID, lastHeader, lastVals, newHeader, newVals, c.trustingPeriod, now, c.trustLevel)
	switch err.(type) {
	case nil:
		return nil
	case types.ErrTooMuchChange:
		// continue bisection
		// NOTE(review): this type switch only matches if Verify returns
		// types.ErrTooMuchChange unwrapped — confirm Verify does not wrap it.
	default:
		return errors.Wrapf(err, "failed to verify the header #%d ", newHeader.Height)
	}

	if newHeader.Height == c.trustedHeader.Height+1 {
		// Adjacent headers cannot be bisected further; a mismatch here means
		// misbehavior.
		// TODO: submit evidence here
		return errors.Errorf("adjacent headers (#%d and #%d) that are not matching", lastHeader.Height, newHeader.Height)
	}

	// Split the range at its midpoint and recurse on both halves.
	pivot := (c.trustedHeader.Height + newHeader.Header.Height) / 2
	pivotHeader, pivotVals, err := c.fetchHeaderAndValsAtHeight(pivot)
	if err != nil {
		return err
	}

	// left branch
	{
		err := c.bisection(lastHeader, lastVals, pivotHeader, pivotVals, now)
		if err != nil {
			return errors.Wrapf(err, "bisection of #%d and #%d", lastHeader.Height, pivot)
		}
	}

	// right branch
	{
		nextVals, err := c.primary.ValidatorSet(pivot + 1)
		if err != nil {
			return errors.Wrapf(err, "failed to obtain the vals #%d", pivot+1)
		}
		if !bytes.Equal(pivotHeader.NextValidatorsHash, nextVals.Hash()) {
			return errors.Errorf("expected next validator's hash %X, but got %X (height #%d)",
				pivotHeader.NextValidatorsHash,
				nextVals.Hash(),
				pivot)
		}

		// Persist the pivot header so a crash mid-bisection does not lose
		// progress.
		err = c.updateTrustedHeaderAndVals(pivotHeader, nextVals)
		if err != nil {
			return errors.Wrapf(err, "failed to update trusted state #%d", pivot)
		}

		err = c.bisection(pivotHeader, nextVals, newHeader, newVals, now)
		if err != nil {
			return errors.Wrapf(err, "bisection of #%d and #%d", pivot, newHeader.Height)
		}
	}

	return nil
}
// updateTrustedHeaderAndVals persists the given header and next validator set
// (for height h.Height+1) as the new trusted state, after checking they are
// consistent with each other. The in-memory fields are only updated once both
// saves succeed.
func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.ValidatorSet) error {
	if !bytes.Equal(h.NextValidatorsHash, vals.Hash()) {
		return errors.Errorf("expected next validator's hash %X, but got %X", h.NextValidatorsHash, vals.Hash())
	}

	if err := c.trustedStore.SaveSignedHeader(h); err != nil {
		return errors.Wrap(err, "failed to save trusted header")
	}
	if err := c.trustedStore.SaveValidatorSet(vals, h.Height+1); err != nil {
		return errors.Wrap(err, "failed to save trusted vals")
	}

	c.trustedHeader = h
	c.trustedNextVals = vals

	return nil
}
// fetchHeaderAndValsAtHeight requests both the signed header and the
// validator set for the given height from the primary provider.
func (c *Client) fetchHeaderAndValsAtHeight(height int64) (*types.SignedHeader, *types.ValidatorSet, error) {
	sh, err := c.primary.SignedHeader(height)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "failed to obtain the header #%d", height)
	}

	valSet, err := c.primary.ValidatorSet(height)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "failed to obtain the vals #%d", height)
	}

	return sh, valSet, nil
}
// compareNewHeaderWithRandomAlternative cross-checks the given header against
// the header at the same height obtained from a randomly chosen alternative
// provider. An error is returned when the header cannot be fetched or the
// hashes differ.
func (c *Client) compareNewHeaderWithRandomAlternative(h *types.SignedHeader) error {
	// 1. Pick an alternative provider.
	alt := c.alternatives[cmn.RandIntn(len(c.alternatives))]

	// 2. Fetch the header.
	otherHeader, err := alt.SignedHeader(h.Height)
	if err != nil {
		return errors.Wrapf(err,
			"failed to obtain header #%d from alternative provider %v", h.Height, alt)
	}

	// 3. Compare hashes.
	if !bytes.Equal(h.Hash(), otherHeader.Hash()) {
		// TODO: One of the providers is lying. Send the evidence to fork
		// accountability server.
		return errors.Errorf(
			"new header hash %X does not match one from alternative provider %X",
			h.Hash(), otherHeader.Hash())
	}

	return nil
}

+ 243
- 0
lite2/client_test.go View File

@ -0,0 +1,243 @@
package lite
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
mockp "github.com/tendermint/tendermint/lite2/provider/mock"
dbs "github.com/tendermint/tendermint/lite2/store/db"
"github.com/tendermint/tendermint/types"
)
// TestClient_SequentialVerification exercises NewClient and
// VerifyHeaderAtHeight in sequential mode against a table of good and bad
// header/validator sets served by a mock provider.
func TestClient_SequentialVerification(t *testing.T) {
	const (
		chainID = "sequential-verification"
	)

	var (
		keys = genPrivKeys(4)
		// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
		vals     = keys.ToValidators(20, 10)
		bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
		header   = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals,
			[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys))
	)

	testCases := []struct {
		otherHeaders map[int64]*types.SignedHeader // all except ^
		vals         map[int64]*types.ValidatorSet
		initErr      bool // expect NewClient to fail
		verifyErr    bool // expect VerifyHeaderAtHeight(3, ...) to fail
	}{
		// good
		{
			map[int64]*types.SignedHeader{
				// trusted header
				1: header,
				// interim header (3/3 signed)
				2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
				// last header (3/3 signed)
				3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			},
			map[int64]*types.ValidatorSet{
				1: vals,
				2: vals,
				3: vals,
				4: vals,
			},
			false,
			false,
		},
		// bad: different first header
		{
			map[int64]*types.SignedHeader{
				// different header
				1: keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			},
			map[int64]*types.ValidatorSet{
				1: vals,
			},
			true,
			false,
		},
		// bad: 1/3 signed interim header
		{
			map[int64]*types.SignedHeader{
				// trusted header
				1: header,
				// interim header (1/3 signed)
				2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)),
				// last header (3/3 signed)
				3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			},
			map[int64]*types.ValidatorSet{
				1: vals,
				2: vals,
				3: vals,
				4: vals,
			},
			false,
			true,
		},
		// bad: 1/3 signed last header
		{
			map[int64]*types.SignedHeader{
				// trusted header
				1: header,
				// interim header (3/3 signed)
				2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
				// last header (1/3 signed)
				3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)),
			},
			map[int64]*types.ValidatorSet{
				1: vals,
				2: vals,
				3: vals,
				4: vals,
			},
			false,
			true,
		},
	}

	for _, tc := range testCases {
		c, err := NewClient(
			chainID,
			TrustOptions{
				Period: 4 * time.Hour,
				Height: 1,
				Hash:   header.Hash(),
			},
			mockp.New(
				chainID,
				tc.otherHeaders,
				tc.vals,
			),
			dbs.New(dbm.NewMemDB(), chainID),
			SequentialVerification(),
		)

		if tc.initErr {
			require.Error(t, err)
			continue
		} else {
			require.NoError(t, err)
		}

		err = c.VerifyHeaderAtHeight(3, bTime.Add(3*time.Hour))
		if tc.verifyErr {
			assert.Error(t, err)
		} else {
			assert.NoError(t, err)
		}
	}
}
// TestClient_SkippingVerification exercises NewClient and
// VerifyHeaderAtHeight in skipping (bisection) mode, including a case where
// the validator set changes completely between heights.
func TestClient_SkippingVerification(t *testing.T) {
	const (
		chainID = "skipping-verification"
	)

	var (
		keys = genPrivKeys(4)
		// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
		vals     = keys.ToValidators(20, 10)
		bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
		header   = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals,
			[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys))
	)

	// required for 2nd test case
	newKeys := genPrivKeys(4)
	newVals := newKeys.ToValidators(10, 1)

	testCases := []struct {
		otherHeaders map[int64]*types.SignedHeader // all except ^
		vals         map[int64]*types.ValidatorSet
		initErr      bool // expect NewClient to fail
		verifyErr    bool // expect VerifyHeaderAtHeight(3, ...) to fail
	}{
		// good
		{
			map[int64]*types.SignedHeader{
				// trusted header
				1: header,
				// last header (3/3 signed)
				3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			},
			map[int64]*types.ValidatorSet{
				1: vals,
				2: vals,
				3: vals,
				4: vals,
			},
			false,
			false,
		},
		// good, val set changes 100% at height 2
		{
			map[int64]*types.SignedHeader{
				// trusted header
				1: header,
				// interim header (3/3 signed)
				2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, newVals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
				// last header (0/4 of the original val set signed)
				3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals,
					[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(newKeys)),
			},
			map[int64]*types.ValidatorSet{
				1: vals,
				2: vals,
				3: newVals,
				4: newVals,
			},
			false,
			false,
		},
	}

	for _, tc := range testCases {
		c, err := NewClient(
			chainID,
			TrustOptions{
				Period: 4 * time.Hour,
				Height: 1,
				Hash:   header.Hash(),
			},
			mockp.New(
				chainID,
				tc.otherHeaders,
				tc.vals,
			),
			dbs.New(dbm.NewMemDB(), chainID),
			SkippingVerification(DefaultTrustLevel),
		)

		if tc.initErr {
			require.Error(t, err)
			continue
		} else {
			require.NoError(t, err)
		}

		err = c.VerifyHeaderAtHeight(3, bTime.Add(3*time.Hour))
		if tc.verifyErr {
			assert.Error(t, err)
		} else {
			assert.NoError(t, err)
		}
	}
}

+ 35
- 0
lite2/doc.go View File

@ -0,0 +1,35 @@
/*
Package lite provides a light client implementation.

The concept of light clients was introduced in the Bitcoin white paper. It
describes a watcher of a distributed consensus process that only validates the
consensus algorithm and not the state machine transactions within.

Tendermint light clients allow bandwidth & compute-constrained devices, such as
smartphones, low-power embedded chips, or other blockchains to efficiently
verify the consensus of a Tendermint blockchain. This forms the basis of safe
and efficient state synchronization for new network nodes and inter-blockchain
communication (where a light client of one Tendermint instance runs in another
chain's state machine).

In a network that is expected to reliably punish validators for misbehavior by
slashing bonded stake and where the validator set changes infrequently, clients
can take advantage of this assumption to safely synchronize a light client
without downloading the intervening headers.

Light clients (and full nodes) operating in the Proof Of Stake context need a
trusted block height from a trusted source that is no older than 1 unbonding
window plus a configurable evidence submission synchrony bound. This is called
weak subjectivity.

Weak subjectivity is required in Proof of Stake blockchains because it is
costless for an attacker to buy up voting keys that are no longer bonded and
fork the network at some point in its prior history. See Vitalik's post at
[Proof of Stake: How I Learned to Love Weak
Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/).

NOTE: Tendermint provides a somewhat different (stronger) light client model
than Bitcoin under eclipse, since the eclipsing node(s) can only fool the light
client if they have two-thirds of the private keys from the last root-of-trust.
*/
package lite

+ 18
- 0
lite2/errors.go View File

@ -0,0 +1,18 @@
package lite
import (
"fmt"
"time"
)
// ErrOldHeaderExpired means the old (trusted) header has expired according to
// the given trustingPeriod and current time. If so, the light client must be
// reset subjectively.
type ErrOldHeaderExpired struct {
	At  time.Time // time at which the header expired
	Now time.Time // current time observed by the caller
}

// Error implements the error interface.
func (e ErrOldHeaderExpired) Error() string {
	return fmt.Sprintf(
		"old header has expired at %v (now: %v)",
		e.At,
		e.Now,
	)
}

+ 95
- 0
lite2/example_test.go View File

@ -0,0 +1,95 @@
package lite
//func TestExample_Client(t *testing.T) {
// const (
// chainID = "my-awesome-chain"
// )
// dbDir, err := ioutil.TempDir("", "lite-client-example")
// if err != nil {
// t.Fatal(err)
// }
// defer os.RemoveAll(dbDir)
// // TODO: fetch the "trusted" header from a node
// header := (*types.SignedHeader)(nil)
// /////////////////////////////////////////////////////////////////////////////
// db, err := dbm.NewGoLevelDB("lite-client-db", dbDir)
// if err != nil {
// // return err
// t.Fatal(err)
// }
// c, err := NewClient(
// chainID,
// TrustOptions{
// Period: 504 * time.Hour, // 21 days
// Height: 100,
// Hash: header.Hash(),
// },
// httpp.New(chainID, "tcp://localhost:26657"),
// dbs.New(db, chainID),
// )
// err = c.VerifyHeaderAtHeight(101, time.Now())
// if err != nil {
// fmt.Println("retry?")
// }
// h, err := c.TrustedHeader(101)
// if err != nil {
// fmt.Println("retry?")
// }
// fmt.Println("got header", h)
// // verify some data
//}
//func TestExample_AutoClient(t *testing.T) {
// const (
// chainID = "my-awesome-chain"
// )
// dbDir, err := ioutil.TempDir("", "lite-client-example")
// if err != nil {
// t.Fatal(err)
// }
// defer os.RemoveAll(dbDir)
// // TODO: fetch the "trusted" header from a node
// header := (*types.SignedHeader)(nil)
// /////////////////////////////////////////////////////////////////////////////
// db, err := dbm.NewGoLevelDB("lite-client-db", dbDir)
// if err != nil {
// // return err
// t.Fatal(err)
// }
// base, err := NewClient(
// chainID,
// TrustOptions{
// Period: 504 * time.Hour, // 21 days
// Height: 100,
// Hash: header.Hash(),
// },
// httpp.New(chainID, "tcp://localhost:26657"),
// dbs.New(db, chainID),
// )
// c := NewAutoClient(base, 1*time.Second)
// defer c.Stop()
// select {
// case h := <-c.TrustedHeaders():
// fmt.Println("got header", h)
// // verify some data
// case err := <-c.Err():
// switch errors.Cause(err).(type) {
// case ErrOldHeaderExpired:
// // reobtain trust height and hash
// default:
// // try with another full node
// fmt.Println("got error", err)
// }
// }
//}

+ 103
- 0
lite2/provider/http/http.go View File

@ -0,0 +1,103 @@
package http
import (
"fmt"
"github.com/tendermint/tendermint/lite2/provider"
rpcclient "github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
)
// SignStatusClient combines a SignClient and StatusClient.
type SignStatusClient interface {
	rpcclient.SignClient
	rpcclient.StatusClient
}

// http provider uses an RPC client (or SignStatusClient more generally) to
// obtain the necessary information.
type http struct {
	// chainID is used to check fetched headers belong to the expected chain.
	chainID string
	client  SignStatusClient
}

// New creates a HTTP provider, which is using the rpcclient.HTTP
// client under the hood.
func New(chainID, remote string) provider.Provider {
	return NewWithClient(chainID, rpcclient.NewHTTP(remote, "/websocket"))
}

// NewWithClient allows you to provide custom SignStatusClient.
func NewWithClient(chainID string, client SignStatusClient) provider.Provider {
	return &http{
		chainID: chainID,
		client:  client,
	}
}
// ChainID returns the chain ID this provider was configured with.
func (p *http) ChainID() string {
	return p.chainID
}

// SignedHeader fetches the commit at the given height (0 = latest) and
// returns the signed header from it, after checking it belongs to the
// expected chain.
func (p *http) SignedHeader(height int64) (*types.SignedHeader, error) {
	h, err := validateHeight(height)
	if err != nil {
		return nil, err
	}

	res, err := p.client.Commit(h)
	if err != nil {
		return nil, err
	}

	// Verify we're still on the same chain.
	if p.chainID != res.Header.ChainID {
		return nil, fmt.Errorf("expected chainID %s, got %s", p.chainID, res.Header.ChainID)
	}

	return &res.SignedHeader, nil
}
// ValidatorSet fetches the full validator set at the given height (0 =
// latest), paging through the RPC Validators endpoint when there are more
// than maxPerPage validators.
func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) {
	h, err := validateHeight(height)
	if err != nil {
		return nil, err
	}

	const maxPerPage = 100
	res, err := p.client.Validators(h, 0, maxPerPage)
	if err != nil {
		return nil, err
	}

	var (
		vals = res.Validators
		page = 1
	)

	// Check if there are more validators.
	// NOTE(review): the first request above uses page 0 while this loop
	// starts at page 1 — if the RPC pagination is 1-based, pages 0 and 1 may
	// return the same validators, duplicating entries. Confirm against the
	// RPC Validators endpoint semantics.
	for len(res.Validators) == maxPerPage {
		res, err = p.client.Validators(h, page, maxPerPage)
		if err != nil {
			return nil, err
		}
		if len(res.Validators) > 0 {
			vals = append(vals, res.Validators...)
		}
		page++
	}

	return types.NewValidatorSet(vals), nil
}
// validateHeight checks that height >= 0 and converts it into the pointer
// form expected by the RPC client: nil means "latest" (i.e. height 0).
func validateHeight(height int64) (*int64, error) {
	switch {
	case height < 0:
		return nil, fmt.Errorf("expected height >= 0, got height %d", height)
	case height == 0:
		// 0 requests the latest height, which the RPC client expresses as nil.
		return nil, nil
	default:
		return &height, nil
	}
}

+ 57
- 0
lite2/provider/http/http_test.go View File

@ -0,0 +1,57 @@
package http
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
rpcclient "github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
// TestMain starts one in-process Tendermint node (backed by the kvstore ABCI
// app) shared by all tests in this package, runs the tests, then tears the
// node down and exits with the test result code.
func TestMain(m *testing.M) {
	app := kvstore.NewKVStoreApplication()
	node := rpctest.StartTendermint(app)

	code := m.Run()

	rpctest.StopTendermint(node)
	os.Exit(code)
}
// TestProvider exercises the HTTP provider against the node started in
// TestMain: fetch the latest signed header, validate it, then perform a
// historical query for an earlier height.
func TestProvider(t *testing.T) {
	cfg := rpctest.GetConfig()
	defer os.RemoveAll(cfg.RootDir)
	rpcAddr := cfg.RPC.ListenAddress
	genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
	if err != nil {
		panic(err)
	}
	chainID := genDoc.ChainID
	t.Log("chainID:", chainID)
	p := New(chainID, rpcAddr)
	require.NotNil(t, p)

	// let it produce some blocks
	err = rpcclient.WaitForHeight(p.(*http).client, 6, nil)
	require.Nil(t, err)

	// let's get the highest block
	sh, err := p.SignedHeader(0)
	require.Nil(t, err, "%+v", err)
	// sanity bound on the height of a freshly started test chain
	assert.True(t, sh.Height < 5000)

	// let's check this is valid somehow
	assert.Nil(t, sh.ValidateBasic(chainID))

	// historical queries now work :)
	lower := sh.Height - 5
	sh, err = p.SignedHeader(lower)
	assert.Nil(t, err, "%+v", err)
	assert.Equal(t, lower, sh.Height)
}

+ 43
- 0
lite2/provider/mock/mock.go View File

@ -0,0 +1,43 @@
package mock
import (
"github.com/pkg/errors"
"github.com/tendermint/tendermint/lite2/provider"
"github.com/tendermint/tendermint/types"
)
// mock provider allows to directly set headers & vals, which can be handy when
// testing.
type mock struct {
	chainID string
	headers map[int64]*types.SignedHeader // height -> header
	vals    map[int64]*types.ValidatorSet // height -> validator set
}

// New creates a mock provider.
func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) provider.Provider {
	return &mock{
		chainID: chainID,
		headers: headers,
		vals:    vals,
	}
}
// ChainID returns the blockchain ID this mock was created with.
func (p *mock) ChainID() string {
	return p.chainID
}

// SignedHeader returns the pre-set header for height, or an error if absent.
// Uses a single comma-ok map access instead of the original double lookup.
func (p *mock) SignedHeader(height int64) (*types.SignedHeader, error) {
	if sh, ok := p.headers[height]; ok {
		return sh, nil
	}
	return nil, errors.Errorf("no header at height %d", height)
}

// ValidatorSet returns the pre-set validator set for height, or an error if
// absent. Uses a single comma-ok map access instead of the original double
// lookup.
func (p *mock) ValidatorSet(height int64) (*types.ValidatorSet, error) {
	if vals, ok := p.vals[height]; ok {
		return vals, nil
	}
	return nil, errors.Errorf("no vals for height %d", height)
}

+ 33
- 0
lite2/provider/provider.go View File

@ -0,0 +1,33 @@
package provider
import "github.com/tendermint/tendermint/types"
// Provider provides information for the lite client to sync (verification
// happens in the client).
type Provider interface {
	// ChainID returns the blockchain ID.
	ChainID() string

	// SignedHeader returns the SignedHeader that corresponds to the given
	// height.
	//
	// 0 - the latest.
	// height must be >= 0.
	//
	// If the provider fails to fetch the SignedHeader due to the IO or other
	// issues, an error will be returned.
	// If there's no SignedHeader for the given height, ErrSignedHeaderNotFound
	// will be returned.
	SignedHeader(height int64) (*types.SignedHeader, error)

	// ValidatorSet returns the ValidatorSet that corresponds to height.
	//
	// 0 - the latest.
	// height must be >= 0.
	//
	// If the provider fails to fetch the ValidatorSet due to the IO or other
	// issues, an error will be returned.
	// If there's no ValidatorSet for the given height, ErrValidatorSetNotFound
	// will be returned.
	ValidatorSet(height int64) (*types.ValidatorSet, error)
}

+ 102
- 0
lite2/proxy/proxy.go View File

@ -0,0 +1,102 @@
package proxy
import (
"context"
"net"
"net/http"
"github.com/pkg/errors"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
lrpc "github.com/tendermint/tendermint/lite2/rpc"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
)
// A Proxy defines parameters for running an HTTP server proxy.
type Proxy struct {
	Addr   string            // TCP address to listen on, ":http" if empty
	Config *rpcserver.Config // RPC server configuration
	Codec  *amino.Codec      // codec used for (de)serializing RPC payloads
	Client *lrpc.Client      // verifying lite RPC client requests are proxied through
	Logger log.Logger
}
// ListenAndServe configures the rpcserver.WebsocketManager, sets up the RPC
// routes to proxy via Client, and starts up an HTTP server on the TCP network
// address p.Addr.
// See http#Server#ListenAndServe.
func (p *Proxy) ListenAndServe() error {
	listener, mux, err := p.listen()
	if err != nil {
		return err
	}
	return rpcserver.StartHTTPServer(listener, mux, p.Logger, p.Config)
}

// ListenAndServeTLS acts identically to ListenAndServe, except that it expects
// HTTPS connections.
// See http#Server#ListenAndServeTLS.
func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error {
	listener, mux, err := p.listen()
	if err != nil {
		return err
	}
	return rpcserver.StartHTTPAndTLSServer(listener, mux, certFile, keyFile, p.Logger, p.Config)
}
// listen registers the RPC routes and the websocket manager on a fresh
// ServeMux, starts the underlying lite RPC client if needed, and opens a
// TCP listener on p.Addr. The returned listener and mux are ready to be
// passed to an HTTP server.
func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) {
	ctypes.RegisterAmino(p.Codec)
	mux := http.NewServeMux()

	// 1) Register regular routes.
	r := RPCRoutes(p.Client)
	rpcserver.RegisterRPCFuncs(mux, r, p.Codec, p.Logger)

	// 2) Allow websocket connections.
	wmLogger := p.Logger.With("protocol", "websocket")
	wm := rpcserver.NewWebsocketManager(r, p.Codec,
		rpcserver.OnDisconnect(func(remoteAddr string) {
			// Clean up any event subscriptions the disconnected peer held.
			err := p.Client.UnsubscribeAll(context.Background(), remoteAddr)
			if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
				wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
			}
		}),
		rpcserver.ReadLimit(p.Config.MaxBodyBytes),
	)
	wm.SetLogger(wmLogger)
	mux.HandleFunc("/websocket", wm.WebsocketHandler)

	// 3) Start a client.
	if !p.Client.IsRunning() {
		if err := p.Client.Start(); err != nil {
			return nil, mux, errors.Wrap(err, "Client#Start")
		}
	}

	// 4) Start listening for new connections.
	listener, err := rpcserver.Listen(p.Addr, p.Config)
	if err != nil {
		return nil, mux, err
	}

	return listener, mux, nil
}

+ 229
- 0
lite2/proxy/routes.go View File

@ -0,0 +1,229 @@
package proxy
import (
cmn "github.com/tendermint/tendermint/libs/common"
lrpc "github.com/tendermint/tendermint/lite2/rpc"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
"github.com/tendermint/tendermint/types"
)
// RPCRoutes returns the route map (method name -> RPC function) for the
// proxy, with every request served through the given lite RPC client so
// that responses can be verified where the client supports it.
func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
	return map[string]*rpcserver.RPCFunc{
		// Subscribe/unsubscribe are reserved for websocket events.
		"subscribe":       rpcserver.NewWSRPCFunc(c.SubscribeWS, "query"),
		"unsubscribe":     rpcserver.NewWSRPCFunc(c.UnsubscribeWS, "query"),
		"unsubscribe_all": rpcserver.NewWSRPCFunc(c.UnsubscribeAllWS, ""),

		// info API
		"health":               rpcserver.NewRPCFunc(makeHealthFunc(c), ""),
		"status":               rpcserver.NewRPCFunc(makeStatusFunc(c), ""),
		"net_info":             rpcserver.NewRPCFunc(makeNetInfoFunc(c), ""),
		"blockchain":           rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"),
		"genesis":              rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
		"block":                rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
		"block_results":        rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"),
		"commit":               rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
		"tx":                   rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"),
		"tx_search":            rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page"),
		"validators":           rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page"),
		"dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""),
		"consensus_state":      rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""),
		"consensus_params":     rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height"),
		"unconfirmed_txs":      rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"),
		"num_unconfirmed_txs":  rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""),

		// tx broadcast API
		"broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx"),
		"broadcast_tx_sync":   rpcserver.NewRPCFunc(makeBroadcastTxSyncFunc(c), "tx"),
		"broadcast_tx_async":  rpcserver.NewRPCFunc(makeBroadcastTxAsyncFunc(c), "tx"),

		// abci API
		"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove"),
		"abci_info":  rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""),

		// evidence API
		"broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"),
	}
}
// rpcHealthFunc is the handler signature for the health endpoint.
type rpcHealthFunc func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error)

// makeHealthFunc returns a handler that proxies Health.
func makeHealthFunc(c *lrpc.Client) rpcHealthFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) {
		return c.Health()
	}
}

// rpcStatusFunc is the handler signature for the status endpoint.
type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error)

// makeStatusFunc returns a handler that proxies Status.
// nolint: interfacer
func makeStatusFunc(c *lrpc.Client) rpcStatusFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
		return c.Status()
	}
}
// rpcNetInfoFunc is the handler signature for the net_info endpoint.
type rpcNetInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error)

// makeNetInfoFunc returns a handler that proxies NetInfo.
//
// Bug fix: the handler previously declared (minHeight, maxHeight int64)
// parameters (apparently copied from makeBlockchainInfoFunc) even though
// net_info is registered in RPCRoutes with an empty parameter list ("") and
// c.NetInfo() takes no arguments; the signature now matches the
// registration.
func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
		return c.NetInfo()
	}
}
// rpcBlockchainInfoFunc is the handler signature for the blockchain endpoint.
type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)

// makeBlockchainInfoFunc returns a handler that proxies BlockchainInfo.
func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc {
	return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
		return c.BlockchainInfo(minHeight, maxHeight)
	}
}

// rpcGenesisFunc is the handler signature for the genesis endpoint.
type rpcGenesisFunc func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error)

// makeGenesisFunc returns a handler that proxies Genesis.
func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
		return c.Genesis()
	}
}

// rpcBlockFunc is the handler signature for the block endpoint.
type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error)

// makeBlockFunc returns a handler that proxies Block.
func makeBlockFunc(c *lrpc.Client) rpcBlockFunc {
	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) {
		return c.Block(height)
	}
}

// rpcBlockResultsFunc is the handler signature for the block_results endpoint.
type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error)

// makeBlockResultsFunc returns a handler that proxies BlockResults.
func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc {
	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) {
		return c.BlockResults(height)
	}
}

// rpcCommitFunc is the handler signature for the commit endpoint.
type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error)

// makeCommitFunc returns a handler that proxies Commit.
func makeCommitFunc(c *lrpc.Client) rpcCommitFunc {
	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) {
		return c.Commit(height)
	}
}

// rpcTxFunc is the handler signature for the tx endpoint.
type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error)

// makeTxFunc returns a handler that proxies Tx.
func makeTxFunc(c *lrpc.Client) rpcTxFunc {
	return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
		return c.Tx(hash, prove)
	}
}

// rpcTxSearchFunc is the handler signature for the tx_search endpoint.
type rpcTxSearchFunc func(ctx *rpctypes.Context, query string, prove bool,
	page, perPage int) (*ctypes.ResultTxSearch, error)

// makeTxSearchFunc returns a handler that proxies TxSearch.
func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc {
	return func(ctx *rpctypes.Context, query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) {
		return c.TxSearch(query, prove, page, perPage)
	}
}

// rpcValidatorsFunc is the handler signature for the validators endpoint.
type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64,
	page, perPage int) (*ctypes.ResultValidators, error)

// makeValidatorsFunc returns a handler that proxies Validators.
func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc {
	return func(ctx *rpctypes.Context, height *int64, page, perPage int) (*ctypes.ResultValidators, error) {
		return c.Validators(height, page, perPage)
	}
}

// rpcDumpConsensusStateFunc is the handler signature for dump_consensus_state.
type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error)

// makeDumpConsensusStateFunc returns a handler that proxies DumpConsensusState.
func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) {
		return c.DumpConsensusState()
	}
}

// rpcConsensusStateFunc is the handler signature for consensus_state.
type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error)

// makeConsensusStateFunc returns a handler that proxies ConsensusState.
func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) {
		return c.ConsensusState()
	}
}

// rpcConsensusParamsFunc is the handler signature for consensus_params.
type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error)

// makeConsensusParamsFunc returns a handler that proxies ConsensusParams.
func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc {
	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
		return c.ConsensusParams(height)
	}
}

// rpcUnconfirmedTxsFunc is the handler signature for unconfirmed_txs.
type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error)

// makeUnconfirmedTxsFunc returns a handler that proxies UnconfirmedTxs.
func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc {
	return func(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) {
		return c.UnconfirmedTxs(limit)
	}
}

// rpcNumUnconfirmedTxsFunc is the handler signature for num_unconfirmed_txs.
type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error)

// makeNumUnconfirmedTxsFunc returns a handler that proxies NumUnconfirmedTxs.
func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) {
		return c.NumUnconfirmedTxs()
	}
}

// rpcBroadcastTxCommitFunc is the handler signature for broadcast_tx_commit.
type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)

// makeBroadcastTxCommitFunc returns a handler that proxies BroadcastTxCommit.
func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc {
	return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
		return c.BroadcastTxCommit(tx)
	}
}

// rpcBroadcastTxSyncFunc is the handler signature for broadcast_tx_sync.
type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error)

// makeBroadcastTxSyncFunc returns a handler that proxies BroadcastTxSync.
func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc {
	return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
		return c.BroadcastTxSync(tx)
	}
}

// rpcBroadcastTxAsyncFunc is the handler signature for broadcast_tx_async.
type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error)

// makeBroadcastTxAsyncFunc returns a handler that proxies BroadcastTxAsync.
func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc {
	return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
		return c.BroadcastTxAsync(tx)
	}
}

// rpcABCIQueryFunc is the handler signature for abci_query.
type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error)

// makeABCIQueryFunc returns a handler that proxies ABCIQuery.
func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc {
	return func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
		return c.ABCIQuery(path, data)
	}
}

// rpcABCIInfoFunc is the handler signature for abci_info.
type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error)

// makeABCIInfoFunc returns a handler that proxies ABCIInfo.
func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc {
	return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
		return c.ABCIInfo()
	}
}

// rpcBroadcastEvidenceFunc is the handler signature for broadcast_evidence.
type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)

// makeBroadcastEvidenceFunc returns a handler that proxies BroadcastEvidence.
// nolint: interfacer
func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc {
	return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
		return c.BroadcastEvidence(ev)
	}
}

+ 418
- 0
lite2/rpc/client.go View File

@ -0,0 +1,418 @@
package rpc
import (
"bytes"
"context"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/tendermint/tendermint/crypto/merkle"
cmn "github.com/tendermint/tendermint/libs/common"
lite "github.com/tendermint/tendermint/lite2"
rpcclient "github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
"github.com/tendermint/tendermint/types"
)
// Client is an RPC client, which uses lite#Client to verify data (if it can be
// proved!).
type Client struct {
	cmn.BaseService

	next rpcclient.Client     // underlying (untrusted) RPC client
	lc   *lite.Client         // light client used to verify responses
	prt  *merkle.ProofRuntime // proof runtime for ABCI query proofs
}

// Compile-time check that Client implements the full RPC client interface.
var _ rpcclient.Client = (*Client)(nil)

// NewClient returns a new client.
func NewClient(next rpcclient.Client, lc *lite.Client) *Client {
	c := &Client{
		next: next,
		lc:   lc,
		prt:  defaultProofRuntime(),
	}
	c.BaseService = *cmn.NewBaseService(nil, "Client", c)
	return c
}
// OnStart implements cmn.Service: it starts the wrapped client unless it is
// already running.
func (c *Client) OnStart() error {
	if c.next.IsRunning() {
		return nil
	}
	return c.next.Start()
}

// OnStop implements cmn.Service: it stops the wrapped client if it is still
// running.
func (c *Client) OnStop() {
	if c.next.IsRunning() {
		c.next.Stop()
	}
}
// Status forwards the call to the wrapped client (response is not verified).
func (c *Client) Status() (*ctypes.ResultStatus, error) {
	return c.next.Status()
}

// ABCIInfo forwards the call to the wrapped client (response is not verified).
func (c *Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
	return c.next.ABCIInfo()
}

// ABCIQuery delegates to ABCIQueryWithOptions with the default options.
func (c *Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
	return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions)
}
// ABCIQueryWithOptions is useful if you want full access to the
// ABCIQueryOptions. It performs the query via the wrapped client and then
// verifies the returned value (or absence) proof against a trusted header.
// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store.
func (c *Client) ABCIQueryWithOptions(path string, data cmn.HexBytes,
	opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {

	res, err := c.next.ABCIQueryWithOptions(path, data, opts)
	if err != nil {
		return nil, err
	}
	resp := res.Response

	// Validate the response.
	if resp.IsErr() {
		return nil, errors.Errorf("err response code: %v", resp.Code)
	}
	if len(resp.Key) == 0 || resp.Proof == nil {
		return nil, errors.New("empty tree")
	}
	if resp.Height <= 0 {
		return nil, errors.New("negative or zero height")
	}

	// Update the light client if we're behind.
	if err := c.updateLiteClientIfNeededTo(resp.Height + 1); err != nil {
		return nil, err
	}

	// AppHash for height H is in header H+1.
	h, err := c.lc.TrustedHeader(resp.Height+1, time.Now())
	if err != nil {
		return nil, errors.Wrapf(err, "TrustedHeader(%d)", resp.Height+1)
	}

	// Validate the value proof against the trusted header.
	if resp.Value != nil {
		// Value exists.
		// XXX How do we encode the key into a string...
		storeName, err := parseQueryStorePath(path)
		if err != nil {
			return nil, err
		}
		kp := merkle.KeyPath{}
		kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
		kp = kp.AppendKey(resp.Key, merkle.KeyEncodingURL)
		err = c.prt.VerifyValue(resp.Proof, h.AppHash, kp.String(), resp.Value)
		if err != nil {
			return nil, errors.Wrap(err, "verify value proof")
		}
		return &ctypes.ResultABCIQuery{Response: resp}, nil
	}

	// OR validate the absence proof against the trusted header.
	// XXX How do we encode the key into a string...
	err = c.prt.VerifyAbsence(resp.Proof, h.AppHash, string(resp.Key))
	if err != nil {
		return nil, errors.Wrap(err, "verify absence proof")
	}
	return &ctypes.ResultABCIQuery{Response: resp}, nil
}
// BroadcastTxCommit forwards to the wrapped client (result not verified).
func (c *Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
	return c.next.BroadcastTxCommit(tx)
}

// BroadcastTxAsync forwards to the wrapped client (result not verified).
func (c *Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
	return c.next.BroadcastTxAsync(tx)
}

// BroadcastTxSync forwards to the wrapped client (result not verified).
func (c *Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
	return c.next.BroadcastTxSync(tx)
}

// UnconfirmedTxs forwards to the wrapped client (result not verified).
func (c *Client) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
	return c.next.UnconfirmedTxs(limit)
}

// NumUnconfirmedTxs forwards to the wrapped client (result not verified).
func (c *Client) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
	return c.next.NumUnconfirmedTxs()
}

// NetInfo forwards to the wrapped client (result not verified).
func (c *Client) NetInfo() (*ctypes.ResultNetInfo, error) {
	return c.next.NetInfo()
}

// DumpConsensusState forwards to the wrapped client (result not verified).
func (c *Client) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
	return c.next.DumpConsensusState()
}

// ConsensusState forwards to the wrapped client (result not verified).
func (c *Client) ConsensusState() (*ctypes.ResultConsensusState, error) {
	return c.next.ConsensusState()
}

// ConsensusParams forwards to the wrapped client (result not verified).
func (c *Client) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) {
	return c.next.ConsensusParams(height)
}

// Health forwards to the wrapped client (result not verified).
func (c *Client) Health() (*ctypes.ResultHealth, error) {
	return c.next.Health()
}
// BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header
// returned.
func (c *Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
	res, err := c.next.BlockchainInfo(minHeight, maxHeight)
	if err != nil {
		return nil, err
	}

	// Validate res: every meta must be present and well-formed before we try
	// to verify anything.
	for _, meta := range res.BlockMetas {
		if meta == nil {
			return nil, errors.New("nil BlockMeta")
		}
		if err := meta.ValidateBasic(); err != nil {
			return nil, errors.Wrap(err, "invalid BlockMeta")
		}
	}

	// Update the light client if we're behind.
	// NOTE(review): this assumes the last element of BlockMetas carries the
	// highest height — confirm the ordering guarantee of the RPC response.
	if len(res.BlockMetas) > 0 {
		lastHeight := res.BlockMetas[len(res.BlockMetas)-1].Header.Height
		if err := c.updateLiteClientIfNeededTo(lastHeight); err != nil {
			return nil, err
		}
	}

	// Verify each of the BlockMetas against its trusted header.
	for _, meta := range res.BlockMetas {
		h, err := c.lc.TrustedHeader(meta.Header.Height, time.Now())
		if err != nil {
			return nil, errors.Wrapf(err, "TrustedHeader(%d)", meta.Header.Height)
		}
		if bmH, tH := meta.Header.Hash(), h.Hash(); !bytes.Equal(bmH, tH) {
			return nil, errors.Errorf("BlockMeta#Header %X does not match with trusted header %X",
				bmH, tH)
		}
	}

	return res, nil
}
// Genesis forwards to the wrapped client (result not verified).
func (c *Client) Genesis() (*ctypes.ResultGenesis, error) {
	return c.next.Genesis()
}
// Block calls rpcclient#Block and then verifies the result: the BlockID must
// match the block, and the block's header must match a trusted header at the
// same height.
func (c *Client) Block(height *int64) (*ctypes.ResultBlock, error) {
	res, err := c.next.Block(height)
	if err != nil {
		return nil, err
	}

	// Validate res.
	if err := res.BlockID.ValidateBasic(); err != nil {
		return nil, err
	}
	if err := res.Block.ValidateBasic(); err != nil {
		return nil, err
	}
	// The returned BlockID must commit to the returned block.
	if bmH, bH := res.BlockID.Hash, res.Block.Hash(); !bytes.Equal(bmH, bH) {
		return nil, errors.Errorf("BlockID %X does not match with Block %X",
			bmH, bH)
	}

	// Update the light client if we're behind.
	if err := c.updateLiteClientIfNeededTo(res.Block.Height); err != nil {
		return nil, err
	}

	// Verify block against the trusted header at the same height.
	h, err := c.lc.TrustedHeader(res.Block.Height, time.Now())
	if err != nil {
		return nil, errors.Wrapf(err, "TrustedHeader(%d)", res.Block.Height)
	}
	if bH, tH := res.Block.Hash(), h.Hash(); !bytes.Equal(bH, tH) {
		return nil, errors.Errorf("Block#Header %X does not match with trusted header %X",
			bH, tH)
	}

	return res, nil
}
// BlockResults forwards to the wrapped client (result not verified).
func (c *Client) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) {
	return c.next.BlockResults(height)
}
// Commit calls rpcclient#Commit and then verifies that the returned signed
// header matches the trusted header at the same height.
func (c *Client) Commit(height *int64) (*ctypes.ResultCommit, error) {
	res, err := c.next.Commit(height)
	if err != nil {
		return nil, err
	}

	// Validate res.
	if err := res.SignedHeader.ValidateBasic(c.lc.ChainID()); err != nil {
		return nil, err
	}

	// Update the light client if we're behind.
	if err := c.updateLiteClientIfNeededTo(res.Height); err != nil {
		return nil, err
	}

	// Verify commit against the trusted header at the same height.
	h, err := c.lc.TrustedHeader(res.Height, time.Now())
	if err != nil {
		return nil, errors.Wrapf(err, "TrustedHeader(%d)", res.Height)
	}
	if rH, tH := res.Hash(), h.Hash(); !bytes.Equal(rH, tH) {
		return nil, errors.Errorf("header %X does not match with trusted header %X",
			rH, tH)
	}

	return res, nil
}
// Tx calls rpcclient#Tx method and then verifies the proof if such was
// requested.
func (c *Client) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
	res, err := c.next.Tx(hash, prove)
	if err != nil || !prove {
		// Either the call failed or no proof was requested: nothing to verify.
		return res, err
	}

	// Validate res.
	if res.Height <= 0 {
		return nil, errors.Errorf("invalid ResultTx: %v", res)
	}

	// Update the light client if we're behind.
	if err := c.updateLiteClientIfNeededTo(res.Height); err != nil {
		return nil, err
	}

	// Validate the proof against the trusted header's DataHash.
	h, err := c.lc.TrustedHeader(res.Height, time.Now())
	if err != nil {
		return res, errors.Wrapf(err, "TrustedHeader(%d)", res.Height)
	}
	return res, res.Proof.Validate(h.DataHash)
}
// TxSearch forwards to the wrapped client (results not verified).
func (c *Client) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) {
	return c.next.TxSearch(query, prove, page, perPage)
}

// Validators forwards to the wrapped client (results not verified).
func (c *Client) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) {
	return c.next.Validators(height, page, perPage)
}

// BroadcastEvidence forwards to the wrapped client.
func (c *Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
	return c.next.BroadcastEvidence(ev)
}

// Subscribe forwards to the wrapped client (events not verified).
func (c *Client) Subscribe(ctx context.Context, subscriber, query string,
	outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
	return c.next.Subscribe(ctx, subscriber, query, outCapacity...)
}

// Unsubscribe forwards to the wrapped client.
func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error {
	return c.next.Unsubscribe(ctx, subscriber, query)
}

// UnsubscribeAll forwards to the wrapped client.
func (c *Client) UnsubscribeAll(ctx context.Context, subscriber string) error {
	return c.next.UnsubscribeAll(ctx, subscriber)
}
// updateLiteClientIfNeededTo makes sure the light client has verified headers
// up to (at least) the given height before we use a trusted header at that
// height.
func (c *Client) updateLiteClientIfNeededTo(height int64) error {
	lastTrustedHeight, err := c.lc.LastTrustedHeight()
	if err != nil {
		return errors.Wrap(err, "LastTrustedHeight")
	}
	if lastTrustedHeight >= height {
		// Already trusted up to the requested height; nothing to do.
		return nil
	}
	if err := c.lc.VerifyHeaderAtHeight(height, time.Now()); err != nil {
		return errors.Wrapf(err, "VerifyHeaderAtHeight(%d)", height)
	}
	return nil
}
// RegisterOpDecoder registers an additional proof-operation decoder on the
// client's proof runtime.
func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) {
	c.prt.RegisterOpDecoder(typ, dec)
}
// SubscribeWS subscribes for events using the given query and remote address as
// a subscriber, but does not verify responses (UNSAFE)!
// TODO: verify data
func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
	out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query)
	if err != nil {
		return nil, err
	}

	// Forward events to the websocket connection until this Client stops.
	go func() {
		for {
			select {
			case resultEvent := <-out:
				// We should have a switch here that performs a validation
				// depending on the event's type.
				ctx.WSConn.TryWriteRPCResponse(
					rpctypes.NewRPCSuccessResponse(
						ctx.WSConn.Codec(),
						rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", ctx.JSONReq.ID)),
						resultEvent,
					))
			case <-c.Quit():
				// Client is shutting down; stop forwarding events.
				return
			}
		}
	}()

	return &ctypes.ResultSubscribe{}, nil
}
// UnsubscribeWS calls original client's Unsubscribe using remote address as a
// subscriber.
func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
	if err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query); err != nil {
		return nil, err
	}
	return &ctypes.ResultUnsubscribe{}, nil
}

// UnsubscribeAllWS calls original client's UnsubscribeAll using remote address
// as a subscriber.
func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
	if err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr()); err != nil {
		return nil, err
	}
	return &ctypes.ResultUnsubscribe{}, nil
}
// parseQueryStorePath extracts the store name from an ABCI query path of the
// form /store/<storeName>/key.
func parseQueryStorePath(path string) (storeName string, err error) {
	if !strings.HasPrefix(path, "/") {
		return "", fmt.Errorf("expected path to start with /")
	}

	// Expect exactly "store", the store name, and "key".
	paths := strings.SplitN(path[1:], "/", 3)
	if len(paths) != 3 || paths[0] != "store" || paths[2] != "key" {
		return "", errors.New("expected format like /store/<storeName>/key")
	}

	return paths[1], nil
}

+ 14
- 0
lite2/rpc/proof.go View File

@ -0,0 +1,14 @@
package rpc
import (
"github.com/tendermint/tendermint/crypto/merkle"
)
// defaultProofRuntime builds a ProofRuntime that can decode simple-value
// Merkle proof operations.
func defaultProofRuntime() *merkle.ProofRuntime {
	prt := merkle.NewProofRuntime()
	prt.RegisterOpDecoder(merkle.ProofOpSimpleValue, merkle.SimpleValueOpDecoder)
	return prt
}

+ 162
- 0
lite2/rpc/query_test.go View File

@ -0,0 +1,162 @@
package rpc
//import (
// "fmt"
// "os"
// "testing"
// "time"
// "github.com/stretchr/testify/assert"
// "github.com/stretchr/testify/require"
// "github.com/tendermint/tendermint/abci/example/kvstore"
// "github.com/tendermint/tendermint/crypto/merkle"
// "github.com/tendermint/tendermint/lite"
// certclient "github.com/tendermint/tendermint/lite/client"
// nm "github.com/tendermint/tendermint/node"
// "github.com/tendermint/tendermint/rpc/client"
// rpctest "github.com/tendermint/tendermint/rpc/test"
// "github.com/tendermint/tendermint/types"
//)
//var node *nm.Node
//var chainID = "tendermint_test" // TODO use from config.
////nolint:unused
//var waitForEventTimeout = 5 * time.Second
//// TODO fix tests!!
//func TestMain(m *testing.M) {
// app := kvstore.NewKVStoreApplication()
// node = rpctest.StartTendermint(app)
// code := m.Run()
// rpctest.StopTendermint(node)
// os.Exit(code)
//}
//func kvstoreTx(k, v []byte) []byte {
// return []byte(fmt.Sprintf("%s=%s", k, v))
//}
//// TODO: enable it after general proof format has been adapted
//// in abci/examples/kvstore.go
////nolint:unused,deadcode
//func _TestAppProofs(t *testing.T) {
// assert, require := assert.New(t), require.New(t)
// prt := defaultProofRuntime()
// cl := client.NewLocal(node)
// client.WaitForHeight(cl, 1, nil)
// // This sets up our trust on the node based on some past point.
// source := certclient.NewProvider(chainID, cl)
// seed, err := source.LatestFullCommit(chainID, 1, 1)
// require.NoError(err, "%#v", err)
// cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// // Wait for tx confirmation.
// done := make(chan int64)
// go func() {
// evtTyp := types.EventTx
// _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout)
// require.Nil(err, "%#v", err)
// close(done)
// }()
// // Submit a transaction.
// k := []byte("my-key")
// v := []byte("my-value")
// tx := kvstoreTx(k, v)
// br, err := cl.BroadcastTxCommit(tx)
// require.NoError(err, "%#v", err)
// require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
// require.EqualValues(0, br.DeliverTx.Code)
// brh := br.Height
// // Fetch latest after tx commit.
// <-done
// latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
// require.NoError(err, "%#v", err)
// rootHash := latest.SignedHeader.AppHash
// if rootHash == nil {
// // Fetch one block later, AppHash hasn't been committed yet.
// // TODO find a way to avoid doing this.
// client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil)
// latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1)
// require.NoError(err, "%#v", err)
// rootHash = latest.SignedHeader.AppHash
// }
// require.NotNil(rootHash)
// // verify a query before the tx block has no data (and valid non-exist proof)
// bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert)
// require.NoError(err, "%#v", err)
// require.NotNil(proof)
// require.Equal(height, brh-1)
// // require.NotNil(proof)
// // TODO: Ensure that *some* keys will be there, ensuring that proof is nil,
// // (currently there's a race condition)
// // and ensure that proof proves absence of k.
// require.Nil(bs)
// // but given that block it is good
// bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert)
// require.NoError(err, "%#v", err)
// require.NotNil(proof)
// require.Equal(height, brh)
// assert.EqualValues(v, bs)
// err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding
// assert.NoError(err, "%#v", err)
// // Test non-existing key.
// missing := []byte("my-missing-key")
// bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert)
// require.NoError(err)
// require.Nil(bs)
// require.NotNil(proof)
// err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding
// assert.NoError(err, "%#v", err)
// err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding
// assert.Error(err, "%#v", err)
//}
//func TestTxProofs(t *testing.T) {
// assert, require := assert.New(t), require.New(t)
// cl := client.NewLocal(node)
// client.WaitForHeight(cl, 1, nil)
// tx := kvstoreTx([]byte("key-a"), []byte("value-a"))
// br, err := cl.BroadcastTxCommit(tx)
// require.NoError(err, "%#v", err)
// require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
// require.EqualValues(0, br.DeliverTx.Code)
// brh := br.Height
// source := certclient.NewProvider(chainID, cl)
// seed, err := source.LatestFullCommit(chainID, brh-2, brh-2)
// require.NoError(err, "%#v", err)
// cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// // First let's make sure a bogus transaction hash returns a valid non-existence proof.
// key := types.Tx([]byte("bogus")).Hash()
// _, err = cl.Tx(key, true)
// require.NotNil(err)
// require.Contains(err.Error(), "not found")
// // Now let's check with the real tx root hash.
// key = types.Tx(tx).Hash()
// res, err := cl.Tx(key, true)
// require.NoError(err, "%#v", err)
// require.NotNil(res)
// keyHash := merkle.SimpleHashFromByteSlices([][]byte{key})
// err = res.Proof.Validate(keyHash)
// assert.NoError(err, "%#v", err)
// commit, err := GetCertifiedCommit(br.Height, cl, cert)
// require.Nil(err, "%#v", err)
// require.Equal(res.Proof.RootHash, commit.Header.DataHash)
//}

+ 132
- 0
lite2/store/db/db.go View File

@ -0,0 +1,132 @@
package db
import (
"errors"
"fmt"
"regexp"
"strconv"
"github.com/tendermint/go-amino"
dbm "github.com/tendermint/tm-db"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
"github.com/tendermint/tendermint/lite2/store"
"github.com/tendermint/tendermint/types"
)
type dbs struct {
db dbm.DB
prefix string
cdc *amino.Codec
}
// New returns a Store that wraps any DB (with an optional prefix in case you
// want to use one DB with many light clients).
func New(db dbm.DB, prefix string) store.Store {
	codec := amino.NewCodec()
	cryptoAmino.RegisterAmino(codec)
	return &dbs{
		db:     db,
		prefix: prefix,
		cdc:    codec,
	}
}
// SaveSignedHeader persists sh under a key derived from its height.
// Panics if the header height is not positive.
func (s *dbs) SaveSignedHeader(sh *types.SignedHeader) error {
	if sh.Height <= 0 {
		panic("negative or zero height")
	}

	data, err := s.cdc.MarshalBinaryLengthPrefixed(sh)
	if err != nil {
		return err
	}

	s.db.Set(s.shKey(sh.Height), data)
	return nil
}
// SaveValidatorSet persists valSet under a key derived from height.
// Panics if height is not positive.
func (s *dbs) SaveValidatorSet(valSet *types.ValidatorSet, height int64) error {
	if height <= 0 {
		panic("negative or zero height")
	}

	data, err := s.cdc.MarshalBinaryLengthPrefixed(valSet)
	if err != nil {
		return err
	}

	s.db.Set(s.vsKey(height), data)
	return nil
}
// SignedHeader loads the SignedHeader stored at the given height.
// It returns (nil, nil) when no header is stored at that height.
func (s *dbs) SignedHeader(height int64) (*types.SignedHeader, error) {
	data := s.db.Get(s.shKey(height))
	if data == nil {
		return nil, nil
	}

	var sh *types.SignedHeader
	if err := s.cdc.UnmarshalBinaryLengthPrefixed(data, &sh); err != nil {
		return nil, err
	}
	return sh, nil
}
// ValidatorSet loads the ValidatorSet stored at the given height.
// It returns (nil, nil) when no validator set is stored at that height.
func (s *dbs) ValidatorSet(height int64) (*types.ValidatorSet, error) {
	data := s.db.Get(s.vsKey(height))
	if data == nil {
		return nil, nil
	}

	var vs *types.ValidatorSet
	if err := s.cdc.UnmarshalBinaryLengthPrefixed(data, &vs); err != nil {
		return nil, err
	}
	return vs, nil
}
// LastSignedHeaderHeight returns the height of the most recently stored
// SignedHeader, or an error if the store contains no headers.
//
// It walks the "sh/" key range in reverse and returns the first key that
// parses as a signed-header key.
func (s *dbs) LastSignedHeaderHeight() (int64, error) {
	itr := s.db.ReverseIterator(
		s.shKey(1),
		append(s.shKey(1<<63-1), byte(0x00)),
	)
	defer itr.Close()

	for itr.Valid() {
		key := itr.Key()
		_, height, ok := parseShKey(key)
		if ok {
			return height, nil
		}
		// Skip keys that are not signed-header keys.
		//
		// FIX: the original loop never advanced the iterator, so a single
		// unparsable key made this spin forever.
		itr.Next()
	}

	return -1, errors.New("no headers found")
}
// shKey returns the database key for the SignedHeader at the given height:
// "sh/<prefix>/<zero-padded height>".
//
// NOTE(review): the %010d padding keeps keys in height order only up to
// 10-digit heights — confirm this bound is acceptable.
func (s *dbs) shKey(height int64) []byte {
	return []byte(fmt.Sprintf("sh/%s/%010d", s.prefix, height))
}
// vsKey returns the database key for the ValidatorSet at the given height:
// "vs/<prefix>/<zero-padded height>".
func (s *dbs) vsKey(height int64) []byte {
	return []byte(fmt.Sprintf("vs/%s/%010d", s.prefix, height))
}
// keyPattern matches store keys of the form "<part>/<prefix>/<height>",
// where part is "sh" (signed header) or "vs" (validator set).
//
// FIX: the previous pattern required a trailing '/', which shKey/vsKey never
// produce, so no stored key ever matched and LastSignedHeaderHeight could
// never find a header.
var keyPattern = regexp.MustCompile(`^(sh|vs)/([^/]*)/([0-9]+)$`)

// parseKey splits a store key into its part ("sh" or "vs"), prefix and
// height. ok is false if the key does not match keyPattern or the height
// cannot be represented as an int64.
func parseKey(key []byte) (part string, prefix string, height int64, ok bool) {
	submatch := keyPattern.FindSubmatch(key)
	if submatch == nil {
		return "", "", 0, false
	}

	part = string(submatch[1])
	prefix = string(submatch[2])

	// Parse directly into int64 so heights beyond 32-bit int work on all
	// platforms (the previous Atoi-based parse truncated on 32-bit ints).
	height, err := strconv.ParseInt(string(submatch[3]), 10, 64)
	if err != nil {
		return "", "", 0, false
	}

	ok = true // good!
	return part, prefix, height, ok
}
// parseShKey parses a signed-header ("sh") key, returning its prefix and
// height. ok is false for any key that is not a signed-header key.
func parseShKey(key []byte) (prefix string, height int64, ok bool) {
	part, prefix, height, ok := parseKey(key)
	if part != "sh" {
		return "", 0, false
	}
	return prefix, height, ok
}

+ 38
- 0
lite2/store/store.go View File

@ -0,0 +1,38 @@
package store
import "github.com/tendermint/tendermint/types"
// Store is anything that can persistently store headers.
type Store interface {
	// SaveSignedHeader saves a SignedHeader.
	//
	// height must be > 0.
	SaveSignedHeader(sh *types.SignedHeader) error

	// SaveValidatorSet saves a ValidatorSet.
	//
	// height must be > 0.
	SaveValidatorSet(valSet *types.ValidatorSet, height int64) error

	// SignedHeader returns the SignedHeader that corresponds to the given
	// height.
	//
	// height must be > 0.
	//
	// If the store is empty and the latest SignedHeader is requested, an error
	// is returned.
	SignedHeader(height int64) (*types.SignedHeader, error)

	// ValidatorSet returns the ValidatorSet that corresponds to height.
	//
	// height must be > 0.
	//
	// If the store is empty and the latest ValidatorSet is requested, an error
	// is returned.
	ValidatorSet(height int64) (*types.ValidatorSet, error)

	// LastSignedHeaderHeight returns the last SignedHeader height.
	//
	// If the store is empty, an error is returned.
	LastSignedHeaderHeight() (int64, error)
}

+ 140
- 0
lite2/test_helpers.go View File

@ -0,0 +1,140 @@
package lite
import (
"time"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// privKeys is a helper type for testing.
//
// It lets us simulate signing with many keys. The main use case is to create
// a set, and call GenSignedHeader to get properly signed header for testing.
//
// You can set different weights of validators each time you call ToValidators,
// and can optionally extend the validator set later with Extend.
//
// The slice order is significant: it determines validator indices when the
// keys are turned into a validator set with equal powers.
type privKeys []crypto.PrivKey
// genPrivKeys produces an array of private keys to generate commits.
func genPrivKeys(n int) privKeys {
	keys := make(privKeys, 0, n)
	for i := 0; i < n; i++ {
		keys = append(keys, ed25519.GenPrivKey())
	}
	return keys
}
// Change returns a copy of the key set with the key at index i replaced by a
// freshly generated one.
func (pkz privKeys) Change(i int) privKeys {
	out := append(privKeys(nil), pkz...)
	out[i] = ed25519.GenPrivKey()
	return out
}
// Extend adds n more keys (to remove, just take a slice).
func (pkz privKeys) Extend(n int) privKeys {
	return append(pkz, genPrivKeys(n)...)
}
// GenSecpPrivKeys produces an array of secp256k1 private keys to generate commits.
func GenSecpPrivKeys(n int) privKeys {
	keys := make(privKeys, 0, n)
	for i := 0; i < n; i++ {
		keys = append(keys, secp256k1.GenPrivKey())
	}
	return keys
}
// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice).
func (pkz privKeys) ExtendSecp(n int) privKeys {
	return append(pkz, GenSecpPrivKeys(n)...)
}
// ToValidators produces a valset from the set of keys.
// The first key has weight `init` and it increases by `inc` every step
// so we can have all the same weight, or a simple linear distribution
// (should be enough for testing).
func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet {
	vals := make([]*types.Validator, 0, len(pkz))
	power := init
	for _, k := range pkz {
		vals = append(vals, types.NewValidator(k.PubKey(), power))
		power += inc
	}
	return types.NewValidatorSet(vals)
}
// signHeader properly signs the header with all keys from first to last exclusive.
// Keys outside [first, last) leave a nil (absent) CommitSig at their index.
func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit {
	commitSigs := make([]*types.CommitSig, len(pkz))

	// We need this list to keep the ordering: votes are placed at the
	// validator index assigned by this (equal-power) validator set.
	vset := pkz.ToValidators(1, 0)

	// Fill in the votes we want.
	for i := first; i < last && i < len(pkz); i++ {
		vote := makeVote(header, vset, pkz[i])
		commitSigs[vote.ValidatorIndex] = vote.CommitSig()
	}

	blockID := types.BlockID{Hash: header.Hash()}
	return types.NewCommit(blockID, commitSigs)
}
// makeVote builds and signs a precommit vote for header using key, with the
// validator index looked up in valset (the returned validator is discarded —
// only the index is used).
//
// NOTE(review): the behavior when key's address is not in valset is not
// visible here — confirm callers only pass keys that are members of valset.
func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote {
	addr := key.PubKey().Address()
	idx, _ := valset.GetByAddress(addr)
	vote := &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   idx,
		Height:           header.Height,
		Round:            1,
		Timestamp:        tmtime.Now(),
		Type:             types.PrecommitType,
		BlockID:          types.BlockID{Hash: header.Hash()},
	}
	// Sign it
	signBytes := vote.SignBytes(header.ChainID)
	// TODO Consider reworking makeVote API to return an error
	sig, err := key.Sign(signBytes)
	if err != nil {
		panic(err)
	}
	vote.Signature = sig
	return vote
}
// genHeader builds a test header with the supplied contents. Fields not set
// here (LastBlockID, LastCommitHash, ...) keep their zero values.
func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs,
	valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header {

	hdr := types.Header{
		ChainID:            chainID,
		Height:             height,
		Time:               bTime,
		AppHash:            appHash,
		ConsensusHash:      consHash,
		LastResultsHash:    resHash,
		DataHash:           txs.Hash(),
		ValidatorsHash:     valset.Hash(),
		NextValidatorsHash: nextValset.Hash(),
	}
	return &hdr
}
// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader.
func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs types.Txs,
	valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader {

	hdr := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash)
	commit := pkz.signHeader(hdr, first, last)
	return &types.SignedHeader{
		Header: hdr,
		Commit: commit,
	}
}

+ 116
- 0
lite2/verifier.go View File

@ -0,0 +1,116 @@
package lite
import (
"bytes"
"time"
"github.com/pkg/errors"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
var (
	// DefaultTrustLevel - new header can be trusted if at least one correct old
	// validator signed it.
	//
	// 1/3 is the lowest fraction accepted by ValidateTrustLevel.
	DefaultTrustLevel = cmn.Fraction{Numerator: 1, Denominator: 3}
)
// Verify checks whether the new header h2 (with its validator set h2Vals) can
// be trusted, given an already trusted header h1 and the validator set
// h1NextVals that follows it.
//
// Checks, in order:
//  1. trustLevel is within [1/3, 1];
//  2. h1 has not expired: h1.Time + trustingPeriod is still after now;
//  3. h2 passes basic sanity checks against h1 (see verifyNewHeaderAndVals);
//  4. adjacent headers (h2.Height == h1.Height+1): h2's validator hash must
//     equal h1NextVals' hash; non-adjacent: at least trustLevel of
//     h1NextVals' voting power must have signed h2's commit;
//  5. +2/3 of h2Vals must have signed h2's commit.
func Verify(
	chainID string,
	h1 *types.SignedHeader,
	h1NextVals *types.ValidatorSet,
	h2 *types.SignedHeader,
	h2Vals *types.ValidatorSet,
	trustingPeriod time.Duration,
	now time.Time,
	trustLevel cmn.Fraction) error {

	if err := ValidateTrustLevel(trustLevel); err != nil {
		return err
	}

	// Ensure last header can still be trusted.
	expirationTime := h1.Time.Add(trustingPeriod)
	if !expirationTime.After(now) {
		return ErrOldHeaderExpired{expirationTime, now}
	}

	if err := verifyNewHeaderAndVals(chainID, h2, h2Vals, h1, now); err != nil {
		return err
	}

	if h2.Height == h1.Height+1 {
		// Adjacent headers: the new validator set must be exactly the one
		// announced by the trusted header.
		if !bytes.Equal(h2.ValidatorsHash, h1NextVals.Hash()) {
			return errors.Errorf("expected old header validators (%X) to match those from new header (%X)",
				h1NextVals.Hash(),
				h2.ValidatorsHash,
			)
		}
	} else {
		// Ensure that +`trustLevel` (default 1/3) or more of last trusted validators signed correctly.
		err := h1NextVals.VerifyCommitTrusting(chainID, h2.Commit.BlockID, h2.Height, h2.Commit, trustLevel)
		if err != nil {
			return err
		}
	}

	// Ensure that +2/3 of new validators signed correctly.
	err := h2Vals.VerifyCommit(chainID, h2.Commit.BlockID, h2.Height, h2.Commit)
	if err != nil {
		return err
	}

	return nil
}
// verifyNewHeaderAndVals performs basic sanity checks on the new header h2
// and its claimed validator set, relative to the trusted header h1:
//
//   - h2 passes ValidateBasic for chainID;
//   - h2's height and time are strictly greater than h1's;
//   - h2's time is not from the future (relative to now);
//   - h2Vals' hash matches h2.ValidatorsHash.
func verifyNewHeaderAndVals(
	chainID string,
	h2 *types.SignedHeader,
	h2Vals *types.ValidatorSet,
	h1 *types.SignedHeader,
	now time.Time) error {

	if err := h2.ValidateBasic(chainID); err != nil {
		return errors.Wrap(err, "h2.ValidateBasic failed")
	}

	if h2.Height <= h1.Height {
		return errors.Errorf("expected new header height %d to be greater than one of old header %d",
			h2.Height,
			h1.Height)
	}

	if !h2.Time.After(h1.Time) {
		return errors.Errorf("expected new header time %v to be after old header time %v",
			h2.Time,
			h1.Time)
	}

	if !h2.Time.Before(now) {
		return errors.Errorf("new header has a time from the future %v (now: %v)",
			h2.Time,
			now)
	}

	if !bytes.Equal(h2.ValidatorsHash, h2Vals.Hash()) {
		// FIX: report the hash that was actually compared (h2.ValidatorsHash).
		// The original printed h2.NextValidatorsHash here, which made the
		// error message misleading.
		return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X)",
			h2Vals.Hash(),
			h2.ValidatorsHash,
		)
	}

	return nil
}
// ValidateTrustLevel checks that trustLevel is within the allowed range [1/3,
// 1]. If not, it returns an error. 1/3 is the minimum amount of trust needed
// which does not break the security model.
func ValidateTrustLevel(lvl cmn.Fraction) error {
	switch {
	case lvl.Denominator == 0,
		lvl.Numerator*3 < lvl.Denominator, // < 1/3
		lvl.Numerator > lvl.Denominator: // > 1
		return errors.Errorf("trustLevel must be within [1/3, 1], given %v", lvl)
	}
	return nil
}

+ 279
- 0
lite2/verifier_test.go View File

@ -0,0 +1,279 @@
package lite
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
// TestVerifyAdjustedHeaders exercises Verify with headers at adjacent heights
// (new height == trusted height + 1): happy paths at 3/3 and 2/3 signing
// power, and failures for insufficient power, mismatched validator sets, a
// wrong chain ID, a non-increasing height and an expired trusted header.
func TestVerifyAdjustedHeaders(t *testing.T) {
	const (
		chainID    = "TestVerifyAdjustedHeaders"
		lastHeight = 1
		nextHeight = 2
	)

	var (
		keys = genPrivKeys(4)
		// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
		vals     = keys.ToValidators(20, 10)
		bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
		header   = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals,
			[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys))
	)

	testCases := []struct {
		newHeader      *types.SignedHeader
		newVals        *types.ValidatorSet
		trustingPeriod time.Duration
		now            time.Time
		expErr         error
		expErrText     string
	}{
		// same header -> error (the new header's height must be greater)
		0: {
			header,
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"expected new header height 1 to be greater than one of old header 1",
		},
		// different chainID -> error
		1: {
			keys.GenSignedHeader("different-chainID", nextHeight, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"h2.ValidateBasic failed: signedHeader belongs to another chain 'different-chainID' not 'TestVerifyAdjustedHeaders'",
		},
		// 3/3 signed -> no error
		2: {
			keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"",
		},
		// 2/3 signed -> no error
		3: {
			keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 1, len(keys)),
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"",
		},
		// 1/3 signed -> error
		4: {
			keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)),
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			types.ErrTooMuchChange{Got: 50, Needed: 93},
			"",
		},
		// vals does not match with what we have -> error
		5: {
			keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, keys.ToValidators(10, 1), vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			keys.ToValidators(10, 1),
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"to match those from new header",
		},
		// vals are inconsistent with newHeader -> error
		6: {
			keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			keys.ToValidators(10, 1),
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"to match those that were supplied",
		},
		// old header has expired -> error
		// (trustingPeriod of 1h with now exactly at bTime+1h: expiry is inclusive)
		7: {
			keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			keys.ToValidators(10, 1),
			1 * time.Hour,
			bTime.Add(1 * time.Hour),
			nil,
			"old header has expired",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			err := Verify(chainID, header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, DefaultTrustLevel)
			switch {
			// exact error expected
			case tc.expErr != nil && assert.Error(t, err):
				assert.Equal(t, tc.expErr, err)
			// substring match expected
			case tc.expErrText != "":
				assert.Contains(t, err.Error(), tc.expErrText)
			default:
				assert.NoError(t, err)
			}
		})
	}
}
// TestVerifyNonAdjustedHeaders exercises Verify with headers that skip
// heights, so the VerifyCommitTrusting path (trustLevel of the old validator
// set must have signed the new commit) is taken instead of the exact
// validator-hash comparison used for adjacent headers.
func TestVerifyNonAdjustedHeaders(t *testing.T) {
	const (
		chainID    = "TestVerifyNonAdjustedHeaders"
		lastHeight = 1
	)

	var (
		keys = genPrivKeys(4)
		// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
		vals     = keys.ToValidators(20, 10)
		bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
		header   = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals,
			[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys))

		// 30, 40, 50
		twoThirds     = keys[1:]
		twoThirdsVals = twoThirds.ToValidators(30, 10)

		// 50
		oneThird     = keys[len(keys)-1:]
		oneThirdVals = oneThird.ToValidators(50, 10)

		// 20
		lessThanOneThird     = keys[0:1]
		lessThanOneThirdVals = lessThanOneThird.ToValidators(20, 10)
	)

	testCases := []struct {
		newHeader      *types.SignedHeader
		newVals        *types.ValidatorSet
		trustingPeriod time.Duration
		now            time.Time
		expErr         error
		expErrText     string
	}{
		// 3/3 new vals signed, 3/3 old vals present -> no error
		0: {
			keys.GenSignedHeader(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"",
		},
		// 2/3 new vals signed, 3/3 old vals present -> no error
		1: {
			keys.GenSignedHeader(chainID, 4, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 1, len(keys)),
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"",
		},
		// 1/3 new vals signed, 3/3 old vals present -> error
		2: {
			keys.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, vals, vals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)),
			vals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			types.ErrTooMuchChange{Got: 50, Needed: 93},
			"",
		},
		// 3/3 new vals signed, 2/3 old vals present -> no error
		3: {
			twoThirds.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, twoThirdsVals, twoThirdsVals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(twoThirds)),
			twoThirdsVals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"",
		},
		// 3/3 new vals signed, 1/3 old vals present -> no error
		4: {
			oneThird.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, oneThirdVals, oneThirdVals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(oneThird)),
			oneThirdVals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			nil,
			"",
		},
		// 3/3 new vals signed, less than 1/3 old vals present -> error
		5: {
			lessThanOneThird.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, lessThanOneThirdVals, lessThanOneThirdVals,
				[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(lessThanOneThird)),
			lessThanOneThirdVals,
			3 * time.Hour,
			bTime.Add(2 * time.Hour),
			types.ErrTooMuchChange{Got: 20, Needed: 46},
			"",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			err := Verify(chainID, header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, DefaultTrustLevel)
			switch {
			// exact error expected
			case tc.expErr != nil && assert.Error(t, err):
				assert.Equal(t, tc.expErr, err)
			// substring match expected
			case tc.expErrText != "":
				assert.Contains(t, err.Error(), tc.expErrText)
			default:
				assert.NoError(t, err)
			}
		})
	}
}
// TestValidateTrustLevel feeds ValidateTrustLevel fractions inside and
// outside the allowed [1/3, 1] range and checks acceptance/rejection.
func TestValidateTrustLevel(t *testing.T) {
	testCases := []struct {
		lvl   cmn.Fraction
		valid bool
	}{
		// valid
		0: {cmn.Fraction{Numerator: 1, Denominator: 1}, true},
		1: {cmn.Fraction{Numerator: 1, Denominator: 3}, true},
		2: {cmn.Fraction{Numerator: 2, Denominator: 3}, true},
		3: {cmn.Fraction{Numerator: 3, Denominator: 3}, true},
		4: {cmn.Fraction{Numerator: 4, Denominator: 5}, true},

		// invalid
		5:  {cmn.Fraction{Numerator: 6, Denominator: 5}, false},
		6:  {cmn.Fraction{Numerator: -1, Denominator: 3}, false},
		7:  {cmn.Fraction{Numerator: 0, Denominator: 1}, false},
		8:  {cmn.Fraction{Numerator: -1, Denominator: -3}, false},
		9:  {cmn.Fraction{Numerator: 0, Denominator: 0}, false},
		10: {cmn.Fraction{Numerator: 1, Denominator: 0}, false},
	}

	for _, tc := range testCases {
		err := ValidateTrustLevel(tc.lvl)
		if tc.valid {
			assert.NoError(t, err)
		} else {
			assert.Error(t, err)
		}
	}
}

+ 9
- 0
rpc/client/httpclient.go View File

@ -260,6 +260,15 @@ func (c *baseRPCClient) ConsensusState() (*ctypes.ResultConsensusState, error) {
return result, nil
}
// ConsensusParams queries the node for its consensus parameters at the given
// height (latest if height is nil) via the "consensus_params" RPC endpoint.
func (c *baseRPCClient) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) {
	res := new(ctypes.ResultConsensusParams)
	params := map[string]interface{}{"height": height}
	if _, err := c.caller.Call("consensus_params", params, res); err != nil {
		return nil, errors.Wrap(err, "ConsensusParams")
	}
	return res, nil
}
func (c *baseRPCClient) Health() (*ctypes.ResultHealth, error) {
result := new(ctypes.ResultHealth)
_, err := c.caller.Call("health", map[string]interface{}{}, result)


+ 1
- 0
rpc/client/interface.go View File

@ -88,6 +88,7 @@ type NetworkClient interface {
NetInfo() (*ctypes.ResultNetInfo, error)
DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
ConsensusState() (*ctypes.ResultConsensusState, error)
ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error)
Health() (*ctypes.ResultHealth, error)
}


+ 4
- 0
rpc/client/localclient.go View File

@ -116,6 +116,10 @@ func (c *Local) ConsensusState() (*ctypes.ResultConsensusState, error) {
return core.ConsensusState(c.ctx)
}
// ConsensusParams returns the consensus parameters at the given height,
// served directly from the in-process node's core.
func (c *Local) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) {
	return core.ConsensusParams(c.ctx, height)
}
func (c *Local) Health() (*ctypes.ResultHealth, error) {
return core.Health(c.ctx)
}


+ 4
- 0
rpc/client/mock/client.go View File

@ -121,6 +121,10 @@ func (c Client) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
return core.DumpConsensusState(&rpctypes.Context{})
}
// ConsensusParams returns the consensus parameters at the given height,
// forwarding to core with a fresh (empty) RPC context.
func (c Client) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) {
	return core.ConsensusParams(&rpctypes.Context{}, height)
}
func (c Client) Health() (*ctypes.ResultHealth, error) {
return core.Health(&rpctypes.Context{})
}


+ 1
- 1
types/block.go View File

@ -706,7 +706,7 @@ func (sh SignedHeader) ValidateBasic(chainID string) error {
// Check ChainID.
if sh.ChainID != chainID {
return fmt.Errorf("header belongs to another chain '%s' not '%s'",
return fmt.Errorf("signedHeader belongs to another chain '%s' not '%s'",
sh.ChainID, chainID)
}
// Check Height.


+ 18
- 0
types/block_meta.go View File

@ -1,5 +1,11 @@
package types
import (
"bytes"
"github.com/pkg/errors"
)
// BlockMeta contains meta information.
type BlockMeta struct {
BlockID BlockID `json:"block_id"`
@ -45,3 +51,15 @@ func (bm *BlockMeta) MarshalTo(data []byte) (int, error) {
func (bm *BlockMeta) Unmarshal(bs []byte) error {
return cdc.UnmarshalBinaryBare(bs, bm)
}
// ValidateBasic performs basic validation: the BlockID must itself be valid
// and its hash must equal the hash of the embedded header.
func (bm *BlockMeta) ValidateBasic() error {
	if err := bm.BlockID.ValidateBasic(); err != nil {
		return err
	}

	headerHash := bm.Header.Hash()
	if !bytes.Equal(bm.BlockID.Hash, headerHash) {
		return errors.Errorf("expected BlockID#Hash and Header#Hash to be the same, got %X != %X",
			bm.BlockID.Hash, headerHash)
	}
	return nil
}

+ 7
- 0
types/block_meta_test.go View File

@ -0,0 +1,7 @@
package types
import "testing"
func TestBlockMetaValidateBasic(t *testing.T) {
	// TODO: cover both the happy path and a BlockID.Hash / Header.Hash
	// mismatch, which ValidateBasic must reject.
}

+ 97
- 24
types/validator_set.go View File

@ -11,6 +11,7 @@ import (
"github.com/pkg/errors"
"github.com/tendermint/tendermint/crypto/merkle"
cmn "github.com/tendermint/tendermint/libs/common"
)
// MaxTotalVotingPower - the maximum allowed total voting power.
@ -593,30 +594,28 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error {
return vals.updateWithChangeSet(changes, true)
}
// Verify that +2/3 of the set had signed the given signBytes.
// VerifyCommit verifies that +2/3 of the validator set signed this commit.
func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error {
if err := commit.ValidateBasic(); err != nil {
return err
}
if vals.Size() != len(commit.Precommits) {
return NewErrInvalidCommitPrecommits(vals.Size(), len(commit.Precommits))
}
if height != commit.Height() {
return NewErrInvalidCommitHeight(height, commit.Height())
}
if !blockID.Equals(commit.BlockID) {
return fmt.Errorf("invalid commit -- wrong block id: want %v got %v",
blockID, commit.BlockID)
if err := vals.verifyCommitBasic(commit, height, blockID); err != nil {
return err
}
talliedVotingPower := int64(0)
for idx, precommit := range commit.Precommits {
// skip absent and nil votes
// NOTE: do we want to check the validity of votes
// for nil?
if precommit == nil {
continue // OK, some precommits can be missing.
}
_, val := vals.GetByIndex(idx)
// The vals and commit have a 1-to-1 correspondance.
// This means we don't need the validator address or to do any lookup.
val := vals.Validators[idx]
// Validate signature.
precommitSignBytes := commit.VoteSignBytes(chainID, idx)
if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
@ -632,10 +631,11 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
// }
}
if talliedVotingPower > vals.TotalVotingPower()*2/3 {
return nil
if got, needed := talliedVotingPower, vals.TotalVotingPower()*2/3; got <= needed {
return ErrTooMuchChange{Got: got, Needed: needed}
}
return errTooMuchChange{talliedVotingPower, vals.TotalVotingPower()*2/3 + 1}
return nil
}
// VerifyFutureCommit will check to see if the set would be valid with a different
@ -717,8 +717,77 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
// }
}
if oldVotingPower <= oldVals.TotalVotingPower()*2/3 {
return errTooMuchChange{oldVotingPower, oldVals.TotalVotingPower()*2/3 + 1}
if got, needed := oldVotingPower, oldVals.TotalVotingPower()*2/3; got <= needed {
return ErrTooMuchChange{Got: got, Needed: needed}
}
return nil
}
// VerifyCommitTrusting verifies that trustLevel ([1/3, 1]) of the validator
// set signed this commit.
// NOTE the given validators do not necessarily correspond to the validator set
// for this commit, but there may be some intersection.
func (vals *ValidatorSet) VerifyCommitTrusting(chainID string, blockID BlockID,
	height int64, commit *Commit, trustLevel cmn.Fraction) error {

	// Sanity-check the trust level (programmer error if violated).
	// NOTE(review): a zero Denominator is not rejected here and would divide
	// by zero below — confirm all callers validate via ValidateTrustLevel.
	if trustLevel.Numerator*3 < trustLevel.Denominator || // < 1/3
		trustLevel.Numerator > trustLevel.Denominator { // > 1
		panic(fmt.Sprintf("trustLevel must be within [1/3, 1], given %v", trustLevel))
	}

	if err := vals.verifyCommitBasic(commit, height, blockID); err != nil {
		return err
	}

	talliedVotingPower := int64(0)
	for idx, precommit := range commit.Precommits {
		// skip absent and nil votes
		// NOTE: do we want to check the validity of votes
		// for nil?
		if precommit == nil {
			continue
		}

		// We don't know the validators that committed this block, so we have to
		// check for each vote if its validator is already known.
		_, val := vals.GetByAddress(precommit.ValidatorAddress)
		if val != nil {
			// Validate signature.
			precommitSignBytes := commit.VoteSignBytes(chainID, idx)
			if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
				return fmt.Errorf("invalid commit -- invalid signature: %v", precommit)
			}

			// Good precommit!
			if blockID.Equals(precommit.BlockID) {
				talliedVotingPower += val.VotingPower
			}
			// else {
			// It's OK that the BlockID doesn't match. We include stray
			// precommits to measure validator availability.
			// }
		}
	}

	// needed is the trustLevel fraction of the power of the validators we
	// know; got must strictly exceed it.
	got := talliedVotingPower
	needed := (vals.TotalVotingPower() * trustLevel.Numerator) / trustLevel.Denominator
	if got <= needed {
		return ErrTooMuchChange{Got: got, Needed: needed}
	}

	return nil
}
// verifyCommitBasic runs the commit checks shared by VerifyCommit and
// VerifyCommitTrusting: the commit passes ValidateBasic and matches the
// expected height and block ID.
func (vals *ValidatorSet) verifyCommitBasic(commit *Commit, height int64, blockID BlockID) error {
	if err := commit.ValidateBasic(); err != nil {
		return err
	}
	if height != commit.Height() {
		return NewErrInvalidCommitHeight(height, commit.Height())
	}
	if !blockID.Equals(commit.BlockID) {
		return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v",
			blockID, commit.BlockID)
	}
	return nil
}
@ -726,18 +795,22 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
//-----------------
// ErrTooMuchChange
// IsErrTooMuchChange returns true if err is related to changes in validator
// set exceeding max limit.
// TODO: remove
func IsErrTooMuchChange(err error) bool {
_, ok := errors.Cause(err).(errTooMuchChange)
_, ok := errors.Cause(err).(ErrTooMuchChange)
return ok
}
type errTooMuchChange struct {
got int64
needed int64
// ErrTooMuchChange indicates that changes in the validator set exceeded max limit.
type ErrTooMuchChange struct {
Got int64
Needed int64
}
func (e errTooMuchChange) Error() string {
return fmt.Sprintf("Invalid commit -- insufficient old voting power: got %v, needed %v", e.got, e.needed)
func (e ErrTooMuchChange) Error() string {
return fmt.Sprintf("invalid commit -- insufficient old voting power: got %d, needed more than %d", e.Got, e.Needed)
}
//----------------


Loading…
Cancel
Save