
light: rename lite2 to light & remove lite (#4946)

This PR removes `lite` and renames `lite2` to `light` throughout the repo.

Signed-off-by: Marko Baricevic <marbar3778@yahoo.com>

Closes: #4944
Marko, 5 years ago (committed by GitHub)
parent commit c2578e2262
63 changed files with 204 additions and 3275 deletions
1. CHANGELOG_PENDING.md (+1 -0)
2. cmd/tendermint/commands/lite.go (+26 -26)
3. cmd/tendermint/main.go (+1 -1)
4. docs/app-dev/app-development.md (+1 -1)
5. docs/guides/go-built-in.md (+1 -1)
6. docs/guides/java.md (+1 -1)
7. docs/guides/kotlin.md (+1 -1)
8. docs/tendermint-core/configuration.md (+1 -1)
9. docs/tendermint-core/light-client-protocol.md (+6 -6)
10. light/client.go (+5 -5)
11. light/client_benchmark_test.go (+15 -15)
12. light/client_test.go (+56 -56)
13. light/doc.go (+7 -7)
14. light/errors.go (+2 -2)
15. light/example_test.go (+13 -13)
16. light/helpers_test.go (+1 -1)
17. light/provider/errors.go (+0 -0)
18. light/provider/http/http.go (+1 -1)
19. light/provider/http/http_test.go (+6 -6)
20. light/provider/mock/deadmock.go (+1 -1)
21. light/provider/mock/mock.go (+1 -1)
22. light/provider/provider.go (+1 -1)
23. light/proxy/proxy.go (+1 -1)
24. light/proxy/routes.go (+1 -1)
25. light/rpc/client.go (+14 -14)
26. light/rpc/proof.go (+0 -0)
27. light/rpc/query_test.go (+0 -2)
28. light/setup.go (+7 -7)
29. light/store/db/db.go (+1 -1)
30. light/store/db/db_test.go (+0 -0)
31. light/store/errors.go (+0 -0)
32. light/store/store.go (+0 -0)
33. light/trust_options.go (+1 -1)
34. light/verifier.go (+1 -1)
35. light/verifier_test.go (+10 -10)
36. lite/base_verifier.go (+0 -79)
37. lite/base_verifier_test.go (+0 -66)
38. lite/client/provider.go (+0 -139)
39. lite/client/provider_test.go (+0 -62)
40. lite/commit.go (+0 -87)
41. lite/dbprovider.go (+0 -285)
42. lite/doc.go (+0 -133)
43. lite/dynamic_verifier.go (+0 -275)
44. lite/dynamic_verifier_test.go (+0 -299)
45. lite/errors/errors.go (+0 -99)
46. lite/helpers.go (+0 -159)
47. lite/multiprovider.go (+0 -85)
48. lite/provider.go (+0 -32)
49. lite/provider_test.go (+0 -141)
50. lite/proxy/block.go (+0 -48)
51. lite/proxy/errors.go (+0 -21)
52. lite/proxy/proof.go (+0 -14)
53. lite/proxy/proxy.go (+0 -187)
54. lite/proxy/query.go (+0 -148)
55. lite/proxy/query_test.go (+0 -163)
56. lite/proxy/validate_test.go (+0 -211)
57. lite/proxy/verifier.go (+0 -49)
58. lite/proxy/wrapper.go (+0 -275)
59. lite/types.go (+0 -13)
60. node/node.go (+3 -3)
61. statesync/stateprovider.go (+15 -15)
62. types/block.go (+1 -1)
63. types/validator_set.go (+1 -1)

CHANGELOG_PENDING.md (+1 -0)

@@ -20,6 +20,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- sr25519: type `PrivKeySr25519` is now `PrivKey`
- sr25519: type `PubKeySr25519` is now `PubKey`
- multisig: type `PubKeyMultisigThreshold` is now `PubKey`
- [light] \#4946 Rename `lite2` pkg to `light`, the lite cmd has also been renamed to `light`. Remove `lite` implementation.
- Apps
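
For downstream Go code the change is a pure package-path and identifier rename. Below is a minimal, non-authoritative sketch of the updated imports and client construction, mirroring the calls visible in `cmd/tendermint/commands/lite.go` in this diff; the chain ID, trust root values, node addresses, and DB path are placeholders taken from that command's examples and defaults.

```go
package example

import (
	"time"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/light"              // was "github.com/tendermint/tendermint/lite2"
	dbs "github.com/tendermint/tendermint/light/store/db" // was "github.com/tendermint/tendermint/lite2/store/db"
)

// newLightClient mirrors the "fresh instance" branch of runProxy in
// cmd/tendermint/commands/lite.go. The trusted height/hash must come from
// out-of-band verification; addresses and the DB path are placeholders.
func newLightClient(chainID string, trustedHeight int64, trustedHash []byte) (*light.Client, error) {
	db, err := dbm.NewGoLevelDB("light-client-db", ".tendermint-light")
	if err != nil {
		return nil, err
	}
	return light.NewHTTPClient(
		chainID,
		light.TrustOptions{
			Period: 168 * time.Hour, // trusting period; keep well below the unbonding period
			Height: trustedHeight,
			Hash:   trustedHash,
		},
		"http://52.57.29.196:26657", // primary node (placeholder from the command's example)
		[]string{"http://public-seed-node.cosmoshub.certus.one:26657"}, // witnesses
		dbs.New(db, chainID),
		light.Logger(log.NewNopLogger()),
	)
}
```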


cmd/tendermint/commands/lite.go (+26 -26)

@@ -14,17 +14,17 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
lite "github.com/tendermint/tendermint/lite2"
lproxy "github.com/tendermint/tendermint/lite2/proxy"
lrpc "github.com/tendermint/tendermint/lite2/rpc"
dbs "github.com/tendermint/tendermint/lite2/store/db"
"github.com/tendermint/tendermint/light"
lproxy "github.com/tendermint/tendermint/light/proxy"
lrpc "github.com/tendermint/tendermint/light/rpc"
dbs "github.com/tendermint/tendermint/light/store/db"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
)
// LiteCmd represents the base command when called without any subcommands
var LiteCmd = &cobra.Command{
Use: "lite [chainID]",
// LightCmd represents the base command when called without any subcommands
var LightCmd = &cobra.Command{
Use: "light [chainID]",
Short: "Run a light client proxy server, verifying Tendermint rpc",
Long: `Run a light client proxy server, verifying Tendermint rpc.
@@ -36,16 +36,16 @@ Example:
start a fresh instance:
lite cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657
light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657
--height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD
continue from latest state:
lite cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657
light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657
`,
RunE: runProxy,
Args: cobra.ExactArgs(1),
Example: `lite cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657
Example: `light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657
--height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD`,
}
@@ -65,23 +65,23 @@ var (
)
func init() {
LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888",
LightCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888",
"Serve the proxy on the given address")
LiteCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "",
LightCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "",
"Connect to a Tendermint node at this address")
LiteCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "",
LightCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "",
"Tendermint nodes to cross-check the primary node, comma-separated")
LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
LiteCmd.Flags().IntVar(
LightCmd.Flags().StringVar(&home, "home-dir", ".tendermint-light", "Specify the home directory")
LightCmd.Flags().IntVar(
&maxOpenConnections,
"max-open-connections",
900,
"Maximum number of simultaneous connections (including WebSocket).")
LiteCmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour,
LightCmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour,
"Trusting period. Should be significantly less than the unbonding period")
LiteCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
LiteCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
LiteCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
}
func runProxy(cmd *cobra.Command, args []string) error {
@@ -100,16 +100,16 @@ func runProxy(cmd *cobra.Command, args []string) error {
witnessesAddrs := strings.Split(witnessAddrsJoined, ",")
db, err := dbm.NewGoLevelDB("lite-client-db", home)
db, err := dbm.NewGoLevelDB("light-client-db", home)
if err != nil {
return fmt.Errorf("can't create a db: %w", err)
}
var c *lite.Client
var c *light.Client
if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation
c, err = lite.NewHTTPClient(
c, err = light.NewHTTPClient(
chainID,
lite.TrustOptions{
light.TrustOptions{
Period: trustingPeriod,
Height: trustedHeight,
Hash: trustedHash,
@@ -117,16 +117,16 @@ func runProxy(cmd *cobra.Command, args []string) error {
primaryAddr,
witnessesAddrs,
dbs.New(db, chainID),
lite.Logger(logger),
light.Logger(logger),
)
} else { // continue from latest state
c, err = lite.NewHTTPClientFromTrustedStore(
c, err = light.NewHTTPClientFromTrustedStore(
chainID,
trustingPeriod,
primaryAddr,
witnessesAddrs,
dbs.New(db, chainID),
lite.Logger(logger),
light.Logger(logger),
)
}
if err != nil {


cmd/tendermint/main.go (+1 -1)

@@ -17,7 +17,7 @@ func main() {
cmd.GenValidatorCmd,
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.LiteCmd,
cmd.LightCmd,
cmd.ReplayCmd,
cmd.ReplayConsoleCmd,
cmd.ResetAllCmd,


docs/app-dev/app-development.md (+1 -1)

@@ -316,7 +316,7 @@ field can be left empty. Tendermint core will take care of updating the
validator set. Note the change in voting power must be strictly less than 1/3
per block if you want a light client to be able to prove the transition
externally. See the [light client
docs](https://godoc.org/github.com/tendermint/tendermint/lite#hdr-How_We_Track_Validators)
docs](https://godoc.org/github.com/tendermint/tendermint/light#hdr-How_We_Track_Validators)
for details on how it tracks validators.
In go:


docs/guides/go-built-in.md (+1 -1)

@@ -288,7 +288,7 @@ the application's `Query` method.
Applications are free to provide their own APIs. But by using Tendermint Core
as a proxy, clients (including [light client
package](https://godoc.org/github.com/tendermint/tendermint/lite2)) can leverage
package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage
the unified API across different applications. Plus they won't have to call the
otherwise separate Tendermint Core API for additional proofs.


docs/guides/java.md (+1 -1)

@@ -430,7 +430,7 @@ the application's `Query` method.
Applications are free to provide their own APIs. But by using Tendermint Core
as a proxy, clients (including [light client
package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage
the unified API across different applications. Plus they won't have to call the
otherwise separate Tendermint Core API for additional proofs.


docs/guides/kotlin.md (+1 -1)

@@ -410,7 +410,7 @@ the application's `Query` method.
Applications are free to provide their own APIs. But by using Tendermint Core
as a proxy, clients (including [light client
package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage
the unified API across different applications. Plus they won't have to call the
otherwise separate Tendermint Core API for additional proofs.


docs/tendermint-core/configuration.md (+1 -1)

@@ -361,7 +361,7 @@ Note after the block H, Tendermint creates something we call a "proof block"
(only if the application hash changed) H+1. The reason for this is to support
proofs. If you have a transaction in block H that changes the state to X, the
new application hash will only be included in block H+1. If after your
transaction is committed, you want to get a lite-client proof for the new state
transaction is committed, you want to get a light-client proof for the new state
(X), you need the new block to be committed in order to do that because the new
block has the new application hash for the state X. That's why we make a new
(empty) block if the application hash changes. Otherwise, you won't be able to

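To make the proof-block behaviour above concrete: a proof for state written in block H can only be served once the header at H+1 (which carries the new app hash) exists. The sketch below requests a proved query over RPC; it assumes the `ABCIQueryWithOptions`/`ABCIQueryOptions` API of the rpc client package, and the query path and key are placeholders in the style of the kvstore example app.

```go
package main

import (
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Node address is a placeholder.
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	// Ask for a proof explicitly. Height 0 means "latest"; to prove a value
	// written in block H, the queried header must be at H+1 or later, which is
	// why an extra (possibly empty) block is produced when the app hash changes.
	res, err := c.ABCIQueryWithOptions("/key", []byte("mykey"),
		rpcclient.ABCIQueryOptions{Height: 0, Prove: true})
	if err != nil {
		panic(err)
	}
	fmt.Printf("value=%X proven at height=%d\n", res.Response.Value, res.Response.Height)
}
```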

docs/tendermint-core/light-client-protocol.md (+6 -6)

@@ -8,8 +8,8 @@ Light clients are an important part of the complete blockchain system for most
applications. Tendermint provides unique speed and security properties for
light client applications.
See our [lite
package](https://pkg.go.dev/github.com/tendermint/tendermint/lite2?tab=doc).
See our [light
package](https://pkg.go.dev/github.com/tendermint/tendermint/light?tab=doc).
## Overview
@@ -33,7 +33,7 @@ proofs](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a9909
## Where to obtain trusted height & hash?
https://pkg.go.dev/github.com/tendermint/tendermint/lite2?tab=doc#TrustOptions
https://pkg.go.dev/github.com/tendermint/tendermint/light?tab=doc#TrustOptions
One way to obtain semi-trusted hash & height is to query multiple full nodes
and compare their hashes:
@@ -48,16 +48,16 @@ $ curl -s https://233.123.0.140:26657:26657/commit | jq "{height: .result.signed
## HTTP proxy
Tendermint comes with a built-in `tendermint lite` command, which can be used
Tendermint comes with a built-in `tendermint light` command, which can be used
to run a light client proxy server, verifying Tendermint rpc. All calls that
can be tracked back to a block header by a proof will be verified before
passing them back to the caller. Other than that, it will present the same
interface as a full Tendermint node.
```sh
$ tendermint lite supernova -p tcp://233.123.0.140:26657 \
$ tendermint light supernova -p tcp://233.123.0.140:26657 \
-w tcp://179.63.29.15:26657,tcp://144.165.223.135:26657 \
--height=10 --hash=37E9A6DD3FA25E83B22C18835401E8E56088D0D7ABC6FD99FCDC920DD76C1C57
```
For additional options, run `tendermint lite --help`.
For additional options, run `tendermint light --help`.
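
The proxy's verification is also available as a library. As a hedged sketch of the "continue from latest state" path shown in the command above: once a previous run has populated the trusted store, a client can be rebuilt without re-supplying a height and hash. The function name, addresses, and DB path here are illustrative placeholders; the constructor and its arguments follow the calls in this diff.

```go
package example

import (
	"time"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/light"
	dbs "github.com/tendermint/tendermint/light/store/db"
)

// resumeLightClient continues from the latest state persisted by a previous run
// (for example by `tendermint light`), so no trusted height/hash is needed again.
func resumeLightClient(chainID string) (*light.Client, error) {
	db, err := dbm.NewGoLevelDB("light-client-db", ".tendermint-light") // placeholder path
	if err != nil {
		return nil, err
	}
	return light.NewHTTPClientFromTrustedStore(
		chainID,
		168*time.Hour,                        // trusting period
		"tcp://233.123.0.140:26657",          // primary (address from the docs example)
		[]string{"tcp://179.63.29.15:26657"}, // witnesses
		dbs.New(db, chainID),
		light.Logger(log.NewNopLogger()),
	)
}
```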

lite2/client.go → light/client.go
lite2/client_benchmark_test.go → light/client_benchmark_test.go
lite2/client_test.go → light/client_test.go
lite2/doc.go → light/doc.go
lite2/errors.go → light/errors.go
lite2/example_test.go → light/example_test.go
lite2/helpers_test.go → light/helpers_test.go
lite2/provider/errors.go → light/provider/errors.go
lite2/provider/http/http.go → light/provider/http/http.go
lite2/provider/http/http_test.go → light/provider/http/http_test.go
lite2/provider/mock/deadmock.go → light/provider/mock/deadmock.go
lite2/provider/mock/mock.go → light/provider/mock/mock.go
lite2/provider/provider.go → light/provider/provider.go
lite2/proxy/proxy.go → light/proxy/proxy.go
lite2/proxy/routes.go → light/proxy/routes.go
lite2/rpc/client.go → light/rpc/client.go
lite2/rpc/proof.go → light/rpc/proof.go
lite2/rpc/query_test.go → light/rpc/query_test.go
lite2/setup.go → light/setup.go
lite2/store/db/db.go → light/store/db/db.go
lite2/store/db/db_test.go → light/store/db/db_test.go
lite2/store/errors.go → light/store/errors.go
lite2/store/store.go → light/store/store.go
lite2/trust_options.go → light/trust_options.go
lite2/verifier.go → light/verifier.go
lite2/verifier_test.go → light/verifier_test.go


lite/base_verifier.go (+0 -79)

@@ -1,79 +0,0 @@
package lite
import (
"bytes"
"github.com/pkg/errors"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
)
var _ Verifier = (*BaseVerifier)(nil)
// BaseVerifier lets us check the validity of SignedHeaders at height or
// later, requiring sufficient votes (> 2/3) from the given valset.
// To verify blocks produced by a blockchain with mutable validator sets,
// use the DynamicVerifier.
// TODO: Handle unbonding time.
type BaseVerifier struct {
chainID string
height int64
valset *types.ValidatorSet
}
// NewBaseVerifier returns a new Verifier initialized with a validator set at
// some height.
func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier {
if valset.IsNilOrEmpty() {
panic("NewBaseVerifier requires a valid valset")
}
return &BaseVerifier{
chainID: chainID,
height: height,
valset: valset,
}
}
// Implements Verifier.
func (bv *BaseVerifier) ChainID() string {
return bv.chainID
}
// Implements Verifier.
func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error {
// We can't verify commits for a different chain.
if signedHeader.ChainID != bv.chainID {
return errors.Errorf("BaseVerifier chainID is %v, cannot verify chainID %v",
bv.chainID, signedHeader.ChainID)
}
// We can't verify commits older than bv.height.
if signedHeader.Height < bv.height {
return errors.Errorf("BaseVerifier height is %v, cannot verify height %v",
bv.height, signedHeader.Height)
}
// We can't verify with the wrong validator set.
if !bytes.Equal(signedHeader.ValidatorsHash,
bv.valset.Hash()) {
return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bv.valset.Hash())
}
// Do basic sanity checks.
err := signedHeader.ValidateBasic(bv.chainID)
if err != nil {
return errors.Wrap(err, "in verify")
}
// Check commit signatures.
err = bv.valset.VerifyCommit(
bv.chainID, signedHeader.Commit.BlockID,
signedHeader.Height, signedHeader.Commit)
if err != nil {
return errors.Wrap(err, "in verify")
}
return nil
}

lite/base_verifier_test.go (+0 -66)

@@ -1,66 +0,0 @@
package lite
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/crypto/tmhash"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
)
func TestBaseCert(t *testing.T) {
// TODO: Requires proposer address to be set in header.
t.SkipNow()
assert := assert.New(t)
keys := genPrivKeys(4)
// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
vals := keys.ToValidators(20, 10)
// and a Verifier based on our known set
chainID := "test-static"
cert := NewBaseVerifier(chainID, 2, vals)
cases := []struct {
keys privKeys
vals *types.ValidatorSet
height int64
first, last int // who actually signs
proper bool // true -> expect no error
changed bool // true -> expect validator change error
}{
// height regression
{keys, vals, 1, 0, len(keys), false, false},
// perfect, signed by everyone
{keys, vals, 2, 0, len(keys), true, false},
// skip little guy is okay
{keys, vals, 3, 1, len(keys), true, false},
// but not the big guy
{keys, vals, 4, 0, len(keys) - 1, false, false},
// Changing the power a little bit breaks the static validator.
// The sigs are enough, but the validator hash is unknown.
{keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true},
}
for _, tc := range cases {
sh := tc.keys.GenSignedHeader(
chainID, tc.height, nil, tc.vals, tc.vals,
tmhash.Sum([]byte("foo")),
tmhash.Sum([]byte("params")),
tmhash.Sum([]byte("results")),
tc.first, tc.last,
)
err := cert.Verify(sh)
if tc.proper {
assert.Nil(err, "%+v", err)
} else {
assert.NotNil(err)
if tc.changed {
assert.True(lerr.IsErrUnexpectedValidators(err), "%+v", err)
}
}
}
}

lite/client/provider.go (+0 -139)

@@ -1,139 +0,0 @@
/*
Package client defines a provider that uses a rpchttp
to get information, which is used to get new headers
and validators directly from a Tendermint client.
*/
package client
import (
"fmt"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/lite"
lerr "github.com/tendermint/tendermint/lite/errors"
rpcclient "github.com/tendermint/tendermint/rpc/client"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// SignStatusClient combines a SignClient and StatusClient.
type SignStatusClient interface {
rpcclient.SignClient
rpcclient.StatusClient
}
type provider struct {
logger log.Logger
chainID string
client SignStatusClient
}
// NewProvider implements Provider (but not PersistentProvider).
func NewProvider(chainID string, client SignStatusClient) lite.Provider {
return &provider{
logger: log.NewNopLogger(),
chainID: chainID,
client: client,
}
}
// NewHTTPProvider can connect to a tendermint json-rpc endpoint
// at the given url, and uses that as a read-only provider.
func NewHTTPProvider(chainID, remote string) (lite.Provider, error) {
httpClient, err := rpchttp.New(remote, "/websocket")
if err != nil {
return nil, err
}
return NewProvider(chainID, httpClient), nil
}
// Implements Provider.
func (p *provider) SetLogger(logger log.Logger) {
logger = logger.With("module", "lite/client")
p.logger = logger
}
// StatusClient returns the internal client as a StatusClient
func (p *provider) StatusClient() rpcclient.StatusClient {
return p.client
}
// LatestFullCommit implements Provider.
func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc lite.FullCommit, err error) {
if chainID != p.chainID {
err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID)
return
}
if maxHeight != 0 && maxHeight < minHeight {
err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v",
minHeight, maxHeight)
return
}
commit, err := p.fetchLatestCommit(minHeight, maxHeight)
if err != nil {
return
}
fc, err = p.fillFullCommit(commit.SignedHeader)
return
}
// fetchLatestCommit fetches the latest commit from the client.
func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes.ResultCommit, error) {
status, err := p.client.Status()
if err != nil {
return nil, err
}
if status.SyncInfo.LatestBlockHeight < minHeight {
err = fmt.Errorf("provider is at %v but require minHeight=%v",
status.SyncInfo.LatestBlockHeight, minHeight)
return nil, err
}
if maxHeight == 0 {
maxHeight = status.SyncInfo.LatestBlockHeight
} else if status.SyncInfo.LatestBlockHeight < maxHeight {
maxHeight = status.SyncInfo.LatestBlockHeight
}
return p.client.Commit(&maxHeight)
}
// Implements Provider.
func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
return p.getValidatorSet(chainID, height)
}
func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
if chainID != p.chainID {
err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID)
return
}
if height < 1 {
err = fmt.Errorf("expected height >= 1, got height %v", height)
return
}
res, err := p.client.Validators(&height, 0, 0)
if err != nil {
// TODO pass through other types of errors.
return nil, lerr.ErrUnknownValidators(chainID, height)
}
valset = types.NewValidatorSet(res.Validators)
return
}
// This does no validation.
func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) {
// Get the validators.
valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height)
if err != nil {
return lite.FullCommit{}, err
}
// Get the next validators.
nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1)
if err != nil {
return lite.FullCommit{}, err
}
return lite.NewFullCommit(signedHeader, valset, nextValset), nil
}

lite/client/provider_test.go (+0 -62)

@@ -1,62 +0,0 @@
package client
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
rpcclient "github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
func TestMain(m *testing.M) {
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app)
code := m.Run()
rpctest.StopTendermint(node)
os.Exit(code)
}
func TestProvider(t *testing.T) {
assert, require := assert.New(t), require.New(t)
cfg := rpctest.GetConfig()
defer os.RemoveAll(cfg.RootDir)
rpcAddr := cfg.RPC.ListenAddress
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {
panic(err)
}
chainID := genDoc.ChainID
t.Log("chainID:", chainID)
p, err := NewHTTPProvider(chainID, rpcAddr)
require.Nil(err)
require.NotNil(p)
// let it produce some blocks
err = rpcclient.WaitForHeight(p.(*provider).client, 6, nil)
require.Nil(err)
// let's get the highest block
fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1)
require.Nil(err, "%+v", err)
sh := fc.Height()
assert.True(sh < 5000)
// let's check this is valid somehow
assert.Nil(fc.ValidateFull(chainID))
// historical queries now work :)
lower := sh - 5
fc, err = p.LatestFullCommit(chainID, lower, lower)
assert.Nil(err, "%+v", err)
assert.Equal(lower, fc.Height())
}

lite/commit.go (+0 -87)

@@ -1,87 +0,0 @@
package lite
import (
"bytes"
"errors"
"fmt"
"github.com/tendermint/tendermint/types"
)
// FullCommit contains a SignedHeader (the block header and a commit that signs it),
// the validator set which signed the commit, and the next validator set. The
// next validator set (which is proven from the block header) allows us to
// revert to block-by-block updating of lite Verifier's latest validator set,
// even in the face of arbitrarily large power changes.
type FullCommit struct {
SignedHeader types.SignedHeader `json:"signed_header"`
Validators *types.ValidatorSet `json:"validator_set"`
NextValidators *types.ValidatorSet `json:"next_validator_set"`
}
// NewFullCommit returns a new FullCommit.
func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.ValidatorSet) FullCommit {
return FullCommit{
SignedHeader: signedHeader,
Validators: valset,
NextValidators: nextValset,
}
}
// Validate the components and check for consistency.
// This also checks to make sure that Validators actually
// signed the SignedHeader.Commit.
// If > 2/3 did not sign the Commit from fc.Validators, it
// is not a valid commit!
func (fc FullCommit) ValidateFull(chainID string) error {
// Ensure that Validators exists and matches the header.
if fc.Validators.Size() == 0 {
return errors.New("need FullCommit.Validators")
}
if !bytes.Equal(
fc.SignedHeader.ValidatorsHash,
fc.Validators.Hash()) {
return fmt.Errorf("header has vhash %X but valset hash is %X",
fc.SignedHeader.ValidatorsHash,
fc.Validators.Hash(),
)
}
// Ensure that NextValidators exists and matches the header.
if fc.NextValidators.Size() == 0 {
return errors.New("need FullCommit.NextValidators")
}
if !bytes.Equal(
fc.SignedHeader.NextValidatorsHash,
fc.NextValidators.Hash()) {
return fmt.Errorf("header has next vhash %X but next valset hash is %X",
fc.SignedHeader.NextValidatorsHash,
fc.NextValidators.Hash(),
)
}
// Validate the header.
err := fc.SignedHeader.ValidateBasic(chainID)
if err != nil {
return err
}
// Validate the signatures on the commit.
hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit
return fc.Validators.VerifyCommit(
hdr.ChainID, cmt.BlockID,
hdr.Height, cmt)
}
// Height returns the height of the header.
func (fc FullCommit) Height() int64 {
if fc.SignedHeader.Header == nil {
panic("should not happen")
}
return fc.SignedHeader.Height
}
// ChainID returns the chainID of the header.
func (fc FullCommit) ChainID() string {
if fc.SignedHeader.Header == nil {
panic("should not happen")
}
return fc.SignedHeader.ChainID
}

lite/dbprovider.go (+0 -285)

@@ -1,285 +0,0 @@
package lite
import (
"fmt"
"regexp"
"strconv"
amino "github.com/tendermint/go-amino"
dbm "github.com/tendermint/tm-db"
cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino"
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
)
var _ PersistentProvider = (*DBProvider)(nil)
// DBProvider stores commits and validator sets in a DB.
type DBProvider struct {
logger log.Logger
label string
db dbm.DB
cdc *amino.Codec
limit int
}
func NewDBProvider(label string, db dbm.DB) *DBProvider {
// NOTE: when debugging, this type of construction might be useful.
//db = dbm.NewDebugDB("db provider "+tmrand.Str(4), db)
cdc := amino.NewCodec()
cryptoamino.RegisterAmino(cdc)
dbp := &DBProvider{
logger: log.NewNopLogger(),
label: label,
db: db,
cdc: cdc,
}
return dbp
}
func (dbp *DBProvider) SetLogger(logger log.Logger) {
dbp.logger = logger.With("label", dbp.label)
}
func (dbp *DBProvider) SetLimit(limit int) *DBProvider {
dbp.limit = limit
return dbp
}
// Implements PersistentProvider.
func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error {
dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc)
batch := dbp.db.NewBatch()
defer batch.Close()
// Save the fc.validators.
// We might be overwriting what we already have, but
// it makes the logic easier for now.
vsKey := validatorSetKey(fc.ChainID(), fc.Height())
vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Validators)
if err != nil {
return err
}
batch.Set(vsKey, vsBz)
// Save the fc.NextValidators.
nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1)
nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextValidators)
if err != nil {
return err
}
batch.Set(nvsKey, nvsBz)
// Save the fc.SignedHeader
shKey := signedHeaderKey(fc.ChainID(), fc.Height())
shBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.SignedHeader)
if err != nil {
return err
}
batch.Set(shKey, shBz)
// And write sync.
batch.WriteSync()
// Garbage collect.
// TODO: optimize later.
if dbp.limit > 0 {
dbp.deleteAfterN(fc.ChainID(), dbp.limit)
}
return nil
}
// Implements Provider.
func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (
FullCommit, error) {
dbp.logger.Info("DBProvider.LatestFullCommit()...",
"chainID", chainID, "minHeight", minHeight, "maxHeight", maxHeight)
if minHeight <= 0 {
minHeight = 1
}
if maxHeight == 0 {
maxHeight = 1<<63 - 1
}
itr, err := dbp.db.ReverseIterator(
signedHeaderKey(chainID, minHeight),
append(signedHeaderKey(chainID, maxHeight), byte(0x00)),
)
if err != nil {
panic(err)
}
defer itr.Close()
for itr.Valid() {
key := itr.Key()
_, _, ok := parseSignedHeaderKey(key)
if !ok {
// Skip over other keys.
itr.Next()
continue
} else {
// Found the latest full commit signed header.
shBz := itr.Value()
sh := types.SignedHeader{}
err := dbp.cdc.UnmarshalBinaryLengthPrefixed(shBz, &sh)
if err != nil {
return FullCommit{}, err
}
lfc, err := dbp.fillFullCommit(sh)
if err == nil {
dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height())
return lfc, nil
}
dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc)
dbp.logger.Error(fmt.Sprintf("%+v", err))
return lfc, err
}
}
return FullCommit{}, lerr.ErrCommitNotFound()
}
func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
return dbp.getValidatorSet(chainID, height)
}
func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
vsBz, err := dbp.db.Get(validatorSetKey(chainID, height))
if err != nil {
return nil, err
}
if len(vsBz) == 0 {
err = lerr.ErrUnknownValidators(chainID, height)
return
}
err = dbp.cdc.UnmarshalBinaryLengthPrefixed(vsBz, &valset)
if err != nil {
return
}
// To test deep equality. This makes it easier to test for e.g. valset
// equivalence using assert.Equal (tests for deep equality) in our tests,
// which also tests for unexported/private field equivalence.
valset.TotalVotingPower()
return
}
func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) {
var chainID = sh.ChainID
var height = sh.Height
var valset, nextValset *types.ValidatorSet
// Load the validator set.
valset, err := dbp.getValidatorSet(chainID, height)
if err != nil {
return FullCommit{}, err
}
// Load the next validator set.
nextValset, err = dbp.getValidatorSet(chainID, height+1)
if err != nil {
return FullCommit{}, err
}
// Return filled FullCommit.
return FullCommit{
SignedHeader: sh,
Validators: valset,
NextValidators: nextValset,
}, nil
}
func (dbp *DBProvider) deleteAfterN(chainID string, after int) error {
dbp.logger.Info("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after)
itr, err := dbp.db.ReverseIterator(
signedHeaderKey(chainID, 1),
append(signedHeaderKey(chainID, 1<<63-1), byte(0x00)),
)
if err != nil {
panic(err)
}
defer itr.Close()
var lastHeight int64 = 1<<63 - 1
var numSeen = 0
var numDeleted = 0
for itr.Valid() {
key := itr.Key()
_, height, ok := parseChainKeyPrefix(key)
if !ok {
return fmt.Errorf("unexpected key %v", key)
}
if height < lastHeight {
lastHeight = height
numSeen++
}
if numSeen > after {
dbp.db.Delete(key)
numDeleted++
}
itr.Next()
}
dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items", numDeleted))
return nil
}
//----------------------------------------
// key encoding
func signedHeaderKey(chainID string, height int64) []byte {
return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height))
}
func validatorSetKey(chainID string, height int64) []byte {
return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height))
}
//----------------------------------------
// key parsing
var keyPattern = regexp.MustCompile(`^([^/]+)/([0-9]*)/(.*)$`)
func parseKey(key []byte) (chainID string, height int64, part string, ok bool) {
submatch := keyPattern.FindSubmatch(key)
if submatch == nil {
return "", 0, "", false
}
chainID = string(submatch[1])
heightStr := string(submatch[2])
heightInt, err := strconv.Atoi(heightStr)
if err != nil {
return "", 0, "", false
}
height = int64(heightInt)
part = string(submatch[3])
ok = true // good!
return
}
func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) {
var part string
chainID, height, part, ok = parseKey(key)
if part != "sh" {
return "", 0, false
}
return
}
func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) {
chainID, height, _, ok = parseKey(key)
return
}

lite/doc.go (+0 -133)

@@ -1,133 +0,0 @@
/*
Package lite is deprecated and will be removed in v0.34!
Package lite allows you to securely validate headers without a full node.
This library pulls together all the crypto and algorithms, so given a
relatively recent (< unbonding period) known validator set, one can get
indisputable proof that data is in the chain (current state) or detect if the
node is lying to the client.
Tendermint RPC exposes a lot of info, but a malicious node could return any
data it wants to queries, or even to block headers, even making up fake
signatures from non-existent validators to justify it. This is a lot of logic
to get right, to be contained in a small, easy to use library, that does this
for you, so you can just build nice applications.
We design for clients who have no strong trust relationship with any Tendermint
node, just the blockchain and validator set as a whole.
SignedHeader
SignedHeader is a block header along with a commit -- enough validator
precommit-vote signatures to prove its validity (> 2/3 of the voting power)
given the validator set responsible for signing that header. A FullCommit is a
SignedHeader along with the current and next validator sets.
The hash of the next validator set is included and signed in the SignedHeader.
This lets the lite client keep track of arbitrary changes to the validator set,
as every change to the validator set must be approved by inclusion in the
header and signed in the commit.
In the worst case, with every block changing the validators around completely,
a lite client can sync up with every block header to verify each validator set
change on the chain. In practice, most applications will not have frequent
drastic updates to the validator set, so the logic defined in this package for
lite client syncing is optimized to use intelligent bisection and
block-skipping for efficient sourcing and verification of these data structures
and updates to the validator set (see the DynamicVerifier for more
information).
The FullCommit is also declared in this package as a convenience structure,
which includes the SignedHeader along with the full current and next
ValidatorSets.
Verifier
A Verifier validates a new SignedHeader given the currently known state. There
are two different types of Verifiers provided.
BaseVerifier - given a validator set and a height, this Verifier verifies
that > 2/3 of the voting power of the given validator set had signed the
SignedHeader, and that the SignedHeader was to be signed by the exact given
validator set, and that the height of the commit is at least height (or
greater).
DynamicVerifier - this Verifier implements an auto-update and persistence
strategy to verify any SignedHeader of the blockchain.
Provider and PersistentProvider
A Provider allows us to store and retrieve the FullCommits.
type Provider interface {
// LatestFullCommit returns the latest commit with
// minHeight <= height <= maxHeight.
// If maxHeight is zero, returns the latest where
// minHeight <= height.
LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error)
}
* client.NewHTTPProvider - query Tendermint rpc.
A PersistentProvider is a Provider that also allows for saving state. This is
used by the DynamicVerifier for persistence.
type PersistentProvider interface {
Provider
// SaveFullCommit saves a FullCommit (without verification).
SaveFullCommit(fc FullCommit) error
}
* DBProvider - persistence provider for use with any libs/DB.
* MultiProvider - combine multiple providers.
The suggested use for local light clients is client.NewHTTPProvider(...) for
getting new data (Source), and NewMultiProvider(NewDBProvider("label",
dbm.NewMemDB()), NewDBProvider("label", db.NewFileDB(...))) to store confirmed
full commits (Trusted)
How We Track Validators
Unless you want to blindly trust the node you talk with, you need to trace
every response back to a hash in a block header and validate the commit
signatures of that block header match the proper validator set. If there is a
static validator set, you store it locally upon initialization of the client,
and check against that every time.
If the validator set for the blockchain is dynamic, verifying block commits is
a bit more involved -- if there is a block at height H with a known (trusted)
validator set V, and another block at height H' (H' > H) with validator set V'
!= V, then we want a way to safely update it.
First, we get the new (unconfirmed) validator set V' and verify that H' is
internally consistent and properly signed by this V'. Assuming it is a valid
block, we check that at least 2/3 of the validators in V also signed it,
meaning it would also be valid under our old assumptions. Then, we accept H'
and V' as valid and trusted and use that to validate for heights X > H' until a
more recent and updated validator set is found.
If we cannot update directly from H -> H' because there was too much change to
the validator set, then we can look for some Hm (H < Hm < H') with a validator
set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one
of these steps doesn't work, then we continue bisecting, until we eventually
have to externally validate the validator set changes at every block.
Since we never trust any server in this protocol, only the signatures
themselves, it doesn't matter if the seed comes from a (possibly malicious)
node or a (possibly malicious) user. We can accept it or reject it based only
on our trusted validator set and cryptographic proofs. This makes it extremely
important to verify that you have the proper validator set when initializing
the client, as that is the root of all trust.
The software currently assumes that the unbonding period is infinite in
duration. If the DynamicVerifier hasn't been updated in a while, you should
manually verify the block headers using other sources.
TODO: Update the software to handle cases around the unbonding period.
*/
package lite

lite/dynamic_verifier.go (+0 -275)

@@ -1,275 +0,0 @@
package lite
import (
"bytes"
"fmt"
"sync"
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
)
const sizeOfPendingMap = 1024
var _ Verifier = (*DynamicVerifier)(nil)
// DynamicVerifier implements an auto-updating Verifier. It uses a
// "source" provider to obtain the needed FullCommits to securely sync with
// validator set changes. It stores properly validated data on the
// "trusted" local system.
// TODO: make this single threaded and create a new
// ConcurrentDynamicVerifier that wraps it with concurrency.
// see https://github.com/tendermint/tendermint/issues/3170
type DynamicVerifier struct {
chainID string
logger log.Logger
// Already validated, stored locally
trusted PersistentProvider
// New info, like a node rpc, or other import method.
source Provider
// pending map to synchronize concurrent verification requests
mtx sync.Mutex
pendingVerifications map[int64]chan struct{}
}
// NewDynamicVerifier returns a new DynamicVerifier. It uses the
// trusted provider to store validated data and the source provider to
// obtain missing data (e.g. FullCommits).
//
// The trusted provider should be a DBProvider.
// The source provider should be a client.HTTPProvider.
func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier {
return &DynamicVerifier{
logger: log.NewNopLogger(),
chainID: chainID,
trusted: trusted,
source: source,
pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap),
}
}
func (dv *DynamicVerifier) SetLogger(logger log.Logger) {
logger = logger.With("module", "lite")
dv.logger = logger
dv.trusted.SetLogger(logger)
dv.source.SetLogger(logger)
}
// Implements Verifier.
func (dv *DynamicVerifier) ChainID() string {
return dv.chainID
}
// Implements Verifier.
//
// If the validators have changed since the last known time, it looks to
// dv.trusted and dv.source to prove the new validators. On success, it will
// try to store the SignedHeader in dv.trusted if the next
// validator can be sourced.
func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error {
// Performs synchronization for multi-threads verification at the same height.
dv.mtx.Lock()
if pending := dv.pendingVerifications[shdr.Height]; pending != nil {
dv.mtx.Unlock()
<-pending // pending is chan struct{}
} else {
pending := make(chan struct{})
dv.pendingVerifications[shdr.Height] = pending
defer func() {
close(pending)
dv.mtx.Lock()
delete(dv.pendingVerifications, shdr.Height)
dv.mtx.Unlock()
}()
dv.mtx.Unlock()
}
//Get the exact trusted commit for h, and if it is
// equal to shdr, then it's already trusted, so
// just return nil.
trustedFCSameHeight, err := dv.trusted.LatestFullCommit(dv.chainID, shdr.Height, shdr.Height)
if err == nil {
// If loading trust commit successfully, and trust commit equal to shdr, then don't verify it,
// just return nil.
if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) {
dv.logger.Info(fmt.Sprintf("Load full commit at height %d from cache, there is not need to verify.", shdr.Height))
return nil
}
} else if !lerr.IsErrCommitNotFound(err) {
// Return error if it is not CommitNotFound error
dv.logger.Info(fmt.Sprintf("Encountered unknown error in loading full commit at height %d.", shdr.Height))
return err
}
// Get the latest known full commit <= h-1 from our trusted providers.
// The full commit at h-1 contains the valset to sign for h.
prevHeight := shdr.Height - 1
trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, prevHeight)
if err != nil {
return err
}
// sync up to the prevHeight and assert our latest NextValidatorSet
// is the ValidatorSet for the SignedHeader
if trustedFC.Height() == prevHeight {
// Return error if valset doesn't match.
if !bytes.Equal(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash) {
return lerr.ErrUnexpectedValidators(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash)
}
} else {
// If valset doesn't match, try to update
if !bytes.Equal(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash) {
// ... update.
trustedFC, err = dv.updateToHeight(prevHeight)
if err != nil {
return err
}
// Return error if valset _still_ doesn't match.
if !bytes.Equal(trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash) {
return lerr.ErrUnexpectedValidators(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash)
}
}
}
// Verify the signed header using the matching valset.
cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
err = cert.Verify(shdr)
if err != nil {
return err
}
// By now, the SignedHeader is fully validated and we're synced up to
// SignedHeader.Height - 1. To sync to SignedHeader.Height, we need
// the validator set at SignedHeader.Height + 1 so we can verify the
// SignedHeader.NextValidatorSet.
// TODO: is the ValidateFull below mostly redundant with the BaseVerifier.Verify above?
// See https://github.com/tendermint/tendermint/issues/3174.
// Get the next validator set.
nextValset, err := dv.source.ValidatorSet(dv.chainID, shdr.Height+1)
if lerr.IsErrUnknownValidators(err) {
// Ignore this error.
return nil
} else if err != nil {
return err
}
// Create filled FullCommit.
nfc := FullCommit{
SignedHeader: shdr,
Validators: trustedFC.NextValidators,
NextValidators: nextValset,
}
// Validate the full commit. This checks the cryptographic
// signatures of Commit against Validators.
if err := nfc.ValidateFull(dv.chainID); err != nil {
return err
}
// Trust it.
return dv.trusted.SaveFullCommit(nfc)
}
// verifyAndSave will verify if this is a valid source full commit given the
// best match trusted full commit, and if good, persist to dv.trusted.
// Returns ErrNotEnoughVotingPowerSigned when >2/3 of trustedFC did not sign sourceFC.
// Panics if trustedFC.Height() >= sourceFC.Height().
func (dv *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error {
if trustedFC.Height() >= sourceFC.Height() {
panic("should not happen")
}
err := trustedFC.NextValidators.VerifyFutureCommit(
sourceFC.Validators,
dv.chainID, sourceFC.SignedHeader.Commit.BlockID,
sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit,
)
if err != nil {
return err
}
return dv.trusted.SaveFullCommit(sourceFC)
}
// updateToHeight will use divide-and-conquer to find a path to h.
// Returns nil error iff we successfully verify and persist a full commit
// for height h, using repeated applications of bisection if necessary.
//
// Returns ErrCommitNotFound if source provider doesn't have the commit for h.
func (dv *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) {
// Fetch latest full commit from source.
sourceFC, err := dv.source.LatestFullCommit(dv.chainID, h, h)
if err != nil {
return FullCommit{}, err
}
// If sourceFC.Height() != h, we can't do it.
if sourceFC.Height() != h {
return FullCommit{}, lerr.ErrCommitNotFound()
}
// Validate the full commit. This checks the cryptographic
// signatures of Commit against Validators.
if err := sourceFC.ValidateFull(dv.chainID); err != nil {
return FullCommit{}, err
}
// Verify latest FullCommit against trusted FullCommits
FOR_LOOP:
for {
// Fetch latest full commit from trusted.
trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, h)
if err != nil {
return FullCommit{}, err
}
// We have nothing to do.
if trustedFC.Height() == h {
return trustedFC, nil
}
// Try to update to full commit with checks.
err = dv.verifyAndSave(trustedFC, sourceFC)
if err == nil {
// All good!
return sourceFC, nil
}
// Handle special case when err is ErrNotEnoughVotingPowerSigned.
if types.IsErrNotEnoughVotingPowerSigned(err) {
// Divide and conquer.
start, end := trustedFC.Height(), sourceFC.Height()
if !(start < end) {
panic("should not happen")
}
mid := (start + end) / 2
_, err = dv.updateToHeight(mid)
if err != nil {
return FullCommit{}, err
}
// If we made it to mid, we retry.
continue FOR_LOOP
}
return FullCommit{}, err
}
}
func (dv *DynamicVerifier) LastTrustedHeight() int64 {
fc, err := dv.trusted.LatestFullCommit(dv.chainID, 1, 1<<63-1)
if err != nil {
panic("should not happen")
}
return fc.Height()
}

lite/dynamic_verifier_test.go (+0 -299)

@@ -1,299 +0,0 @@
package lite
import (
"fmt"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/crypto/tmhash"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
const testChainID = "inquiry-test"
func TestInquirerValidPath(t *testing.T) {
assert, require := assert.New(t), require.New(t)
trust := NewDBProvider("trust", dbm.NewMemDB())
source := NewDBProvider("source", dbm.NewMemDB())
// Set up the validators to generate test blocks.
var vote int64 = 10
keys := genPrivKeys(5)
nkeys := keys.Extend(1)
// Construct a bunch of commits, each with one more height than the last.
chainID := testChainID
consHash := []byte("params")
resHash := []byte("results")
count := 50
fcz := make([]FullCommit, count)
for i := 0; i < count; i++ {
vals := keys.ToValidators(vote, 0)
nextVals := nkeys.ToValidators(vote, 0)
h := int64(1 + i)
appHash := []byte(fmt.Sprintf("h=%d", h))
fcz[i] = keys.GenFullCommit(
chainID, h, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys))
// Extend the keys by 1 each time.
keys = nkeys
nkeys = nkeys.Extend(1)
}
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(err)
cert := NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(log.TestingLogger())
// This should fail validation:
sh := fcz[count-1].SignedHeader
err = cert.Verify(sh)
require.NotNil(err)
// Adding a few commits in the middle should be insufficient.
for i := 10; i < 13; i++ {
err := source.SaveFullCommit(fcz[i])
require.Nil(err)
}
err = cert.Verify(sh)
assert.NotNil(err)
// With more info, we succeed.
for i := 0; i < count; i++ {
err := source.SaveFullCommit(fcz[i])
require.Nil(err)
}
// TODO: Requires proposer address to be set in header.
// err = cert.Verify(sh)
// assert.Nil(err, "%+v", err)
}
func TestDynamicVerify(t *testing.T) {
trust := NewDBProvider("trust", dbm.NewMemDB())
source := NewDBProvider("source", dbm.NewMemDB())
// 10 commits with one valset, 1 to change,
// 10 commits with the next one
n1, n2 := 10, 10
nCommits := n1 + n2 + 1
maxHeight := int64(nCommits)
fcz := make([]FullCommit, nCommits)
// gen the 2 val sets
chainID := "dynamic-verifier"
power := int64(10)
keys1 := genPrivKeys(5)
vals1 := keys1.ToValidators(power, 0)
keys2 := genPrivKeys(5)
vals2 := keys2.ToValidators(power, 0)
// make some commits with the first
for i := 0; i < n1; i++ {
fcz[i] = makeFullCommit(int64(i), keys1, vals1, vals1, chainID)
}
// update the val set
fcz[n1] = makeFullCommit(int64(n1), keys1, vals1, vals2, chainID)
// make some commits with the new one
for i := n1 + 1; i < nCommits; i++ {
fcz[i] = makeFullCommit(int64(i), keys2, vals2, vals2, chainID)
}
// Save everything in the source
for _, fc := range fcz {
source.SaveFullCommit(fc)
}
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(t, err)
ver := NewDynamicVerifier(chainID, trust, source)
ver.SetLogger(log.TestingLogger())
// fetch the latest from the source
_, err = source.LatestFullCommit(chainID, 1, maxHeight)
require.NoError(t, err)
// TODO: Requires proposer address to be set in header.
// try to update to the latest
// err = ver.Verify(latestFC.SignedHeader)
// require.NoError(t, err)
}
func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.ValidatorSet, chainID string) FullCommit {
height++
consHash := tmhash.Sum([]byte("special-params"))
appHash := tmhash.Sum([]byte(fmt.Sprintf("h=%d", height)))
resHash := tmhash.Sum([]byte(fmt.Sprintf("res=%d", height)))
return keys.GenFullCommit(
chainID, height, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys),
)
}
func TestInquirerVerifyHistorical(t *testing.T) {
assert, require := assert.New(t), require.New(t)
trust := NewDBProvider("trust", dbm.NewMemDB())
source := NewDBProvider("source", dbm.NewMemDB())
// Set up the validators to generate test blocks.
var vote int64 = 10
keys := genPrivKeys(5)
nkeys := keys.Extend(1)
// Construct a bunch of commits, each with one more height than the last.
chainID := testChainID
count := 10
consHash := []byte("special-params")
fcz := make([]FullCommit, count)
for i := 0; i < count; i++ {
vals := keys.ToValidators(vote, 0)
nextVals := nkeys.ToValidators(vote, 0)
h := int64(1 + i)
appHash := []byte(fmt.Sprintf("h=%d", h))
resHash := []byte(fmt.Sprintf("res=%d", h))
fcz[i] = keys.GenFullCommit(
chainID, h, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys))
// Extend the keys by 1 each time.
keys = nkeys
nkeys = nkeys.Extend(1)
}
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(err)
cert := NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(log.TestingLogger())
// Store a few full commits as trust.
for _, i := range []int{2, 5} {
trust.SaveFullCommit(fcz[i])
}
// See if we can jump forward using trusted full commits.
// Souce doesn't have fcz[9] so cert.LastTrustedHeight wont' change.
err = source.SaveFullCommit(fcz[7])
require.Nil(err, "%+v", err)
// TODO: Requires proposer address to be set in header.
// sh := fcz[8].SignedHeader
// err = cert.Verify(sh)
// require.Nil(err, "%+v", err)
// assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
commit, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
require.NotNil(err, "%+v", err)
assert.Equal(commit, (FullCommit{}))
// With fcz[9] Verify will update last trusted height.
err = source.SaveFullCommit(fcz[9])
require.Nil(err, "%+v", err)
// TODO: Requires proposer address to be set in header.
// sh = fcz[8].SignedHeader
// err = cert.Verify(sh)
// require.Nil(err, "%+v", err)
// assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
// TODO: Requires proposer address to be set in header.
// commit, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
// require.Nil(err, "%+v", err)
// assert.Equal(commit.Height(), fcz[8].Height())
// Add access to all full commits via untrusted source.
for i := 0; i < count; i++ {
err := source.SaveFullCommit(fcz[i])
require.Nil(err)
}
// TODO: Requires proposer address to be set in header.
// Try to check an unknown seed in the past.
// sh = fcz[3].SignedHeader
// err = cert.Verify(sh)
// require.Nil(err, "%+v", err)
// assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
// TODO: Requires proposer address to be set in header.
// Jump all the way forward again.
// sh = fcz[count-1].SignedHeader
// err = cert.Verify(sh)
// require.Nil(err, "%+v", err)
// assert.Equal(fcz[9].Height(), cert.LastTrustedHeight())
}
func TestConcurrencyInquirerVerify(t *testing.T) {
_, require := assert.New(t), require.New(t)
trust := NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10)
source := NewDBProvider("source", dbm.NewMemDB())
// Set up the validators to generate test blocks.
var vote int64 = 10
keys := genPrivKeys(5)
nkeys := keys.Extend(1)
// Construct a bunch of commits, each with one more height than the last.
chainID := testChainID
count := 10
consHash := []byte("special-params")
fcz := make([]FullCommit, count)
for i := 0; i < count; i++ {
vals := keys.ToValidators(vote, 0)
nextVals := nkeys.ToValidators(vote, 0)
h := int64(1 + i)
appHash := []byte(fmt.Sprintf("h=%d", h))
resHash := []byte(fmt.Sprintf("res=%d", h))
fcz[i] = keys.GenFullCommit(
chainID, h, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys))
// Extend the keys by 1 each time.
keys = nkeys
nkeys = nkeys.Extend(1)
}
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(err)
cert := NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(log.TestingLogger())
err = source.SaveFullCommit(fcz[7])
require.Nil(err, "%+v", err)
err = source.SaveFullCommit(fcz[8])
require.Nil(err, "%+v", err)
sh := fcz[8].SignedHeader
var wg sync.WaitGroup
count = 100
errList := make([]error, count)
for i := 0; i < count; i++ {
wg.Add(1)
go func(index int) {
errList[index] = cert.Verify(sh)
defer wg.Done()
}(i)
}
wg.Wait()
// TODO: Requires proposer address to be set in header.
// for _, err := range errList {
// require.Nil(err)
// }
}

lite/errors/errors.go (+0 -99)

@@ -1,99 +0,0 @@
package errors
import (
"fmt"
"github.com/pkg/errors"
)
//----------------------------------------
// Error types
type errCommitNotFound struct{}
func (e errCommitNotFound) Error() string {
return "Commit not found by provider"
}
type errUnexpectedValidators struct {
got []byte
want []byte
}
func (e errUnexpectedValidators) Error() string {
return fmt.Sprintf("Validator set is different. Got %X want %X",
e.got, e.want)
}
type errUnknownValidators struct {
chainID string
height int64
}
func (e errUnknownValidators) Error() string {
return fmt.Sprintf("Validators are unknown or missing for chain %s and height %d",
e.chainID, e.height)
}
type errEmptyTree struct{}
func (e errEmptyTree) Error() string {
return "Tree is empty"
}
//----------------------------------------
// Methods for above error types
//-----------------
// ErrCommitNotFound
// ErrCommitNotFound indicates that a the requested commit was not found.
func ErrCommitNotFound() error {
return errors.Wrap(errCommitNotFound{}, "")
}
func IsErrCommitNotFound(err error) bool {
_, ok := errors.Cause(err).(errCommitNotFound)
return ok
}
//-----------------
// ErrUnexpectedValidators
// ErrUnexpectedValidators indicates a validator set mismatch.
func ErrUnexpectedValidators(got, want []byte) error {
return errors.Wrap(errUnexpectedValidators{
got: got,
want: want,
}, "")
}
func IsErrUnexpectedValidators(err error) bool {
_, ok := errors.Cause(err).(errUnexpectedValidators)
return ok
}
//-----------------
// ErrUnknownValidators
// ErrUnknownValidators indicates that some validator set was missing or unknown.
func ErrUnknownValidators(chainID string, height int64) error {
return errors.Wrap(errUnknownValidators{chainID, height}, "")
}
func IsErrUnknownValidators(err error) bool {
_, ok := errors.Cause(err).(errUnknownValidators)
return ok
}
//-----------------
// ErrEmptyTree
func ErrEmptyTree() error {
return errors.Wrap(errEmptyTree{}, "")
}
func IsErrEmptyTree(err error) bool {
_, ok := errors.Cause(err).(errEmptyTree)
return ok
}
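For context, a minimal sketch (not part of this diff) of how callers matched these sentinel-style errors before the package was removed; the error value here is constructed directly for illustration:

package main

import (
	"fmt"

	lerr "github.com/tendermint/tendermint/lite/errors"
)

func main() {
	// Hypothetical failure: nothing stored yet, so a lookup reports this error.
	err := lerr.ErrCommitNotFound()

	// Callers matched on the wrapped cause rather than comparing strings.
	if lerr.IsErrCommitNotFound(err) {
		fmt.Println("no full commit stored for the requested range")
	}
}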

+ 0
- 159
lite/helpers.go

@ -1,159 +0,0 @@
package lite
import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// privKeys is a helper type for testing.
//
// It lets us simulate signing with many keys. The main use case is to create
// a set, and call GenSignedHeader to get a properly signed header for testing.
//
// You can set different weights of validators each time you call ToValidators,
// and can optionally extend the validator set later with Extend.
type privKeys []crypto.PrivKey
// genPrivKeys produces an array of private keys to generate commits.
func genPrivKeys(n int) privKeys {
res := make(privKeys, n)
for i := range res {
res[i] = ed25519.GenPrivKey()
}
return res
}
// Change replaces the key at index i.
func (pkz privKeys) Change(i int) privKeys {
res := make(privKeys, len(pkz))
copy(res, pkz)
res[i] = ed25519.GenPrivKey()
return res
}
// Extend adds n more keys (to remove, just take a slice).
func (pkz privKeys) Extend(n int) privKeys {
extra := genPrivKeys(n)
return append(pkz, extra...)
}
// genSecpPrivKeys produces an array of secp256k1 private keys to generate commits.
func genSecpPrivKeys(n int) privKeys {
res := make(privKeys, n)
for i := range res {
res[i] = secp256k1.GenPrivKey()
}
return res
}
// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice).
func (pkz privKeys) ExtendSecp(n int) privKeys {
extra := genSecpPrivKeys(n)
return append(pkz, extra...)
}
// ToValidators produces a valset from the set of keys.
// The first key has weight `init` and it increases by `inc` every step
// so we can have all the same weight, or a simple linear distribution
// (should be enough for testing).
func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet {
res := make([]*types.Validator, len(pkz))
for i, k := range pkz {
res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc)
}
return types.NewValidatorSet(res)
}
// signHeader properly signs the header with all keys from first to last exclusive.
func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit {
commitSigs := make([]types.CommitSig, len(pkz))
for i := 0; i < len(pkz); i++ {
commitSigs[i] = types.NewCommitSigAbsent()
}
// We need this list to keep the ordering.
vset := pkz.ToValidators(1, 0)
blockID := types.BlockID{
Hash: header.Hash(),
PartsHeader: types.PartSetHeader{Total: 1, Hash: crypto.CRandBytes(32)},
}
// Fill in the votes we want.
for i := first; i < last && i < len(pkz); i++ {
vote := makeVote(header, vset, pkz[i], blockID)
commitSigs[vote.ValidatorIndex] = vote.CommitSig()
}
return types.NewCommit(header.Height, 1, blockID, commitSigs)
}
func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey, blockID types.BlockID) *types.Vote {
addr := key.PubKey().Address()
idx, _ := valset.GetByAddress(addr)
vote := &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: idx,
Height: header.Height,
Round: 1,
Timestamp: tmtime.Now(),
Type: types.PrecommitType,
BlockID: blockID,
}
// Sign it
signBytes := vote.SignBytes(header.ChainID)
// TODO Consider reworking makeVote API to return an error
sig, err := key.Sign(signBytes)
if err != nil {
panic(err)
}
vote.Signature = sig
return vote
}
func genHeader(chainID string, height int64, txs types.Txs,
valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header {
return &types.Header{
ChainID: chainID,
Height: height,
Time: tmtime.Now(),
// LastBlockID
// LastCommitHash
ValidatorsHash: valset.Hash(),
NextValidatorsHash: nextValset.Hash(),
DataHash: txs.Hash(),
AppHash: appHash,
ConsensusHash: consHash,
LastResultsHash: resHash,
}
}
// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader.
func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs,
valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader {
header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash)
check := types.SignedHeader{
Header: header,
Commit: pkz.signHeader(header, first, last),
}
return check
}
// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit.
func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs,
valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit {
header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash)
commit := types.SignedHeader{
Header: header,
Commit: pkz.signHeader(header, first, last),
}
return NewFullCommit(commit, valset, nextValset)
}
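A short in-package sketch (hypothetical, not part of this diff) of how these helpers were combined in tests; it assumes it sits next to the code above inside package lite, where the types package is already imported:

// makeTestSignedHeader is a hypothetical helper showing how the pieces above fit together.
func makeTestSignedHeader() types.SignedHeader {
	keys := genPrivKeys(4)           // four ed25519 validator keys
	vals := keys.ToValidators(10, 0) // equal voting power of 10
	next := keys.Extend(1).ToValidators(10, 0)

	// All four keys sign a header at height 1 for a made-up chain.
	return keys.GenSignedHeader("test-chain", 1, nil,
		vals, next,
		[]byte("app-hash"), []byte("cons-hash"), []byte("results-hash"),
		0, len(keys))
}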

+ 0
- 85
lite/multiprovider.go

@ -1,85 +0,0 @@
package lite
import (
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
)
var _ PersistentProvider = (*multiProvider)(nil)
// multiProvider allows you to place one or more caches in front of a source
// Provider. It runs through them in order until a match is found.
type multiProvider struct {
logger log.Logger
providers []PersistentProvider
}
// NewMultiProvider returns a new provider which wraps multiple other providers.
func NewMultiProvider(providers ...PersistentProvider) PersistentProvider {
return &multiProvider{
logger: log.NewNopLogger(),
providers: providers,
}
}
// SetLogger sets logger on self and all subproviders.
func (mc *multiProvider) SetLogger(logger log.Logger) {
mc.logger = logger
for _, p := range mc.providers {
p.SetLogger(logger)
}
}
// SaveFullCommit saves on all providers, and aborts on the first error.
func (mc *multiProvider) SaveFullCommit(fc FullCommit) (err error) {
for _, p := range mc.providers {
err = p.SaveFullCommit(fc)
if err != nil {
return
}
}
return
}
// LatestFullCommit loads the latest from all providers and provides
// the latest FullCommit that satisfies the conditions.
// Returns the first error encountered.
func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) {
for _, p := range mc.providers {
var commit FullCommit
commit, err = p.LatestFullCommit(chainID, minHeight, maxHeight)
if lerr.IsErrCommitNotFound(err) {
err = nil
continue
} else if err != nil {
return
}
if fc == (FullCommit{}) {
fc = commit
} else if commit.Height() > fc.Height() {
fc = commit
}
if fc.Height() == maxHeight {
return
}
}
if fc == (FullCommit{}) {
err = lerr.ErrCommitNotFound()
return
}
return
}
// ValidatorSet returns validator set at height as provided by the first
// provider which has it, or an error otherwise.
func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
for _, p := range mc.providers {
valset, err = p.ValidatorSet(chainID, height)
if err == nil {
// TODO Log unexpected types of errors.
return valset, nil
}
}
return nil, lerr.ErrUnknownValidators(chainID, height)
}
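A hedged sketch (not from this diff) of the caching pattern this type enabled: an in-memory provider in front of a persistent one, mirroring how lite/proxy wired it up; both stores here use MemDB purely for illustration:

package main

import (
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/lite"
)

func main() {
	// Reads try the in-memory cache first and fall through on a commit-not-found
	// error; writes are saved to every provider in order.
	trust := lite.NewMultiProvider(
		lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10),
		lite.NewDBProvider("trusted.lvl", dbm.NewMemDB()), // a disk-backed DB in practice
	)
	_ = trust
}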

+ 0
- 32
lite/provider.go

@ -1,32 +0,0 @@
package lite
import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
// Provider provides information for the lite client to sync validators.
// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.
type Provider interface {
// LatestFullCommit returns the latest commit with minHeight <= height <=
// maxHeight.
// If maxHeight is zero, returns the latest where minHeight <= height.
LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error)
// Get the valset that corresponds to chainID and height and return.
// Height must be >= 1.
ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error)
// Set a logger.
SetLogger(logger log.Logger)
}
// PersistentProvider is a Provider that can also persist new information.
// Examples: MemProvider, files.Provider, CacheProvider.
type PersistentProvider interface {
Provider
// SaveFullCommit saves a FullCommit (without verification).
SaveFullCommit(fc FullCommit) error
}

+ 0
- 141
lite/provider_test.go

@ -1,141 +0,0 @@
package lite
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
)
// missingProvider doesn't store anything, always a miss.
// Designed as a mock for testing.
type missingProvider struct{}
// NewMissingProvider returns a provider which does not store anything and always misses.
func NewMissingProvider() PersistentProvider {
return missingProvider{}
}
func (missingProvider) SaveFullCommit(FullCommit) error { return nil }
func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) {
return FullCommit{}, lerr.ErrCommitNotFound()
}
func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) {
return nil, errors.New("missing validator set")
}
func (missingProvider) SetLogger(_ log.Logger) {}
func TestMemProvider(t *testing.T) {
p := NewDBProvider("mem", dbm.NewMemDB())
checkProvider(t, p, "test-mem", "empty")
}
func TestMultiProvider(t *testing.T) {
p := NewMultiProvider(
NewMissingProvider(),
NewDBProvider("mem", dbm.NewMemDB()),
NewMissingProvider(),
)
checkProvider(t, p, "test-cache", "kjfhekfhkewhgit")
}
func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) {
assert, require := assert.New(t), require.New(t)
appHash := []byte(app)
keys := genPrivKeys(5)
count := 10
// Make a bunch of full commits.
fcz := make([]FullCommit, count)
for i := 0; i < count; i++ {
vals := keys.ToValidators(10, int64(count/2))
h := int64(20 + 10*i)
fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5)
}
// Check that provider is initially empty.
fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1)
require.NotNil(err)
assert.True(lerr.IsErrCommitNotFound(err))
// Save all full commits to the provider.
for _, fc := range fcz {
err = p.SaveFullCommit(fc)
require.Nil(err)
// Make sure we can get it back.
fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height())
assert.Nil(err)
assert.Equal(fc.SignedHeader, fc2.SignedHeader)
assert.Equal(fc.Validators, fc2.Validators)
assert.Equal(fc.NextValidators, fc2.NextValidators)
}
// Make sure we get the last hash if we overstep.
fc, err = p.LatestFullCommit(chainID, 1, 5000)
if assert.Nil(err) {
assert.Equal(fcz[count-1].Height(), fc.Height())
assert.Equal(fcz[count-1], fc)
}
// ... and middle ones as well.
fc, err = p.LatestFullCommit(chainID, 1, 47)
if assert.Nil(err) {
// we only step by 10, so 40 must be the one below this
assert.EqualValues(40, fc.Height())
}
}
// checkLatestFullCommit fetches the latest full commit up to the asked height and, if it has the expected height, saves it back to the provider.
func checkLatestFullCommit(t *testing.T, p PersistentProvider, chainID string, ask, expect int64) {
fc, err := p.LatestFullCommit(chainID, 1, ask)
require.Nil(t, err)
if assert.Equal(t, expect, fc.Height()) {
err = p.SaveFullCommit(fc)
require.Nil(t, err)
}
}
func TestMultiLatestFullCommit(t *testing.T) {
require := require.New(t)
// We will write data to the second level of the cache (p2), and see what
// gets cached/stored in.
p := NewDBProvider("mem1", dbm.NewMemDB())
p2 := NewDBProvider("mem2", dbm.NewMemDB())
cp := NewMultiProvider(p, p2)
chainID := "cache-best-height"
appHash := []byte("01234567")
keys := genPrivKeys(5)
count := 10
// Set a bunch of full commits.
for i := 0; i < count; i++ {
vals := keys.ToValidators(10, int64(count/2))
h := int64(10 * (i + 1))
fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5)
err := p2.SaveFullCommit(fc)
require.NoError(err)
}
// Get a few heights from the cache and set them proper.
checkLatestFullCommit(t, cp, chainID, 57, 50)
checkLatestFullCommit(t, cp, chainID, 33, 30)
// make sure they are set in p as well (but nothing else)
checkLatestFullCommit(t, p, chainID, 44, 30)
checkLatestFullCommit(t, p, chainID, 50, 50)
checkLatestFullCommit(t, p, chainID, 99, 50)
// now, query the cache for a higher value
checkLatestFullCommit(t, p2, chainID, 99, 90)
checkLatestFullCommit(t, cp, chainID, 99, 90)
}

+ 0
- 48
lite/proxy/block.go

@ -1,48 +0,0 @@
package proxy
import (
"bytes"
"errors"
"github.com/tendermint/tendermint/types"
)
func ValidateBlockMeta(meta *types.BlockMeta, sh types.SignedHeader) error {
if meta == nil {
return errors.New("expecting a non-nil BlockMeta")
}
// TODO: check the BlockID??
return ValidateHeader(&meta.Header, sh)
}
func ValidateBlock(meta *types.Block, sh types.SignedHeader) error {
if meta == nil {
return errors.New("expecting a non-nil Block")
}
err := ValidateHeader(&meta.Header, sh)
if err != nil {
return err
}
if !bytes.Equal(meta.Data.Hash(), meta.Header.DataHash) {
return errors.New("data hash doesn't match header")
}
return nil
}
func ValidateHeader(head *types.Header, sh types.SignedHeader) error {
if head == nil {
return errors.New("expecting a non-nil Header")
}
if sh.Header == nil {
return errors.New("unexpected empty SignedHeader")
}
// Make sure they are for the same height (obvious fail).
if head.Height != sh.Height {
return errors.New("header heights mismatched")
}
// Check if they are equal by using hashes.
if !bytes.Equal(head.Hash(), sh.Hash()) {
return errors.New("headers don't match")
}
return nil
}

+ 0
- 21
lite/proxy/errors.go

@ -1,21 +0,0 @@
package proxy
import (
"github.com/pkg/errors"
)
type errNoData struct{}
func (e errNoData) Error() string {
return "No data returned for query"
}
// IsErrNoData checks whether an error is due to a query returning empty data
func IsErrNoData(err error) bool {
_, ok := errors.Cause(err).(errNoData)
return ok
}
func ErrNoData() error {
return errors.Wrap(errNoData{}, "")
}

+ 0
- 14
lite/proxy/proof.go

@ -1,14 +0,0 @@
package proxy
import (
"github.com/tendermint/tendermint/crypto/merkle"
)
func defaultProofRuntime() *merkle.ProofRuntime {
prt := merkle.NewProofRuntime()
prt.RegisterOpDecoder(
merkle.ProofOpSimpleValue,
merkle.SimpleValueOpDecoder,
)
return prt
}

+ 0
- 187
lite/proxy/proxy.go

@ -1,187 +0,0 @@
package proxy
import (
"context"
"net/http"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/libs/log"
rpcclient "github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types"
)
const (
wsEndpoint = "/websocket"
)
// StartProxy will start the websocket manager on the client,
// set up the rpc routes to proxy via the given client,
// and start up an HTTP/RPC server on the address given by listenAddr (e.g. :1234)
// NOTE: This function blocks - you may want to call it in a go-routine.
func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger, maxOpenConnections int) error {
err := c.Start()
if err != nil {
return err
}
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
r := RPCRoutes(c)
// build the handler...
mux := http.NewServeMux()
rpcserver.RegisterRPCFuncs(mux, r, cdc, logger)
unsubscribeFromAllEvents := func(remoteAddr string) {
if err := c.UnsubscribeAll(context.Background(), remoteAddr); err != nil {
logger.Error("Failed to unsubscribe from events", "err", err)
}
}
wm := rpcserver.NewWebsocketManager(r, cdc, rpcserver.OnDisconnect(unsubscribeFromAllEvents))
wm.SetLogger(logger)
// core.SetLogger(logger)
mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)
config := rpcserver.DefaultConfig()
config.MaxOpenConnections = maxOpenConnections
l, err := rpcserver.Listen(listenAddr, config)
if err != nil {
return err
}
return rpcserver.Serve(l, mux, logger, config)
}
// RPCRoutes just routes everything to the given client, as if it were
// a tendermint fullnode.
//
// if we want security, the client must implement it as a secure client
func RPCRoutes(c rpcclient.Client) map[string]*rpcserver.RPCFunc {
return map[string]*rpcserver.RPCFunc{
// Subscribe/unsubscribe are reserved for websocket events.
"subscribe": rpcserver.NewWSRPCFunc(c.(Wrapper).SubscribeWS, "query"),
"unsubscribe": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeWS, "query"),
"unsubscribe_all": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeAllWS, ""),
// info API
"status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""),
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"),
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"),
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"),
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height"),
// broadcast API
"broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx"),
"broadcast_tx_sync": rpcserver.NewRPCFunc(makeBroadcastTxSyncFunc(c), "tx"),
"broadcast_tx_async": rpcserver.NewRPCFunc(makeBroadcastTxAsyncFunc(c), "tx"),
// abci API
"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data"),
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""),
}
}
func makeStatusFunc(c rpcclient.StatusClient) func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
return c.Status()
}
}
func makeBlockchainInfoFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
minHeight,
maxHeight int64,
) (*ctypes.ResultBlockchainInfo, error) {
return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return c.BlockchainInfo(minHeight, maxHeight)
}
}
func makeGenesisFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
return c.Genesis()
}
}
func makeBlockFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) {
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) {
return c.Block(height)
}
}
func makeBlockByHashFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
return c.BlockByHash(hash)
}
}
func makeCommitFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) {
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) {
return c.Commit(height)
}
}
func makeTxFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
return c.Tx(hash, prove)
}
}
func makeValidatorsFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
height *int64,
) (*ctypes.ResultValidators, error) {
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) {
return c.Validators(height, 0, 0)
}
}
func makeBroadcastTxCommitFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
tx types.Tx,
) (*ctypes.ResultBroadcastTxCommit, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return c.BroadcastTxCommit(tx)
}
}
func makeBroadcastTxSyncFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
tx types.Tx,
) (*ctypes.ResultBroadcastTx, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.BroadcastTxSync(tx)
}
}
func makeBroadcastTxAsyncFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
tx types.Tx,
) (*ctypes.ResultBroadcastTx, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.BroadcastTxAsync(tx)
}
}
func makeABCIQueryFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
path string,
data bytes.HexBytes,
) (*ctypes.ResultABCIQuery, error) {
return func(ctx *rpctypes.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
return c.ABCIQuery(path, data)
}
}
func makeABCIInfoFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
return c.ABCIInfo()
}
}
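A hedged end-to-end sketch (not part of this diff) of how StartProxy was typically wired together with NewVerifier and SecureClient; the chain ID, node address, home directory and listen address are placeholders:

package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/lite/proxy"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Untrusted full node to proxy for (placeholder address).
	node, err := rpchttp.New("tcp://localhost:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	// Verifier backed by a local trust store (placeholder chain ID and home dir).
	cert, err := proxy.NewVerifier("test-chain", "/tmp/.lite-home", node, logger, 10)
	if err != nil {
		panic(err)
	}
	sc := proxy.SecureClient(node, cert)

	// StartProxy blocks; run it in a goroutine if the caller has other work to do.
	if err := proxy.StartProxy(sc, ":8888", logger, 900); err != nil {
		logger.Error("proxy exited", "err", err)
	}
}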

+ 0
- 148
lite/proxy/query.go

@ -1,148 +0,0 @@
package proxy
import (
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/lite"
lerr "github.com/tendermint/tendermint/lite/errors"
rpcclient "github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// GetWithProof will query the key on the given node, and verify it has
// a valid proof, as defined by the Verifier.
//
// If there is any error in checking, returns an error.
func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rpcclient.Client,
cert lite.Verifier) (
val bytes.HexBytes, height int64, proof *merkle.Proof, err error) {
if reqHeight < 0 {
err = errors.New("height cannot be negative")
return
}
res, err := GetWithProofOptions(prt, "/key", key,
rpcclient.ABCIQueryOptions{Height: reqHeight, Prove: true},
node, cert)
if err != nil {
return
}
resp := res.Response
val, height = resp.Value, resp.Height
return val, height, proof, err
}
// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions.
// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store.
func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts rpcclient.ABCIQueryOptions,
node rpcclient.Client, cert lite.Verifier) (
*ctypes.ResultABCIQuery, error) {
opts.Prove = true
res, err := node.ABCIQueryWithOptions(path, key, opts)
if err != nil {
return nil, err
}
resp := res.Response
// Validate the response, e.g. height.
if resp.IsErr() {
err = errors.Errorf("query error for key %X: %d", key, resp.Code)
return nil, err
}
if len(resp.Key) == 0 || resp.Proof == nil {
return nil, lerr.ErrEmptyTree()
}
if resp.Height == 0 {
return nil, errors.New("height returned is zero")
}
// AppHash for height H is in header H+1
signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert)
if err != nil {
return nil, err
}
// Validate the proof against the certified header to ensure data integrity.
if resp.Value != nil {
// Value exists
// XXX How do we encode the key into a string...
storeName, err := parseQueryStorePath(path)
if err != nil {
return nil, err
}
kp := merkle.KeyPath{}
kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
kp = kp.AppendKey(resp.Key, merkle.KeyEncodingURL)
err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, kp.String(), resp.Value)
if err != nil {
return nil, errors.Wrap(err, "couldn't verify value proof")
}
return &ctypes.ResultABCIQuery{Response: resp}, nil
}
// Value absent
// Validate the proof against the certified header to ensure data integrity.
// XXX How do we encode the key into a string...
err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key))
if err != nil {
return nil, errors.Wrap(err, "couldn't verify absence proof")
}
return &ctypes.ResultABCIQuery{Response: resp}, nil
}
func parseQueryStorePath(path string) (storeName string, err error) {
if !strings.HasPrefix(path, "/") {
return "", fmt.Errorf("expected path to start with /")
}
paths := strings.SplitN(path[1:], "/", 3)
switch {
case len(paths) != 3:
return "", fmt.Errorf("expected format like /store/<storeName>/key")
case paths[0] != "store":
return "", fmt.Errorf("expected format like /store/<storeName>/key")
case paths[2] != "key":
return "", fmt.Errorf("expected format like /store/<storeName>/key")
}
return paths[1], nil
}
// GetCertifiedCommit gets the signed header for a given height and certifies
// it. Returns error if unable to get a proven header.
func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (types.SignedHeader, error) {
// FIXME: cannot use cert.GetByHeight for now, as it also requires
// Validators and will fail on querying tendermint for non-current height.
// When this is supported, we should use it instead...
rpcclient.WaitForHeight(client, h, nil)
cresp, err := client.Commit(&h)
if err != nil {
return types.SignedHeader{}, err
}
// Validate downloaded checkpoint with our request and trust store.
sh := cresp.SignedHeader
if sh.Height != h {
return types.SignedHeader{}, fmt.Errorf("height mismatch: want %v got %v",
h, sh.Height)
}
if err = cert.Verify(sh); err != nil {
return types.SignedHeader{}, err
}
return sh, nil
}
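A small sketch (not from this diff) of the /store/<storeName>/key path convention that parseQueryStorePath enforces, and the Merkle key path the verifier rebuilds from it; the store and key names are made up:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	// GetWithProofOptions expects ABCI query paths shaped like /store/<storeName>/key.
	path := "/store/acc/key" // hypothetical "acc" store
	key := []byte("my-account-key")

	// The verifier rebuilds the same key path the application proved against.
	kp := merkle.KeyPath{}.
		AppendKey([]byte("acc"), merkle.KeyEncodingURL).
		AppendKey(key, merkle.KeyEncodingURL)

	fmt.Println(path, "->", kp.String())
}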

+ 0
- 163
lite/proxy/query_test.go

@ -1,163 +0,0 @@
package proxy
import (
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/lite"
certclient "github.com/tendermint/tendermint/lite/client"
nm "github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/rpc/client"
rpclocal "github.com/tendermint/tendermint/rpc/client/local"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
var node *nm.Node
var chainID = "tendermint_test" // TODO use from config.
//nolint:unused
var waitForEventTimeout = 5 * time.Second
// TODO fix tests!!
func TestMain(m *testing.M) {
app := kvstore.NewApplication()
node = rpctest.StartTendermint(app)
code := m.Run()
rpctest.StopTendermint(node)
os.Exit(code)
}
func kvstoreTx(k, v []byte) []byte {
return []byte(fmt.Sprintf("%s=%s", k, v))
}
// TODO: enable it after general proof format has been adapted
// in abci/examples/kvstore.go
//nolint:unused,deadcode
func _TestAppProofs(t *testing.T) {
assert, require := assert.New(t), require.New(t)
prt := defaultProofRuntime()
cl := rpclocal.New(node)
client.WaitForHeight(cl, 1, nil)
// This sets up our trust on the node based on some past point.
source := certclient.NewProvider(chainID, cl)
seed, err := source.LatestFullCommit(chainID, 1, 1)
require.NoError(err, "%#v", err)
cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// Wait for tx confirmation.
done := make(chan int64)
go func() {
evtTyp := types.EventTx
_, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout)
require.Nil(err, "%#v", err)
close(done)
}()
// Submit a transaction.
k := []byte("my-key")
v := []byte("my-value")
tx := kvstoreTx(k, v)
br, err := cl.BroadcastTxCommit(tx)
require.NoError(err, "%#v", err)
require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
require.EqualValues(0, br.DeliverTx.Code)
brh := br.Height
// Fetch latest after tx commit.
<-done
latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
require.NoError(err, "%#v", err)
rootHash := latest.SignedHeader.AppHash
if rootHash == nil {
// Fetch one block later, AppHash hasn't been committed yet.
// TODO find a way to avoid doing this.
client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil)
latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1)
require.NoError(err, "%#v", err)
rootHash = latest.SignedHeader.AppHash
}
require.NotNil(rootHash)
// verify a query before the tx block has no data (and valid non-exist proof)
bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert)
require.NoError(err, "%#v", err)
require.NotNil(proof)
require.Equal(height, brh-1)
// require.NotNil(proof)
// TODO: Ensure that *some* keys will be there, ensuring that proof is nil,
// (currently there's a race condition)
// and ensure that proof proves absence of k.
require.Nil(bs)
// but given that block it is good
bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert)
require.NoError(err, "%#v", err)
require.NotNil(proof)
require.Equal(height, brh)
assert.EqualValues(v, bs)
err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding
assert.NoError(err, "%#v", err)
// Test non-existing key.
missing := []byte("my-missing-key")
bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert)
require.NoError(err)
require.Nil(bs)
require.NotNil(proof)
err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding
assert.NoError(err, "%#v", err)
err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding
assert.Error(err, "%#v", err)
}
func TestTxProofs(t *testing.T) {
assert, require := assert.New(t), require.New(t)
cl := rpclocal.New(node)
client.WaitForHeight(cl, 1, nil)
tx := kvstoreTx([]byte("key-a"), []byte("value-a"))
br, err := cl.BroadcastTxCommit(tx)
require.NoError(err, "%#v", err)
require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
require.EqualValues(0, br.DeliverTx.Code)
brh := br.Height
source := certclient.NewProvider(chainID, cl)
seed, err := source.LatestFullCommit(chainID, brh-2, brh-2)
require.NoError(err, "%#v", err)
cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// First let's make sure a bogus transaction hash returns a valid non-existence proof.
key := types.Tx([]byte("bogus")).Hash()
_, err = cl.Tx(key, true)
require.NotNil(err)
require.Contains(err.Error(), "not found")
// Now let's check with the real tx root hash.
key = types.Tx(tx).Hash()
res, err := cl.Tx(key, true)
require.NoError(err, "%#v", err)
require.NotNil(res)
keyHash := merkle.SimpleHashFromByteSlices([][]byte{key})
err = res.Proof.Validate(keyHash)
assert.NoError(err, "%#v", err)
commit, err := GetCertifiedCommit(br.Height, cl, cert)
require.Nil(err, "%#v", err)
require.Equal(res.Proof.RootHash, commit.Header.DataHash)
}

+ 0
- 211
lite/proxy/validate_test.go

@ -1,211 +0,0 @@
package proxy_test
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/lite/proxy"
"github.com/tendermint/tendermint/types"
)
var (
deadBeefTxs = types.Txs{[]byte("DE"), []byte("AD"), []byte("BE"), []byte("EF")}
deadBeefHash = deadBeefTxs.Hash()
testTime1 = time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC)
testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC)
)
var hdrHeight11 = types.Header{
Height: 11,
Time: testTime1,
ValidatorsHash: []byte("Tendermint"),
}
func TestValidateBlock(t *testing.T) {
tests := []struct {
block *types.Block
signedHeader types.SignedHeader
wantErr string
}{
{
block: nil, wantErr: "non-nil Block",
},
{
block: &types.Block{}, wantErr: "unexpected empty SignedHeader",
},
// Start Header.Height mismatch test
{
block: &types.Block{Header: types.Header{Height: 10}},
signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}},
wantErr: "header heights mismatched",
},
{
block: &types.Block{Header: types.Header{Height: 11}},
signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}},
},
// End Header.Height mismatch test
// Start Header.Hash mismatch test
{
block: &types.Block{Header: hdrHeight11},
signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}},
wantErr: "headers don't match",
},
{
block: &types.Block{Header: hdrHeight11},
signedHeader: types.SignedHeader{Header: &hdrHeight11},
},
// End Header.Hash mismatch test
// Start Header.Data hash mismatch test
{
block: &types.Block{
Header: types.Header{Height: 11},
Data: types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}},
},
signedHeader: types.SignedHeader{
Header: &types.Header{Height: 11},
Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("0xDEADBEEF")}, nil),
},
wantErr: "data hash doesn't match header",
},
{
block: &types.Block{
Header: types.Header{Height: 11, DataHash: deadBeefHash},
Data: types.Data{Txs: deadBeefTxs},
},
signedHeader: types.SignedHeader{
Header: &types.Header{Height: 11},
Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil),
},
},
// End Header.Data hash mismatch test
}
for i, tt := range tests {
err := proxy.ValidateBlock(tt.block, tt.signedHeader)
if tt.wantErr != "" {
if err == nil {
assert.FailNowf(t, "Unexpectedly passed", "#%d", i)
} else {
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
}
continue
}
assert.Nil(t, err, "#%d: expecting a nil error", i)
}
}
func TestValidateBlockMeta(t *testing.T) {
tests := []struct {
meta *types.BlockMeta
signedHeader types.SignedHeader
wantErr string
}{
{
meta: nil, wantErr: "non-nil BlockMeta",
},
{
meta: &types.BlockMeta{}, wantErr: "unexpected empty SignedHeader",
},
// Start Header.Height mismatch test
{
meta: &types.BlockMeta{Header: types.Header{Height: 10}},
signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}},
wantErr: "header heights mismatched",
},
{
meta: &types.BlockMeta{Header: types.Header{Height: 11}},
signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}},
},
// End Header.Height mismatch test
// Start Headers don't match test
{
meta: &types.BlockMeta{Header: hdrHeight11},
signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}},
wantErr: "headers don't match",
},
{
meta: &types.BlockMeta{Header: hdrHeight11},
signedHeader: types.SignedHeader{Header: &hdrHeight11},
},
{
meta: &types.BlockMeta{
Header: types.Header{
Height: 11,
ValidatorsHash: []byte("lite-test"),
// TODO: should be able to use empty time after Amino upgrade
Time: testTime1,
},
},
signedHeader: types.SignedHeader{
Header: &types.Header{Height: 11, DataHash: deadBeefHash},
},
wantErr: "headers don't match",
},
{
meta: &types.BlockMeta{
Header: types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime1,
},
},
signedHeader: types.SignedHeader{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime2,
},
Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil),
},
wantErr: "headers don't match",
},
{
meta: &types.BlockMeta{
Header: types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime2,
},
},
signedHeader: types.SignedHeader{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint-x"),
Time: testTime2,
},
Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil),
},
wantErr: "headers don't match",
},
// End Headers don't match test
}
for i, tt := range tests {
err := proxy.ValidateBlockMeta(tt.meta, tt.signedHeader)
if tt.wantErr != "" {
if err == nil {
assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr)
} else {
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
}
continue
}
assert.Nil(t, err, "#%d: expecting a nil error", i)
}
}

+ 0
- 49
lite/proxy/verifier.go

@ -1,49 +0,0 @@
package proxy
import (
"github.com/pkg/errors"
dbm "github.com/tendermint/tm-db"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/lite"
lclient "github.com/tendermint/tendermint/lite/client"
)
func NewVerifier(
chainID,
rootDir string,
client lclient.SignStatusClient,
logger log.Logger,
cacheSize int,
) (*lite.DynamicVerifier, error) {
logger = logger.With("module", "lite/proxy")
logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client)
memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize)
lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.GoLevelDBBackend, rootDir))
trust := lite.NewMultiProvider(
memProvider,
lvlProvider,
)
source := lclient.NewProvider(chainID, client)
cert := lite.NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(logger) // Sets logger recursively.
// TODO: Make this more secure, e.g. make it interactive in the console?
_, err := trust.LatestFullCommit(chainID, 1, 1<<63-1)
if err != nil {
logger.Info("lite/proxy/NewVerifier found no trusted full commit, initializing from source from height 1...")
fc, err := source.LatestFullCommit(chainID, 1, 1)
if err != nil {
return nil, errors.Wrap(err, "fetching source full commit @ height 1")
}
err = trust.SaveFullCommit(fc)
if err != nil {
return nil, errors.Wrap(err, "saving full commit to trusted")
}
}
return cert, nil
}

+ 0
- 275
lite/proxy/wrapper.go

@ -1,275 +0,0 @@
package proxy
import (
"context"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/lite"
rpcclient "github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)
var _ rpcclient.Client = Wrapper{}
// Wrapper wraps a rpcclient with a Verifier and double-checks any input that is
// provable before passing it along. Allows you to make any rpcclient fully secure.
type Wrapper struct {
rpcclient.Client
cert *lite.DynamicVerifier
prt *merkle.ProofRuntime
}
// SecureClient uses a given Verifier to wrap a connection to an untrusted
// host and return a cryptographically secure rpc client.
//
// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface
func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper {
prt := defaultProofRuntime()
wrap := Wrapper{c, cert, prt}
// TODO: no longer possible as no more such interface exposed....
// if we wrap http client, then we can swap out the event switch to filter
// if hc, ok := c.(*rpcclient.HTTP); ok {
// evt := hc.WSEvents.EventSwitch
// hc.WSEvents.EventSwitch = WrappedSwitch{evt, wrap}
// }
return wrap
}
// ABCIQueryWithOptions exposes all options for the ABCI query and verifies the returned proof
func (w Wrapper) ABCIQueryWithOptions(path string, data bytes.HexBytes,
opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
res, err := GetWithProofOptions(w.prt, path, data, opts, w.Client, w.cert)
return res, err
}
// ABCIQuery uses default options for the ABCI query and verifies the returned proof
func (w Wrapper) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
return w.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions)
}
// Tx queries for a given tx and verifies the proof if it was requested
func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
res, err := w.Client.Tx(hash, prove)
if !prove || err != nil {
return res, err
}
h := res.Height
sh, err := GetCertifiedCommit(h, w.Client, w.cert)
if err != nil {
return res, err
}
err = res.Proof.Validate(sh.DataHash)
return res, err
}
// BlockchainInfo requests a list of headers and verifies them all...
// Rather expensive.
//
// TODO: optimize this if used for anything needing performance
func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
r, err := w.Client.BlockchainInfo(minHeight, maxHeight)
if err != nil {
return nil, err
}
// go and verify every blockmeta in the result....
for _, meta := range r.BlockMetas {
// get a checkpoint to verify from
res, err := w.Commit(&meta.Header.Height)
if err != nil {
return nil, err
}
sh := res.SignedHeader
err = ValidateBlockMeta(meta, sh)
if err != nil {
return nil, err
}
}
return r, nil
}
// Block returns an entire block and verifies all signatures
func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) {
resBlock, err := w.Client.Block(height)
if err != nil {
return nil, err
}
// get a checkpoint to verify from
resCommit, err := w.Commit(height)
if err != nil {
return nil, err
}
sh := resCommit.SignedHeader
err = ValidateBlock(resBlock.Block, sh)
if err != nil {
return nil, err
}
return resBlock, nil
}
// BlockByHash returns an entire block and verifies all signatures
func (w Wrapper) BlockByHash(hash []byte) (*ctypes.ResultBlock, error) {
resBlock, err := w.Client.BlockByHash(hash)
if err != nil {
return nil, err
}
// get a checkpoint to verify from
resCommit, err := w.Commit(&resBlock.Block.Height)
if err != nil {
return nil, err
}
sh := resCommit.SignedHeader
err = ValidateBlock(resBlock.Block, sh)
if err != nil {
return nil, err
}
return resBlock, nil
}
// Commit downloads the Commit and certifies it with the lite verifier.
//
// This is the foundation for all other verification in this module
func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) {
if height == nil {
resStatus, err := w.Client.Status()
if err != nil {
return nil, err
}
// NOTE: If resStatus.CatchingUp, there is a race
// condition where the validator set for the next height
// isn't available until some time after the blockstore
// has height h on the remote node. This isn't an issue
// once the node has caught up, and a syncing node likely
// won't have this issue esp with the implementation we
// have here, but we may have to address this at some
// point.
height = new(int64)
*height = resStatus.SyncInfo.LatestBlockHeight
}
rpcclient.WaitForHeight(w.Client, *height, nil)
res, err := w.Client.Commit(height)
// if we got it, then verify it
if err == nil {
sh := res.SignedHeader
err = w.cert.Verify(sh)
}
return res, err
}
func (w Wrapper) RegisterOpDecoder(typ string, dec merkle.OpDecoder) {
w.prt.RegisterOpDecoder(typ, dec)
}
// SubscribeWS subscribes for events using the given query and remote address as
// a subscriber, but does not verify responses (UNSAFE)!
func (w Wrapper) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
out, err := w.Client.Subscribe(context.Background(), ctx.RemoteAddr(), query)
if err != nil {
return nil, err
}
go func() {
for {
select {
case resultEvent := <-out:
// XXX(melekes) We should have a switch here that performs a validation
// depending on the event's type.
ctx.WSConn.TryWriteRPCResponse(
rpctypes.NewRPCSuccessResponse(
ctx.WSConn.Codec(),
ctx.JSONReq.ID,
resultEvent,
))
case <-w.Client.Quit():
return
}
}
}()
return &ctypes.ResultSubscribe{}, nil
}
// UnsubscribeWS calls original client's Unsubscribe using remote address as a
// subscriber.
func (w Wrapper) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
err := w.Client.Unsubscribe(context.Background(), ctx.RemoteAddr(), query)
if err != nil {
return nil, err
}
return &ctypes.ResultUnsubscribe{}, nil
}
// UnsubscribeAllWS calls original client's UnsubscribeAll using remote address
// as a subscriber.
func (w Wrapper) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
err := w.Client.UnsubscribeAll(context.Background(), ctx.RemoteAddr())
if err != nil {
return nil, err
}
return &ctypes.ResultUnsubscribe{}, nil
}
// // WrappedSwitch creates a websocket connection that auto-verifies any info
// // coming through before passing it along.
// //
// // Since the verification takes 1-2 rpc calls, this is obviously only for
// // relatively low-throughput situations that can tolerate a bit extra latency
// type WrappedSwitch struct {
// types.EventSwitch
// client rpcclient.Client
// }
// // FireEvent verifies any block or header returned from the eventswitch
// func (s WrappedSwitch) FireEvent(event string, data events.EventData) {
// tm, ok := data.(types.TMEventData)
// if !ok {
// fmt.Printf("bad type %#v\n", data)
// return
// }
// // check to validate it if possible, and drop if not valid
// switch t := tm.(type) {
// case types.EventDataNewBlockHeader:
// err := verifyHeader(s.client, t.Header)
// if err != nil {
// fmt.Printf("Invalid header: %#v\n", err)
// return
// }
// case types.EventDataNewBlock:
// err := verifyBlock(s.client, t.Block)
// if err != nil {
// fmt.Printf("Invalid block: %#v\n", err)
// return
// }
// // TODO: can we verify tx as well? anything else
// }
// // looks good, we fire it
// s.EventSwitch.FireEvent(event, data)
// }
// func verifyHeader(c rpcclient.Client, head *types.Header) error {
// // get a checkpoint to verify from
// commit, err := c.Commit(&head.Height)
// if err != nil {
// return err
// }
// check := certclient.CommitFromResult(commit)
// return ValidateHeader(head, check)
// }
//
// func verifyBlock(c rpcclient.Client, block *types.Block) error {
// // get a checkpoint to verify from
// commit, err := c.Commit(&block.Height)
// if err != nil {
// return err
// }
// check := certclient.CommitFromResult(commit)
// return ValidateBlock(block, check)
// }
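A short follow-on sketch (hypothetical, not from this diff) of using a Wrapper for verified queries; it assumes a Wrapper built with SecureClient as above, and the store name, key and transaction hash are placeholders:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/lite/proxy"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

// verifiedLookups runs a proof-checked ABCI query and a proof-checked tx lookup
// through a SecureClient wrapper.
func verifiedLookups(w proxy.Wrapper, txHash []byte) error {
	// ABCIQueryWithOptions verifies the returned Merkle proof against a certified header.
	res, err := w.ABCIQueryWithOptions("/store/acc/key", []byte("my-account-key"),
		rpcclient.ABCIQueryOptions{Prove: true})
	if err != nil {
		return err
	}
	fmt.Printf("verified value: %X\n", res.Response.Value)

	// Tx re-checks the inclusion proof against the certified header's DataHash.
	tx, err := w.Tx(txHash, true)
	if err != nil {
		return err
	}
	fmt.Printf("tx included at height %d\n", tx.Height)
	return nil
}

func main() {} // construction of the Wrapper itself is shown in the StartProxy sketch above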

+ 0
- 13
lite/types.go

@ -1,13 +0,0 @@
package lite
import (
"github.com/tendermint/tendermint/types"
)
// Verifier checks the votes to make sure the block really is signed properly.
// Verifier must know the current or recent set of validators by some other
// means.
type Verifier interface {
Verify(sheader types.SignedHeader) error
ChainID() string
}

+ 3
- 3
node/node.go

@ -29,7 +29,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/service"
lite "github.com/tendermint/tendermint/lite2"
"github.com/tendermint/tendermint/light"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/pex"
@ -574,11 +574,11 @@ func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reacto
if stateProvider == nil {
var err error
stateProvider, err = statesync.NewLightClientStateProvider(state.ChainID, state.Version,
config.RPCServers, lite.TrustOptions{
config.RPCServers, light.TrustOptions{
Period: config.TrustPeriod,
Height: config.TrustHeight,
Hash: config.TrustHashBytes(),
}, ssR.Logger.With("module", "lite"))
}, ssR.Logger.With("module", "light"))
if err != nil {
return fmt.Errorf("failed to set up light client state provider: %w", err)
}


+ 15
- 15
statesync/stateprovider.go

@ -9,11 +9,11 @@ import (
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/libs/log"
lite "github.com/tendermint/tendermint/lite2"
liteprovider "github.com/tendermint/tendermint/lite2/provider"
litehttp "github.com/tendermint/tendermint/lite2/provider/http"
literpc "github.com/tendermint/tendermint/lite2/rpc"
litedb "github.com/tendermint/tendermint/lite2/store/db"
"github.com/tendermint/tendermint/light"
lightprovider "github.com/tendermint/tendermint/light/provider"
lighthttp "github.com/tendermint/tendermint/light/provider/http"
lightrpc "github.com/tendermint/tendermint/light/rpc"
lightdb "github.com/tendermint/tendermint/light/store/db"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@ -34,10 +34,10 @@ type StateProvider interface {
// lightClientStateProvider is a state provider using the light client.
type lightClientStateProvider struct {
sync.Mutex // lite.Client is not concurrency-safe
lc *lite.Client
sync.Mutex // light.Client is not concurrency-safe
lc *light.Client
version sm.Version
providers map[liteprovider.Provider]string
providers map[lightprovider.Provider]string
}
// NewLightClientStateProvider creates a new StateProvider using a light client and RPC clients.
@ -45,29 +45,29 @@ func NewLightClientStateProvider(
chainID string,
version sm.Version,
servers []string,
trustOptions lite.TrustOptions,
trustOptions light.TrustOptions,
logger log.Logger,
) (StateProvider, error) {
if len(servers) < 2 {
return nil, fmt.Errorf("at least 2 RPC servers are required, got %v", len(servers))
}
providers := make([]liteprovider.Provider, 0, len(servers))
providerRemotes := make(map[liteprovider.Provider]string)
providers := make([]lightprovider.Provider, 0, len(servers))
providerRemotes := make(map[lightprovider.Provider]string)
for _, server := range servers {
client, err := rpcClient(server)
if err != nil {
return nil, fmt.Errorf("failed to set up RPC client: %w", err)
}
provider := litehttp.NewWithClient(chainID, client)
provider := lighthttp.NewWithClient(chainID, client)
providers = append(providers, provider)
// We store the RPC addresses keyed by provider, so we can find the address of the primary
// provider used by the light client and use it to fetch consensus parameters.
providerRemotes[provider] = server
}
lc, err := lite.NewClient(chainID, trustOptions, providers[0], providers[1:],
litedb.New(dbm.NewMemDB(), ""), lite.Logger(logger), lite.MaxRetryAttempts(5))
lc, err := light.NewClient(chainID, trustOptions, providers[0], providers[1:],
lightdb.New(dbm.NewMemDB(), ""), light.Logger(logger), light.MaxRetryAttempts(5))
if err != nil {
return nil, err
}
@ -155,7 +155,7 @@ func (s *lightClientStateProvider) State(height uint64) (sm.State, error) {
if err != nil {
return sm.State{}, fmt.Errorf("unable to create RPC client: %w", err)
}
rpcclient := literpc.NewClient(primaryRPC, s.lc)
rpcclient := lightrpc.NewClient(primaryRPC, s.lc)
result, err := rpcclient.ConsensusParams(&nextHeader.Height)
if err != nil {
return sm.State{}, fmt.Errorf("unable to fetch consensus parameters for height %v: %w",


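For readers updating their own imports, a hedged sketch (not part of this diff) of the renamed packages in use, following the wiring above; the chain ID, RPC endpoints, trusted height and hash are placeholders, and it assumes the http provider's New constructor:

package main

import (
	"time"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/light"
	lightprovider "github.com/tendermint/tendermint/light/provider"
	lighthttp "github.com/tendermint/tendermint/light/provider/http"
	lightdb "github.com/tendermint/tendermint/light/store/db"
)

func main() {
	// Placeholder endpoints: one primary and one witness RPC server.
	primary, err := lighthttp.New("test-chain", "tcp://localhost:26657")
	if err != nil {
		panic(err)
	}
	witness, err := lighthttp.New("test-chain", "tcp://localhost:26658")
	if err != nil {
		panic(err)
	}

	c, err := light.NewClient("test-chain",
		light.TrustOptions{
			Period: 168 * time.Hour, // illustrative trusting period
			Height: 100,
			Hash:   make([]byte, tmhash.Size), // placeholder: use the real header hash
		},
		primary,
		[]lightprovider.Provider{witness},
		lightdb.New(dbm.NewMemDB(), ""),
		light.Logger(log.NewNopLogger()),
		light.MaxRetryAttempts(5),
	)
	if err != nil {
		panic(err)
	}
	_ = c // fetch and verify headers with the light client from here on
}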
+ 1
- 1
types/block.go

@ -902,7 +902,7 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) {
//-----------------------------------------------------------------------------
// SignedHeader is a header along with the commits that prove it.
// It is the basis of the lite client.
// It is the basis of the light client.
type SignedHeader struct {
*Header `json:"header"`


+ 1
- 1
types/validator_set.go

@ -728,7 +728,7 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID,
// old vals to sign the future commit at H, that way we preserve the property
// that if they weren't being truthful about the validator set at H (block hash
// -> vals hash) or about the app state (block hash -> app hash) we can slash
// > 2/3. Otherwise, the lite client isn't providing the same security
// > 2/3. Otherwise, the light client isn't providing the same security
// guarantees.
//
// Even if we added a slashing condition that if you sign a block header with

