Also, fix consensus liveness issue. (branch: pull/2296/head)
@ -0,0 +1,72 @@ | |||||
package lite | |||||
import ( | |||||
"bytes" | |||||
lerr "github.com/tendermint/tendermint/lite/errors" | |||||
"github.com/tendermint/tendermint/types" | |||||
cmn "github.com/tendermint/tmlibs/common" | |||||
) | |||||
var _ Certifier = (*BaseCertifier)(nil) | |||||
// BaseCertifier lets us check the validity of SignedHeaders at height or | |||||
// later, requiring sufficient votes (> 2/3) from the given valset. | |||||
// To certify blocks produced by a blockchain with mutable validator sets, | |||||
// use the InquiringCertifier. | |||||
// TODO: Handle unbonding time. | |||||
type BaseCertifier struct { | |||||
chainID string | |||||
height int64 | |||||
valset *types.ValidatorSet | |||||
} | |||||
// NewBaseCertifier returns a new certifier initialized with a validator set at | |||||
// some height. | |||||
func NewBaseCertifier(chainID string, height int64, valset *types.ValidatorSet) *BaseCertifier { | |||||
if valset == nil || len(valset.Hash()) == 0 { | |||||
panic("NewBaseCertifier requires a valid valset") | |||||
} | |||||
return &BaseCertifier{ | |||||
chainID: chainID, | |||||
height: height, | |||||
valset: valset, | |||||
} | |||||
} | |||||
// Implements Certifier. | |||||
func (bc *BaseCertifier) ChainID() string { | |||||
return bc.chainID | |||||
} | |||||
// Implements Certifier. | |||||
func (bc *BaseCertifier) Certify(signedHeader types.SignedHeader) error { | |||||
// We can't certify commits older than bc.height. | |||||
if signedHeader.Height < bc.height { | |||||
return cmn.NewError("BaseCertifier height is %v, cannot certify height %v", | |||||
bc.height, signedHeader.Height) | |||||
} | |||||
// We can't certify with the wrong validator set. | |||||
if !bytes.Equal(signedHeader.ValidatorsHash, | |||||
bc.valset.Hash()) { | |||||
return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash()) | |||||
} | |||||
// Do basic sanity checks. | |||||
err := signedHeader.ValidateBasic(bc.chainID) | |||||
if err != nil { | |||||
return cmn.ErrorWrap(err, "in certify") | |||||
} | |||||
// Check commit signatures. | |||||
err = bc.valset.VerifyCommit( | |||||
bc.chainID, signedHeader.Commit.BlockID, | |||||
signedHeader.Height, signedHeader.Commit) | |||||
if err != nil { | |||||
return cmn.ErrorWrap(err, "in certify") | |||||
} | |||||
return nil | |||||
} |
@ -1,25 +0,0 @@ | |||||
package client_test | |||||
import ( | |||||
"os" | |||||
"testing" | |||||
"github.com/tendermint/tendermint/abci/example/kvstore" | |||||
nm "github.com/tendermint/tendermint/node" | |||||
rpctest "github.com/tendermint/tendermint/rpc/test" | |||||
) | |||||
var node *nm.Node | |||||
func TestMain(m *testing.M) { | |||||
// start a tendermint node (and merkleeyes) in the background to test against | |||||
app := kvstore.NewKVStoreApplication() | |||||
node = rpctest.StartTendermint(app) | |||||
code := m.Run() | |||||
// and shut down proper at the end | |||||
node.Stop() | |||||
node.Wait() | |||||
os.Exit(code) | |||||
} |
@ -1,63 +1,73 @@ | |||||
package client | package client | ||||
import ( | import ( | ||||
"os" | |||||
"testing" | "testing" | ||||
"github.com/stretchr/testify/assert" | "github.com/stretchr/testify/assert" | ||||
"github.com/stretchr/testify/require" | "github.com/stretchr/testify/require" | ||||
"github.com/tendermint/tendermint/lite" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
"github.com/tendermint/tendermint/abci/example/kvstore" | |||||
rpcclient "github.com/tendermint/tendermint/rpc/client" | rpcclient "github.com/tendermint/tendermint/rpc/client" | ||||
rpctest "github.com/tendermint/tendermint/rpc/test" | rpctest "github.com/tendermint/tendermint/rpc/test" | ||||
"github.com/tendermint/tendermint/types" | "github.com/tendermint/tendermint/types" | ||||
) | ) | ||||
// TODO fix tests!! | |||||
func TestMain(m *testing.M) { | |||||
app := kvstore.NewKVStoreApplication() | |||||
node := rpctest.StartTendermint(app) | |||||
code := m.Run() | |||||
node.Stop() | |||||
node.Wait() | |||||
os.Exit(code) | |||||
} | |||||
func TestProvider(t *testing.T) { | func TestProvider(t *testing.T) { | ||||
assert, require := assert.New(t), require.New(t) | assert, require := assert.New(t), require.New(t) | ||||
cfg := rpctest.GetConfig() | cfg := rpctest.GetConfig() | ||||
rpcAddr := cfg.RPC.ListenAddress | rpcAddr := cfg.RPC.ListenAddress | ||||
genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile()) | |||||
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) | |||||
if err != nil { | |||||
panic(err) | |||||
} | |||||
chainID := genDoc.ChainID | chainID := genDoc.ChainID | ||||
p := NewHTTPProvider(rpcAddr) | |||||
t.Log("chainID:", chainID) | |||||
p := NewHTTPProvider(chainID, rpcAddr) | |||||
require.NotNil(t, p) | require.NotNil(t, p) | ||||
// let it produce some blocks | // let it produce some blocks | ||||
err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) | |||||
err = rpcclient.WaitForHeight(p.(*provider).client, 6, nil) | |||||
require.Nil(err) | require.Nil(err) | ||||
// let's get the highest block | // let's get the highest block | ||||
seed, err := p.LatestCommit() | |||||
fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) | |||||
require.Nil(err, "%+v", err) | require.Nil(err, "%+v", err) | ||||
sh := seed.Height() | |||||
vhash := seed.Header.ValidatorsHash | |||||
sh := fc.Height() | |||||
assert.True(sh < 5000) | assert.True(sh < 5000) | ||||
// let's check this is valid somehow | // let's check this is valid somehow | ||||
assert.Nil(seed.ValidateBasic(chainID)) | |||||
cert := lite.NewStaticCertifier(chainID, seed.Validators) | |||||
assert.Nil(fc.ValidateBasic(chainID)) | |||||
// historical queries now work :) | // historical queries now work :) | ||||
lower := sh - 5 | lower := sh - 5 | ||||
seed, err = p.GetByHeight(lower) | |||||
fc, err = p.LatestFullCommit(chainID, lower, lower) | |||||
assert.Nil(err, "%+v", err) | assert.Nil(err, "%+v", err) | ||||
assert.Equal(lower, seed.Height()) | |||||
assert.Equal(lower, fc.Height()) | |||||
// also get by hash (given the match) | |||||
seed, err = p.GetByHash(vhash) | |||||
require.Nil(err, "%+v", err) | |||||
require.Equal(vhash, seed.Header.ValidatorsHash) | |||||
err = cert.Certify(seed.Commit) | |||||
assert.Nil(err, "%+v", err) | |||||
/* | |||||
// also get by hash (given the match) | |||||
fc, err = p.GetByHash(vhash) | |||||
require.Nil(err, "%+v", err) | |||||
require.Equal(vhash, fc.Header.ValidatorsHash) | |||||
// get by hash fails without match | |||||
seed, err = p.GetByHash([]byte("foobar")) | |||||
assert.NotNil(err) | |||||
assert.True(liteErr.IsCommitNotFoundErr(err)) | |||||
// storing the seed silently ignored | |||||
err = p.StoreCommit(seed) | |||||
assert.Nil(err, "%+v", err) | |||||
// get by hash fails without match | |||||
fc, err = p.GetByHash([]byte("foobar")) | |||||
assert.NotNil(err) | |||||
assert.True(liteErr.IsCommitNotFoundErr(err)) | |||||
*/ | |||||
} | } |
@ -0,0 +1,168 @@ | |||||
package lite | |||||
import ( | |||||
"fmt" | |||||
"regexp" | |||||
"strconv" | |||||
amino "github.com/tendermint/go-amino" | |||||
crypto "github.com/tendermint/tendermint/crypto" | |||||
lerr "github.com/tendermint/tendermint/lite/errors" | |||||
"github.com/tendermint/tendermint/types" | |||||
dbm "github.com/tendermint/tmlibs/db" | |||||
) | |||||
// signedHeaderKey returns the database key under which the signed header for
// chainID at the given height is stored. The height is zero-padded to ten
// digits so lexicographic key order matches numeric height order.
func signedHeaderKey(chainID string, height int64) []byte {
	key := fmt.Sprintf("%s/%010d/sh", chainID, height)
	return []byte(key)
}
// signedHeaderKeyPattern matches exactly the keys produced by
// signedHeaderKey. It is anchored so that arbitrary keys merely embedding a
// matching substring are rejected, and the height must contain at least one
// digit.
var signedHeaderKeyPattern = regexp.MustCompile(`^([^/]+)/([0-9]+)/sh$`)

// parseSignedHeaderKey splits a signed-header database key back into its
// chain ID and height. ok is false when the key has a different shape or the
// height does not parse as an int64.
func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) {
	submatch := signedHeaderKeyPattern.FindSubmatch(key)
	if submatch == nil {
		return "", 0, false
	}
	// Parse as int64 explicitly so heights beyond 2^31 work even where the
	// platform int is 32 bits.
	height, err := strconv.ParseInt(string(submatch[2]), 10, 64)
	if err != nil {
		return "", 0, false
	}
	return string(submatch[1]), height, true
}
// validatorSetKey returns the database key under which the validator set for
// chainID at the given height is stored. The height is zero-padded to ten
// digits so lexicographic key order matches numeric height order.
func validatorSetKey(chainID string, height int64) []byte {
	key := fmt.Sprintf("%s/%010d/vs", chainID, height)
	return []byte(key)
}
type DBProvider struct { | |||||
chainID string | |||||
db dbm.DB | |||||
cdc *amino.Codec | |||||
} | |||||
func NewDBProvider(db dbm.DB) *DBProvider { | |||||
//db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) | |||||
cdc := amino.NewCodec() | |||||
crypto.RegisterAmino(cdc) | |||||
dbp := &DBProvider{db: db, cdc: cdc} | |||||
return dbp | |||||
} | |||||
// Implements PersistentProvider. | |||||
func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { | |||||
batch := dbp.db.NewBatch() | |||||
// Save the fc.validators. | |||||
// We might be overwriting what we already have, but | |||||
// it makes the logic easier for now. | |||||
vsKey := validatorSetKey(fc.ChainID(), fc.Height()) | |||||
vsBz, err := dbp.cdc.MarshalBinary(fc.Validators) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
batch.Set(vsKey, vsBz) | |||||
// Save the fc.NextValidators. | |||||
nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) | |||||
nvsBz, err := dbp.cdc.MarshalBinary(fc.NextValidators) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
batch.Set(nvsKey, nvsBz) | |||||
// Save the fc.SignedHeader | |||||
shKey := signedHeaderKey(fc.ChainID(), fc.Height()) | |||||
shBz, err := dbp.cdc.MarshalBinary(fc.SignedHeader) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
batch.Set(shKey, shBz) | |||||
// And write sync. | |||||
batch.WriteSync() | |||||
return nil | |||||
} | |||||
// Implements Provider. | |||||
func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) ( | |||||
FullCommit, error) { | |||||
if minHeight <= 0 { | |||||
minHeight = 1 | |||||
} | |||||
if maxHeight == 0 { | |||||
maxHeight = 1<<63 - 1 | |||||
} | |||||
itr := dbp.db.ReverseIterator( | |||||
signedHeaderKey(chainID, maxHeight), | |||||
signedHeaderKey(chainID, minHeight-1), | |||||
) | |||||
defer itr.Close() | |||||
for itr.Valid() { | |||||
key := itr.Key() | |||||
_, _, ok := parseSignedHeaderKey(key) | |||||
if !ok { | |||||
// Skip over other keys. | |||||
itr.Next() | |||||
continue | |||||
} else { | |||||
// Found the latest full commit signed header. | |||||
shBz := itr.Value() | |||||
sh := types.SignedHeader{} | |||||
err := dbp.cdc.UnmarshalBinary(shBz, &sh) | |||||
if err != nil { | |||||
return FullCommit{}, err | |||||
} else { | |||||
return dbp.fillFullCommit(sh) | |||||
} | |||||
} | |||||
} | |||||
return FullCommit{}, lerr.ErrCommitNotFound() | |||||
} | |||||
func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { | |||||
return dbp.getValidatorSet(chainID, height) | |||||
} | |||||
func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { | |||||
vsBz := dbp.db.Get(validatorSetKey(chainID, height)) | |||||
if vsBz == nil { | |||||
err = lerr.ErrMissingValidators(chainID, height) | |||||
return | |||||
} | |||||
err = dbp.cdc.UnmarshalBinary(vsBz, &valset) | |||||
if err != nil { | |||||
return | |||||
} | |||||
valset.TotalVotingPower() // to test deep equality. | |||||
return | |||||
} | |||||
func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { | |||||
var chainID = sh.ChainID | |||||
var height = sh.Height | |||||
var valset, nvalset *types.ValidatorSet | |||||
// Load the validator set. | |||||
valset, err := dbp.getValidatorSet(chainID, height) | |||||
if err != nil { | |||||
return FullCommit{}, err | |||||
} | |||||
// Load the next validator set. | |||||
nvalset, err = dbp.getValidatorSet(chainID, height+1) | |||||
if err != nil { | |||||
return FullCommit{}, err | |||||
} | |||||
// Return filled FullCommit. | |||||
return FullCommit{ | |||||
SignedHeader: sh, | |||||
Validators: valset, | |||||
NextValidators: nvalset, | |||||
}, nil | |||||
} |
@ -1,133 +1,139 @@ | |||||
/* | /* | ||||
Package lite allows you to securely validate headers | |||||
without a full node. | |||||
Package lite allows you to securely validate headers without a full node. | |||||
This library pulls together all the crypto and algorithms, | |||||
so given a relatively recent (< unbonding period) known | |||||
validator set, one can get indisputable proof that data is in | |||||
the chain (current state) or detect if the node is lying to | |||||
the client. | |||||
This library pulls together all the crypto and algorithms, so given a | |||||
relatively recent (< unbonding period) known validator set, one can get | |||||
indisputable proof that data is in the chain (current state) or detect if the | |||||
node is lying to the client. | |||||
Tendermint RPC exposes a lot of info, but a malicious node | |||||
could return any data it wants to queries, or even to block | |||||
headers, even making up fake signatures from non-existent | |||||
validators to justify it. This is a lot of logic to get | |||||
right, to be contained in a small, easy to use library, | |||||
that does this for you, so you can just build nice UI. | |||||
Tendermint RPC exposes a lot of info, but a malicious node could return any | |||||
data it wants to queries, or even to block headers, even making up fake | |||||
signatures from non-existent validators to justify it. This is a lot of logic | |||||
to get right, to be contained in a small, easy to use library, that does this | |||||
for you, so you can just build nice applications. | |||||
We design for clients who have no strong trust relationship | |||||
with any tendermint node, just the validator set as a whole. | |||||
Beyond building nice mobile or desktop applications, the | |||||
cosmos hub is another important example of a client, | |||||
that needs undeniable proof without syncing the full chain, | |||||
in order to efficiently implement IBC. | |||||
We design for clients who have no strong trust relationship with any Tendermint | |||||
node, just the blockchain and validator set as a whole. | |||||
Commits | |||||
# Data structures | |||||
There are two main data structures that we pass around - Commit | |||||
and FullCommit. Both of them mirror what information is | |||||
exposed in tendermint rpc. | |||||
## SignedHeader | |||||
Commit is a block header along with enough validator signatures | |||||
to prove its validity (> 2/3 of the voting power). A FullCommit | |||||
is a Commit along with the full validator set. When the | |||||
validator set doesn't change, the Commit is enough, but since | |||||
the block header only has a hash, we need the FullCommit to | |||||
follow any changes to the validator set. | |||||
SignedHeader is a block header along with a commit -- enough validator | |||||
precommit-vote signatures to prove its validity (> 2/3 of the voting power) | |||||
given the validator set responsible for signing that header. A FullCommit is a | |||||
SignedHeader along with the current and next validator sets. | |||||
Certifiers | |||||
The hash of the next validator set is included and signed in the SignedHeader. | |||||
This lets the lite client keep track of arbitrary changes to the validator set, | |||||
as every change to the validator set must be approved by inclusion in the | |||||
header and signed in the commit. | |||||
A Certifier validates a new Commit given the currently known | |||||
state. There are three different types of Certifiers exposed, | |||||
each one building on the last one, with additional complexity. | |||||
In the worst case, with every block changing the validators around completely, | |||||
a lite client can sync up with every block header to verify each validator set | |||||
change on the chain. In practice, most applications will not have frequent | |||||
drastic updates to the validator set, so the logic defined in this package for | |||||
lite client syncing is optimized to use intelligent bisection and | |||||
block-skipping for efficient sourcing and verification of these data structures | |||||
and updates to the validator set (see the InquiringCertifier for more | |||||
information). | |||||
Static - given the validator set upon initialization. Verifies | |||||
all signatures against that set and if the validator set | |||||
changes, it will reject all headers. | |||||
The FullCommit is also declared in this package as a convenience structure, | |||||
which includes the SignedHeader along with the full current and next | |||||
ValidatorSets. | |||||
Dynamic - This wraps Static and has the same Certify | |||||
method. However, it adds an Update method, which can be called | |||||
with a FullCommit when the validator set changes. If it can | |||||
prove this is a valid transition, it will update the validator | |||||
set. | |||||
## Certifier | |||||
Inquiring - this wraps Dynamic and implements an auto-update | |||||
strategy on top of the Dynamic update. If a call to | |||||
Certify fails as the validator set has changed, then it | |||||
attempts to find a FullCommit and Update to that header. | |||||
To get these FullCommits, it makes use of a Provider. | |||||
A Certifier validates a new SignedHeader given the currently known state. There | |||||
are two different types of Certifiers provided. | |||||
Providers | |||||
BaseCertifier - given a validator set and a height, this Certifier verifies | |||||
that > 2/3 of the voting power of the given validator set had signed the | |||||
SignedHeader, and that the SignedHeader was to be signed by the exact given | |||||
validator set, and that the height of the commit is at least height (or | |||||
greater). | |||||
A Provider allows us to store and retrieve the FullCommits, | |||||
to provide memory to the Inquiring Certifier. | |||||
SignedHeader.Commit may be signed by a different validator set, it can get | |||||
certified with a BaseCertifier as long as sufficient signatures from the | |||||
previous validator set are present in the commit. | |||||
NewMemStoreProvider - in-memory cache. | |||||
InquiringCertifier - this certifier implements an auto-update and persistence | |||||
strategy to certify any SignedHeader of the blockchain. | |||||
files.NewProvider - disk backed storage. | |||||
## Provider and PersistentProvider | |||||
client.NewHTTPProvider - query tendermint rpc. | |||||
A Provider allows us to store and retrieve the FullCommits. | |||||
NewCacheProvider - combine multiple providers. | |||||
```go | |||||
type Provider interface { | |||||
// LatestFullCommit returns the latest commit with | |||||
// minHeight <= height <= maxHeight. | |||||
// If maxHeight is zero, returns the latest where | |||||
// minHeight <= height. | |||||
LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) | |||||
} | |||||
``` | |||||
The suggested use for local light clients is | |||||
client.NewHTTPProvider for getting new data (Source), | |||||
and NewCacheProvider(NewMemStoreProvider(), | |||||
files.NewProvider()) to store confirmed headers (Trusted) | |||||
* client.NewHTTPProvider - query Tendermint rpc. | |||||
How We Track Validators | |||||
A PersistentProvider is a Provider that also allows for saving state. This is | |||||
used by the InquiringCertifier for persistence. | |||||
Unless you want to blindly trust the node you talk with, you | |||||
need to trace every response back to a hash in a block header | |||||
and validate the commit signatures of that block header match | |||||
the proper validator set. If there is a constant validator | |||||
set, you store it locally upon initialization of the client, | |||||
```go | |||||
type PersistentProvider interface { | |||||
Provider | |||||
// SaveFullCommit saves a FullCommit (without verification). | |||||
SaveFullCommit(fc FullCommit) error | |||||
} | |||||
``` | |||||
* DBProvider - persistence provider for use with any tmlibs/DB. | |||||
* MultiProvider - combine multiple providers. | |||||
The suggested use for local light clients is client.NewHTTPProvider(...) for | |||||
getting new data (Source), and NewMultiProvider(NewDBProvider(dbm.NewMemDB()), | |||||
NewDBProvider(db.NewFileDB(...))) to store confirmed full commits (Trusted) | |||||
# How We Track Validators | |||||
Unless you want to blindly trust the node you talk with, you need to trace | |||||
every response back to a hash in a block header and validate the commit | |||||
signatures of that block header match the proper validator set. If there is a | |||||
static validator set, you store it locally upon initialization of the client, | |||||
and check against that every time. | and check against that every time. | ||||
Once there is a dynamic validator set, the issue of | |||||
verifying a block becomes a bit more tricky. There is | |||||
background information in a | |||||
github issue (https://github.com/tendermint/tendermint/issues/377). | |||||
In short, if there is a block at height H with a known | |||||
(trusted) validator set V, and another block at height H' | |||||
(H' > H) with validator set V' != V, then we want a way to | |||||
safely update it. | |||||
First, get the new (unconfirmed) validator set V' and | |||||
verify H' is internally consistent and properly signed by | |||||
this V'. Assuming it is a valid block, we check that at | |||||
least 2/3 of the validators in V also signed it, meaning | |||||
it would also be valid under our old assumptions. | |||||
That should be enough, but we can also check that the | |||||
V counts for at least 2/3 of the total votes in H' | |||||
for extra safety (we can have a discussion if this is | |||||
strictly required). If we can verify all this, | |||||
then we can accept H' and V' as valid and use that to | |||||
validate all blocks X > H'. | |||||
If we cannot update directly from H -> H' because there was | |||||
too much change to the validator set, then we can look for | |||||
some Hm (H < Hm < H') with a validator set Vm. Then we try | |||||
to update H -> Hm and Hm -> H' in two separate steps. | |||||
If one of these steps doesn't work, then we continue | |||||
bisecting, until we eventually have to externally | |||||
validate the validator set changes at every block. | |||||
Since we never trust any server in this protocol, only the | |||||
signatures themselves, it doesn't matter if the seed comes | |||||
from a (possibly malicious) node or a (possibly malicious) user. | |||||
We can accept it or reject it based only on our trusted | |||||
validator set and cryptographic proofs. This makes it | |||||
extremely important to verify that you have the proper | |||||
validator set when initializing the client, as that is the | |||||
root of all trust. | |||||
Or course, this assumes that the known block is within the | |||||
unbonding period to avoid the "nothing at stake" problem. | |||||
If you haven't seen the state in a few months, you will need | |||||
to manually verify the new validator set hash using off-chain | |||||
means (the same as getting the initial hash). | |||||
If the validator set for the blockchain is dynamic, verifying block commits is | |||||
a bit more involved -- if there is a block at height H with a known (trusted) | |||||
validator set V, and another block at height H' (H' > H) with validator set V' | |||||
!= V, then we want a way to safely update it. | |||||
First, we get the new (unconfirmed) validator set V' and verify that H' is | |||||
internally consistent and properly signed by this V'. Assuming it is a valid | |||||
block, we check that at least 2/3 of the validators in V also signed it, | |||||
meaning it would also be valid under our old assumptions. Then, we accept H' | |||||
and V' as valid and trusted and use that to validate for heights X > H' until a | |||||
more recent and updated validator set is found. | |||||
If we cannot update directly from H -> H' because there was too much change to | |||||
the validator set, then we can look for some Hm (H < Hm < H') with a validator | |||||
set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one | |||||
of these steps doesn't work, then we continue bisecting, until we eventually | |||||
have to externally validate the validator set changes at every block. | |||||
Since we never trust any server in this protocol, only the signatures | |||||
themselves, it doesn't matter if the seed comes from a (possibly malicious) | |||||
node or a (possibly malicious) user. We can accept it or reject it based only | |||||
on our trusted validator set and cryptographic proofs. This makes it extremely | |||||
important to verify that you have the proper validator set when initializing | |||||
the client, as that is the root of all trust. | |||||
The software currently assumes that the unbonding period is infinite in | |||||
duration. If the InquiringCertifier hasn't been updated in a while, you should | |||||
manually verify the block headers using other sources. | |||||
TODO: Update the software to handle cases around the unbonding period. | |||||
*/ | */ | ||||
package lite | package lite |
@ -1,96 +0,0 @@ | |||||
package lite | |||||
import ( | |||||
"github.com/tendermint/tendermint/types" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
) | |||||
var _ Certifier = (*DynamicCertifier)(nil) | |||||
// DynamicCertifier uses a StaticCertifier for Certify, but adds an | |||||
// Update method to allow for a change of validators. | |||||
// | |||||
// You can pass in a FullCommit with another validator set, | |||||
// and if this is a provably secure transition (< 1/3 change, | |||||
// sufficient signatures), then it will update the | |||||
// validator set for the next Certify call. | |||||
// For security, it will only follow validator set changes | |||||
// going forward. | |||||
type DynamicCertifier struct { | |||||
cert *StaticCertifier | |||||
lastHeight int64 | |||||
} | |||||
// NewDynamic returns a new dynamic certifier. | |||||
func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier { | |||||
return &DynamicCertifier{ | |||||
cert: NewStaticCertifier(chainID, vals), | |||||
lastHeight: height, | |||||
} | |||||
} | |||||
// ChainID returns the chain id of this certifier. | |||||
// Implements Certifier. | |||||
func (dc *DynamicCertifier) ChainID() string { | |||||
return dc.cert.ChainID() | |||||
} | |||||
// Validators returns the validators of this certifier. | |||||
func (dc *DynamicCertifier) Validators() *types.ValidatorSet { | |||||
return dc.cert.vSet | |||||
} | |||||
// Hash returns the hash of this certifier. | |||||
func (dc *DynamicCertifier) Hash() []byte { | |||||
return dc.cert.Hash() | |||||
} | |||||
// LastHeight returns the last height of this certifier. | |||||
func (dc *DynamicCertifier) LastHeight() int64 { | |||||
return dc.lastHeight | |||||
} | |||||
// Certify will verify whether the commit is valid and will update the height if it is or return an | |||||
// error if it is not. | |||||
// Implements Certifier. | |||||
func (dc *DynamicCertifier) Certify(check Commit) error { | |||||
err := dc.cert.Certify(check) | |||||
if err == nil { | |||||
// update last seen height if input is valid | |||||
dc.lastHeight = check.Height() | |||||
} | |||||
return err | |||||
} | |||||
// Update will verify if this is a valid change and update | |||||
// the certifying validator set if safe to do so. | |||||
// | |||||
// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) | |||||
func (dc *DynamicCertifier) Update(fc FullCommit) error { | |||||
// ignore all checkpoints in the past -> only to the future | |||||
h := fc.Height() | |||||
if h <= dc.lastHeight { | |||||
return liteErr.ErrPastTime() | |||||
} | |||||
// first, verify if the input is self-consistent.... | |||||
err := fc.ValidateBasic(dc.ChainID()) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
// now, make sure not too much change... meaning this commit | |||||
// would be approved by the currently known validator set | |||||
// as well as the new set | |||||
commit := fc.Commit.Commit | |||||
err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit) | |||||
if err != nil { | |||||
return liteErr.ErrTooMuchChange() | |||||
} | |||||
// looks good, we can update | |||||
dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators) | |||||
dc.lastHeight = h | |||||
return nil | |||||
} |
@ -1,130 +0,0 @@ | |||||
package lite_test | |||||
import ( | |||||
"testing" | |||||
"github.com/stretchr/testify/assert" | |||||
"github.com/stretchr/testify/require" | |||||
"github.com/tendermint/tendermint/types" | |||||
"github.com/tendermint/tendermint/lite" | |||||
"github.com/tendermint/tendermint/lite/errors" | |||||
) | |||||
// TestDynamicCert just makes sure it still works like StaticCert | |||||
func TestDynamicCert(t *testing.T) { | |||||
// assert, require := assert.New(t), require.New(t) | |||||
assert := assert.New(t) | |||||
// require := require.New(t) | |||||
keys := lite.GenValKeys(4) | |||||
// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! | |||||
vals := keys.ToValidators(20, 10) | |||||
// and a certifier based on our known set | |||||
chainID := "test-dyno" | |||||
cert := lite.NewDynamicCertifier(chainID, vals, 0) | |||||
cases := []struct { | |||||
keys lite.ValKeys | |||||
vals *types.ValidatorSet | |||||
height int64 | |||||
first, last int // who actually signs | |||||
proper bool // true -> expect no error | |||||
changed bool // true -> expect validator change error | |||||
}{ | |||||
// perfect, signed by everyone | |||||
{keys, vals, 1, 0, len(keys), true, false}, | |||||
// skip little guy is okay | |||||
{keys, vals, 2, 1, len(keys), true, false}, | |||||
// but not the big guy | |||||
{keys, vals, 3, 0, len(keys) - 1, false, false}, | |||||
// even changing the power a little bit breaks the static validator | |||||
// the sigs are enough, but the validator hash is unknown | |||||
{keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, | |||||
} | |||||
for _, tc := range cases { | |||||
check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, | |||||
[]byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) | |||||
err := cert.Certify(check) | |||||
if tc.proper { | |||||
assert.Nil(err, "%+v", err) | |||||
assert.Equal(cert.LastHeight(), tc.height) | |||||
} else { | |||||
assert.NotNil(err) | |||||
if tc.changed { | |||||
assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) | |||||
} | |||||
} | |||||
} | |||||
} | |||||
// TestDynamicUpdate makes sure we update safely and sanely | |||||
func TestDynamicUpdate(t *testing.T) { | |||||
assert, require := assert.New(t), require.New(t) | |||||
chainID := "test-dyno-up" | |||||
keys := lite.GenValKeys(5) | |||||
vals := keys.ToValidators(20, 0) | |||||
cert := lite.NewDynamicCertifier(chainID, vals, 40) | |||||
// one valid block to give us a sense of time | |||||
h := int64(100) | |||||
good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), []byte("params"), []byte("results"), 0, len(keys)) | |||||
err := cert.Certify(good) | |||||
require.Nil(err, "%+v", err) | |||||
// some new sets to try later | |||||
keys2 := keys.Extend(2) | |||||
keys3 := keys2.Extend(4) | |||||
// we try to update with some blocks | |||||
cases := []struct { | |||||
keys lite.ValKeys | |||||
vals *types.ValidatorSet | |||||
height int64 | |||||
first, last int // who actually signs | |||||
proper bool // true -> expect no error | |||||
changed bool // true -> expect too much change error | |||||
}{ | |||||
// same validator set, well signed, of course it is okay | |||||
{keys, vals, h + 10, 0, len(keys), true, false}, | |||||
// same validator set, poorly signed, fails | |||||
{keys, vals, h + 20, 2, len(keys), false, false}, | |||||
// shift the power a little, works if properly signed | |||||
{keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false}, | |||||
// but not on a poor signature | |||||
{keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false}, | |||||
// and not if it was in the past | |||||
{keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false}, | |||||
// let's try to adjust to a whole new validator set (we have 5/7 of the votes) | |||||
{keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false}, | |||||
// properly signed but too much change, not allowed (only 7/11 validators known) | |||||
{keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true}, | |||||
} | |||||
for _, tc := range cases { | |||||
fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals, | |||||
[]byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) | |||||
err := cert.Update(fc) | |||||
if tc.proper { | |||||
assert.Nil(err, "%d: %+v", tc.height, err) | |||||
// we update last seen height | |||||
assert.Equal(cert.LastHeight(), tc.height) | |||||
// and we update the proper validators | |||||
assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash()) | |||||
} else { | |||||
assert.NotNil(err, "%d", tc.height) | |||||
// we don't update the height | |||||
assert.NotEqual(cert.LastHeight(), tc.height) | |||||
if tc.changed { | |||||
assert.True(errors.IsTooMuchChangeErr(err), | |||||
"%d: %+v", tc.height, err) | |||||
} | |||||
} | |||||
} | |||||
} |
@ -1,18 +0,0 @@ | |||||
package errors | |||||
import ( | |||||
"errors" | |||||
"testing" | |||||
"github.com/stretchr/testify/assert" | |||||
) | |||||
func TestErrorHeight(t *testing.T) { | |||||
e1 := ErrHeightMismatch(2, 3) | |||||
e1.Error() | |||||
assert.True(t, IsHeightMismatchErr(e1)) | |||||
e2 := errors.New("foobar") | |||||
assert.False(t, IsHeightMismatchErr(e2)) | |||||
assert.False(t, IsHeightMismatchErr(nil)) | |||||
} |
@ -1,93 +0,0 @@ | |||||
package files | |||||
import ( | |||||
"io/ioutil" | |||||
"os" | |||||
"github.com/pkg/errors" | |||||
"github.com/tendermint/tendermint/lite" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
) | |||||
const ( | |||||
// MaxFullCommitSize is the maximum number of bytes we will | |||||
// read in for a full commit to avoid excessive allocations | |||||
// in the deserializer | |||||
MaxFullCommitSize = 1024 * 1024 | |||||
) | |||||
// SaveFullCommit exports the seed in binary / go-amino style | |||||
func SaveFullCommit(fc lite.FullCommit, path string) error { | |||||
f, err := os.Create(path) | |||||
if err != nil { | |||||
return errors.WithStack(err) | |||||
} | |||||
defer f.Close() | |||||
_, err = cdc.MarshalBinaryWriter(f, fc) | |||||
if err != nil { | |||||
return errors.WithStack(err) | |||||
} | |||||
return nil | |||||
} | |||||
// SaveFullCommitJSON exports the seed in a json format | |||||
func SaveFullCommitJSON(fc lite.FullCommit, path string) error { | |||||
f, err := os.Create(path) | |||||
if err != nil { | |||||
return errors.WithStack(err) | |||||
} | |||||
defer f.Close() | |||||
bz, err := cdc.MarshalJSON(fc) | |||||
if err != nil { | |||||
return errors.WithStack(err) | |||||
} | |||||
_, err = f.Write(bz) | |||||
if err != nil { | |||||
return errors.WithStack(err) | |||||
} | |||||
return nil | |||||
} | |||||
// LoadFullCommit loads the full commit from the file system. | |||||
func LoadFullCommit(path string) (lite.FullCommit, error) { | |||||
var fc lite.FullCommit | |||||
f, err := os.Open(path) | |||||
if err != nil { | |||||
if os.IsNotExist(err) { | |||||
return fc, liteErr.ErrCommitNotFound() | |||||
} | |||||
return fc, errors.WithStack(err) | |||||
} | |||||
defer f.Close() | |||||
_, err = cdc.UnmarshalBinaryReader(f, &fc, 0) | |||||
if err != nil { | |||||
return fc, errors.WithStack(err) | |||||
} | |||||
return fc, nil | |||||
} | |||||
// LoadFullCommitJSON loads the commit from the file system in JSON format. | |||||
func LoadFullCommitJSON(path string) (lite.FullCommit, error) { | |||||
var fc lite.FullCommit | |||||
f, err := os.Open(path) | |||||
if err != nil { | |||||
if os.IsNotExist(err) { | |||||
return fc, liteErr.ErrCommitNotFound() | |||||
} | |||||
return fc, errors.WithStack(err) | |||||
} | |||||
defer f.Close() | |||||
bz, err := ioutil.ReadAll(f) | |||||
if err != nil { | |||||
return fc, errors.WithStack(err) | |||||
} | |||||
err = cdc.UnmarshalJSON(bz, &fc) | |||||
if err != nil { | |||||
return fc, errors.WithStack(err) | |||||
} | |||||
return fc, nil | |||||
} |
@ -1,66 +0,0 @@ | |||||
package files | |||||
import ( | |||||
"os" | |||||
"path/filepath" | |||||
"testing" | |||||
"github.com/stretchr/testify/assert" | |||||
"github.com/stretchr/testify/require" | |||||
cmn "github.com/tendermint/tmlibs/common" | |||||
"github.com/tendermint/tendermint/lite" | |||||
) | |||||
func tmpFile() string { | |||||
suffix := cmn.RandStr(16) | |||||
return filepath.Join(os.TempDir(), "fc-test-"+suffix) | |||||
} | |||||
func TestSerializeFullCommits(t *testing.T) { | |||||
assert, require := assert.New(t), require.New(t) | |||||
// some constants | |||||
appHash := []byte("some crazy thing") | |||||
chainID := "ser-ial" | |||||
h := int64(25) | |||||
// build a fc | |||||
keys := lite.GenValKeys(5) | |||||
vals := keys.ToValidators(10, 0) | |||||
fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) | |||||
require.Equal(h, fc.Height()) | |||||
require.Equal(vals.Hash(), fc.ValidatorsHash()) | |||||
// try read/write with json | |||||
jfile := tmpFile() | |||||
defer os.Remove(jfile) | |||||
jseed, err := LoadFullCommitJSON(jfile) | |||||
assert.NotNil(err) | |||||
err = SaveFullCommitJSON(fc, jfile) | |||||
require.Nil(err) | |||||
jseed, err = LoadFullCommitJSON(jfile) | |||||
assert.Nil(err, "%+v", err) | |||||
assert.Equal(h, jseed.Height()) | |||||
assert.Equal(vals.Hash(), jseed.ValidatorsHash()) | |||||
// try read/write with binary | |||||
bfile := tmpFile() | |||||
defer os.Remove(bfile) | |||||
bseed, err := LoadFullCommit(bfile) | |||||
assert.NotNil(err) | |||||
err = SaveFullCommit(fc, bfile) | |||||
require.Nil(err) | |||||
bseed, err = LoadFullCommit(bfile) | |||||
assert.Nil(err, "%+v", err) | |||||
assert.Equal(h, bseed.Height()) | |||||
assert.Equal(vals.Hash(), bseed.ValidatorsHash()) | |||||
// make sure they don't read the other format (different) | |||||
_, err = LoadFullCommit(jfile) | |||||
assert.NotNil(err) | |||||
_, err = LoadFullCommitJSON(bfile) | |||||
assert.NotNil(err) | |||||
} |
@ -1,139 +0,0 @@ | |||||
/* | |||||
Package files defines a Provider that stores all data in the filesystem | |||||
We assume the same validator hash may be reused by many different | |||||
headers/Commits, and thus store it separately. This leaves us | |||||
with three issues: | |||||
1. Given a validator hash, retrieve the validator set if previously stored | |||||
2. Given a block height, find the Commit with the highest height <= h | |||||
3. Given a FullCommit, store it quickly to satisfy 1 and 2 | |||||
Note that we do not worry about caching, as that can be achieved by | |||||
pairing this with a MemStoreProvider and CacheProvider from certifiers | |||||
*/ | |||||
package files | |||||
import ( | |||||
"encoding/hex" | |||||
"fmt" | |||||
"math" | |||||
"os" | |||||
"path/filepath" | |||||
"sort" | |||||
"github.com/pkg/errors" | |||||
"github.com/tendermint/tendermint/lite" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
) | |||||
// nolint | |||||
const ( | |||||
Ext = ".tsd" | |||||
ValDir = "validators" | |||||
CheckDir = "checkpoints" | |||||
dirPerm = os.FileMode(0755) | |||||
//filePerm = os.FileMode(0644) | |||||
) | |||||
type provider struct { | |||||
valDir string | |||||
checkDir string | |||||
} | |||||
// NewProvider creates the parent dir and subdirs | |||||
// for validators and checkpoints as needed | |||||
func NewProvider(dir string) lite.Provider { | |||||
valDir := filepath.Join(dir, ValDir) | |||||
checkDir := filepath.Join(dir, CheckDir) | |||||
for _, d := range []string{valDir, checkDir} { | |||||
err := os.MkdirAll(d, dirPerm) | |||||
if err != nil { | |||||
panic(err) | |||||
} | |||||
} | |||||
return &provider{valDir: valDir, checkDir: checkDir} | |||||
} | |||||
func (p *provider) encodeHash(hash []byte) string { | |||||
return hex.EncodeToString(hash) + Ext | |||||
} | |||||
func (p *provider) encodeHeight(h int64) string { | |||||
// pad up to 10^12 for height... | |||||
return fmt.Sprintf("%012d%s", h, Ext) | |||||
} | |||||
// StoreCommit saves a full commit after it has been verified. | |||||
func (p *provider) StoreCommit(fc lite.FullCommit) error { | |||||
// make sure the fc is self-consistent before saving | |||||
err := fc.ValidateBasic(fc.Commit.Header.ChainID) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
paths := []string{ | |||||
filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), | |||||
filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), | |||||
} | |||||
for _, path := range paths { | |||||
err := SaveFullCommit(fc, path) | |||||
// unknown error in creating or writing immediately breaks | |||||
if err != nil { | |||||
return err | |||||
} | |||||
} | |||||
return nil | |||||
} | |||||
// GetByHeight returns the closest commit with height <= h. | |||||
func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) { | |||||
// first we look for exact match, then search... | |||||
path := filepath.Join(p.checkDir, p.encodeHeight(h)) | |||||
fc, err := LoadFullCommit(path) | |||||
if liteErr.IsCommitNotFoundErr(err) { | |||||
path, err = p.searchForHeight(h) | |||||
if err == nil { | |||||
fc, err = LoadFullCommit(path) | |||||
} | |||||
} | |||||
return fc, err | |||||
} | |||||
// LatestCommit returns the newest commit stored. | |||||
func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { | |||||
// Note to future: please update by 2077 to avoid rollover | |||||
return p.GetByHeight(math.MaxInt32 - 1) | |||||
} | |||||
// search for height, looks for a file with highest height < h | |||||
// return certifiers.ErrCommitNotFound() if not there... | |||||
func (p *provider) searchForHeight(h int64) (string, error) { | |||||
d, err := os.Open(p.checkDir) | |||||
if err != nil { | |||||
return "", errors.WithStack(err) | |||||
} | |||||
files, err := d.Readdirnames(0) | |||||
d.Close() | |||||
if err != nil { | |||||
return "", errors.WithStack(err) | |||||
} | |||||
desired := p.encodeHeight(h) | |||||
sort.Strings(files) | |||||
i := sort.SearchStrings(files, desired) | |||||
if i == 0 { | |||||
return "", liteErr.ErrCommitNotFound() | |||||
} | |||||
found := files[i-1] | |||||
path := filepath.Join(p.checkDir, found) | |||||
return path, errors.WithStack(err) | |||||
} | |||||
// GetByHash returns a commit exactly matching this validator hash. | |||||
func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { | |||||
path := filepath.Join(p.valDir, p.encodeHash(hash)) | |||||
return LoadFullCommit(path) | |||||
} |
@ -1,96 +0,0 @@ | |||||
package files_test | |||||
import ( | |||||
"bytes" | |||||
"errors" | |||||
"io/ioutil" | |||||
"os" | |||||
"testing" | |||||
"github.com/stretchr/testify/assert" | |||||
"github.com/stretchr/testify/require" | |||||
"github.com/tendermint/tendermint/lite" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
"github.com/tendermint/tendermint/lite/files" | |||||
) | |||||
func checkEqual(stored, loaded lite.FullCommit, chainID string) error { | |||||
err := loaded.ValidateBasic(chainID) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { | |||||
return errors.New("Different block hashes") | |||||
} | |||||
return nil | |||||
} | |||||
func TestFileProvider(t *testing.T) { | |||||
assert, require := assert.New(t), require.New(t) | |||||
dir, err := ioutil.TempDir("", "fileprovider-test") | |||||
assert.Nil(err) | |||||
defer os.RemoveAll(dir) | |||||
p := files.NewProvider(dir) | |||||
chainID := "test-files" | |||||
appHash := []byte("some-data") | |||||
keys := lite.GenValKeys(5) | |||||
count := 10 | |||||
// make a bunch of seeds... | |||||
seeds := make([]lite.FullCommit, count) | |||||
for i := 0; i < count; i++ { | |||||
// two seeds for each validator, to check how we handle dups | |||||
// (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... | |||||
vals := keys.ToValidators(10, int64(count/2)) | |||||
h := int64(20 + 10*i) | |||||
check := keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) | |||||
seeds[i] = lite.NewFullCommit(check, vals) | |||||
} | |||||
// check provider is empty | |||||
seed, err := p.GetByHeight(20) | |||||
require.NotNil(err) | |||||
assert.True(liteErr.IsCommitNotFoundErr(err)) | |||||
seed, err = p.GetByHash(seeds[3].ValidatorsHash()) | |||||
require.NotNil(err) | |||||
assert.True(liteErr.IsCommitNotFoundErr(err)) | |||||
// now add them all to the provider | |||||
for _, s := range seeds { | |||||
err = p.StoreCommit(s) | |||||
require.Nil(err) | |||||
// and make sure we can get it back | |||||
s2, err := p.GetByHash(s.ValidatorsHash()) | |||||
assert.Nil(err) | |||||
err = checkEqual(s, s2, chainID) | |||||
assert.Nil(err) | |||||
// by height as well | |||||
s2, err = p.GetByHeight(s.Height()) | |||||
err = checkEqual(s, s2, chainID) | |||||
assert.Nil(err) | |||||
} | |||||
// make sure we get the last hash if we overstep | |||||
seed, err = p.GetByHeight(5000) | |||||
if assert.Nil(err, "%+v", err) { | |||||
assert.Equal(seeds[count-1].Height(), seed.Height()) | |||||
err = checkEqual(seeds[count-1], seed, chainID) | |||||
assert.Nil(err) | |||||
} | |||||
// and middle ones as well | |||||
seed, err = p.GetByHeight(47) | |||||
if assert.Nil(err, "%+v", err) { | |||||
// we only step by 10, so 40 must be the one below this | |||||
assert.EqualValues(40, seed.Height()) | |||||
} | |||||
// and proper error for too low | |||||
_, err = p.GetByHeight(5) | |||||
assert.NotNil(err) | |||||
assert.True(liteErr.IsCommitNotFoundErr(err)) | |||||
} |
@ -1,12 +0,0 @@ | |||||
package files | |||||
import ( | |||||
"github.com/tendermint/go-amino" | |||||
"github.com/tendermint/tendermint/crypto" | |||||
) | |||||
var cdc = amino.NewCodec() | |||||
func init() { | |||||
crypto.RegisterAmino(cdc) | |||||
} |
@ -1,163 +1,209 @@ | |||||
package lite | package lite | ||||
import ( | import ( | ||||
"bytes" | |||||
"github.com/tendermint/tendermint/types" | "github.com/tendermint/tendermint/types" | ||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
lerr "github.com/tendermint/tendermint/lite/errors" | |||||
) | ) | ||||
var _ Certifier = (*InquiringCertifier)(nil) | var _ Certifier = (*InquiringCertifier)(nil) | ||||
// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call | |||||
// to Certify fails due to a change it validator set, InquiringCertifier will try and find a | |||||
// previous FullCommit which it can use to safely update the validator set. It uses a source | |||||
// provider to obtain the needed FullCommits. It stores properly validated data on the local system. | |||||
// InquiringCertifier implements an auto-updating certifier. It uses a | |||||
// "source" provider to obtain the needed FullCommits to securely sync with | |||||
// validator set changes. It stores properly validated data on the | |||||
// "trusted" local system. | |||||
type InquiringCertifier struct { | type InquiringCertifier struct { | ||||
cert *DynamicCertifier | |||||
// These are only properly validated data, from local system | |||||
trusted Provider | |||||
// This is a source of new info, like a node rpc, or other import method | |||||
Source Provider | |||||
chainID string | |||||
// These are only properly validated data, from local system. | |||||
trusted PersistentProvider | |||||
// This is a source of new info, like a node rpc, or other import method. | |||||
source Provider | |||||
} | } | ||||
// NewInquiringCertifier returns a new Inquiring object. It uses the trusted provider to store | |||||
// validated data and the source provider to obtain missing FullCommits. | |||||
// NewInquiringCertifier returns a new InquiringCertifier. It uses the | |||||
// trusted provider to store validated data and the source provider to | |||||
// obtain missing data (e.g. FullCommits). | |||||
// | // | ||||
// Example: The trusted provider should a CacheProvider, MemProvider or files.Provider. The source | |||||
// provider should be a client.HTTPProvider. | |||||
func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider, | |||||
source Provider) (*InquiringCertifier, error) { | |||||
// store the data in trusted | |||||
err := trusted.StoreCommit(fc) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
// The trusted provider should a CacheProvider, MemProvider or | |||||
// files.Provider. The source provider should be a client.HTTPProvider. | |||||
func NewInquiringCertifier(chainID string, trusted PersistentProvider, source Provider) ( | |||||
*InquiringCertifier, error) { | |||||
return &InquiringCertifier{ | return &InquiringCertifier{ | ||||
cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()), | |||||
chainID: chainID, | |||||
trusted: trusted, | trusted: trusted, | ||||
Source: source, | |||||
source: source, | |||||
}, nil | }, nil | ||||
} | } | ||||
// ChainID returns the chain id. | |||||
// Implements Certifier. | // Implements Certifier. | ||||
func (ic *InquiringCertifier) ChainID() string { | func (ic *InquiringCertifier) ChainID() string { | ||||
return ic.cert.ChainID() | |||||
return ic.chainID | |||||
} | } | ||||
// Validators returns the validator set. | |||||
func (ic *InquiringCertifier) Validators() *types.ValidatorSet { | |||||
return ic.cert.cert.vSet | |||||
} | |||||
// LastHeight returns the last height. | |||||
func (ic *InquiringCertifier) LastHeight() int64 { | |||||
return ic.cert.lastHeight | |||||
} | |||||
// Certify makes sure this is checkpoint is valid. | |||||
// | |||||
// If the validators have changed since the last know time, it looks | |||||
// for a path to prove the new validators. | |||||
// | |||||
// On success, it will store the checkpoint in the store for later viewing | |||||
// Implements Certifier. | // Implements Certifier. | ||||
func (ic *InquiringCertifier) Certify(commit Commit) error { | |||||
err := ic.useClosestTrust(commit.Height()) | |||||
// | |||||
// If the validators have changed since the last know time, it looks to | |||||
// ic.trusted and ic.source to prove the new validators. On success, it will | |||||
// try to store the SignedHeader in ic.trusted if the next | |||||
// validator can be sourced. | |||||
func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { | |||||
// Get the latest known full commit <= h-1 from our trusted providers. | |||||
// The full commit at h-1 contains the valset to sign for h. | |||||
h := shdr.Height - 1 | |||||
tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) | |||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
err = ic.cert.Certify(commit) | |||||
if !liteErr.IsValidatorsChangedErr(err) { | |||||
return err | |||||
} | |||||
err = ic.updateToHash(commit.Header.ValidatorsHash) | |||||
if err != nil { | |||||
return err | |||||
if tfc.Height() == h { | |||||
// Return error if valset doesn't match. | |||||
if !bytes.Equal( | |||||
tfc.NextValidators.Hash(), | |||||
shdr.Header.ValidatorsHash) { | |||||
return lerr.ErrUnexpectedValidators( | |||||
tfc.NextValidators.Hash(), | |||||
shdr.Header.ValidatorsHash) | |||||
} | |||||
} else { | |||||
// If valset doesn't match... | |||||
if !bytes.Equal(tfc.NextValidators.Hash(), | |||||
shdr.Header.ValidatorsHash) { | |||||
// ... update. | |||||
tfc, err = ic.updateToHeight(h) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
// Return error if valset _still_ doesn't match. | |||||
if !bytes.Equal(tfc.NextValidators.Hash(), | |||||
shdr.Header.ValidatorsHash) { | |||||
return lerr.ErrUnexpectedValidators( | |||||
tfc.NextValidators.Hash(), | |||||
shdr.Header.ValidatorsHash) | |||||
} | |||||
} | |||||
} | } | ||||
err = ic.cert.Certify(commit) | |||||
// Certify the signed header using the matching valset. | |||||
cert := NewBaseCertifier(ic.chainID, tfc.Height()+1, tfc.NextValidators) | |||||
err = cert.Certify(shdr) | |||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
// store the new checkpoint | |||||
return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators())) | |||||
} | |||||
// Update will verify if this is a valid change and update | |||||
// the certifying validator set if safe to do so. | |||||
func (ic *InquiringCertifier) Update(fc FullCommit) error { | |||||
err := ic.useClosestTrust(fc.Height()) | |||||
if err != nil { | |||||
// Get the next validator set. | |||||
nvalset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1) | |||||
if lerr.IsErrMissingValidators(err) { | |||||
// Ignore this error. | |||||
return nil | |||||
} else if err != nil { | |||||
return err | return err | ||||
} else { | |||||
// Create filled FullCommit. | |||||
nfc := FullCommit{ | |||||
SignedHeader: shdr, | |||||
Validators: tfc.NextValidators, | |||||
NextValidators: nvalset, | |||||
} | |||||
// Validate the full commit. This checks the cryptographic | |||||
// signatures of Commit against Validators. | |||||
if err := nfc.ValidateBasic(ic.chainID); err != nil { | |||||
return err | |||||
} | |||||
// Trust it. | |||||
return ic.trusted.SaveFullCommit(nfc) | |||||
} | } | ||||
err = ic.cert.Update(fc) | |||||
if err == nil { | |||||
err = ic.trusted.StoreCommit(fc) | |||||
} | |||||
return err | |||||
} | } | ||||
func (ic *InquiringCertifier) useClosestTrust(h int64) error { | |||||
closest, err := ic.trusted.GetByHeight(h) | |||||
// verifyAndSave will verify if this is a valid source full commit given the | |||||
// best match trusted full commit, and if good, persist to ic.trusted. | |||||
// Returns ErrTooMuchChange when >2/3 of tfc did not sign sfc. | |||||
// Panics if tfc.Height() >= sfc.Height(). | |||||
func (ic *InquiringCertifier) verifyAndSave(tfc, sfc FullCommit) error { | |||||
if tfc.Height() >= sfc.Height() { | |||||
panic("should not happen") | |||||
} | |||||
err := tfc.NextValidators.VerifyFutureCommit( | |||||
sfc.Validators, | |||||
ic.chainID, sfc.SignedHeader.Commit.BlockID, | |||||
sfc.SignedHeader.Height, sfc.SignedHeader.Commit, | |||||
) | |||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
// if the best seed is not the one we currently use, | |||||
// let's just reset the dynamic validator | |||||
if closest.Height() != ic.LastHeight() { | |||||
ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height()) | |||||
} | |||||
return nil | |||||
return ic.trusted.SaveFullCommit(sfc) | |||||
} | } | ||||
// updateToHash gets the validator hash we want to update to | |||||
// if IsTooMuchChangeErr, we try to find a path by binary search over height | |||||
func (ic *InquiringCertifier) updateToHash(vhash []byte) error { | |||||
// try to get the match, and update | |||||
fc, err := ic.Source.GetByHash(vhash) | |||||
// updateToHeight will use divide-and-conquer to find a path to h. | |||||
// Returns nil iff we successfully verify and persist a full commit | |||||
// for height h, using repeated applications of bisection if necessary. | |||||
// | |||||
// Returns ErrCommitNotFound if source provider doesn't have the commit for h. | |||||
func (ic *InquiringCertifier) updateToHeight(h int64) (FullCommit, error) { | |||||
// Fetch latest full commit from source. | |||||
sfc, err := ic.source.LatestFullCommit(ic.chainID, h, h) | |||||
if err != nil { | if err != nil { | ||||
return err | |||||
} | |||||
err = ic.cert.Update(fc) | |||||
// handle IsTooMuchChangeErr by using divide and conquer | |||||
if liteErr.IsTooMuchChangeErr(err) { | |||||
err = ic.updateToHeight(fc.Height()) | |||||
return FullCommit{}, err | |||||
} | } | ||||
return err | |||||
} | |||||
// updateToHeight will use divide-and-conquer to find a path to h | |||||
func (ic *InquiringCertifier) updateToHeight(h int64) error { | |||||
// try to update to this height (with checks) | |||||
fc, err := ic.Source.GetByHeight(h) | |||||
if err != nil { | |||||
return err | |||||
// Validate the full commit. This checks the cryptographic | |||||
// signatures of Commit against Validators. | |||||
if err := sfc.ValidateBasic(ic.chainID); err != nil { | |||||
return FullCommit{}, err | |||||
} | } | ||||
start, end := ic.LastHeight(), fc.Height() | |||||
if end <= start { | |||||
return liteErr.ErrNoPathFound() | |||||
// If sfc.Height() != h, we can't do it. | |||||
if sfc.Height() != h { | |||||
return FullCommit{}, lerr.ErrCommitNotFound() | |||||
} | } | ||||
err = ic.Update(fc) | |||||
// we can handle IsTooMuchChangeErr specially | |||||
if !liteErr.IsTooMuchChangeErr(err) { | |||||
return err | |||||
FOR_LOOP: | |||||
for { | |||||
// Fetch latest full commit from trusted. | |||||
tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) | |||||
if err != nil { | |||||
return FullCommit{}, err | |||||
} | |||||
// Maybe we have nothing to do. | |||||
if tfc.Height() == h { | |||||
return FullCommit{}, nil | |||||
} | |||||
// Try to update to full commit with checks. | |||||
err = ic.verifyAndSave(tfc, sfc) | |||||
if err == nil { | |||||
// All good! | |||||
return sfc, nil | |||||
} else { | |||||
// Handle special case when err is ErrTooMuchChange. | |||||
if lerr.IsErrTooMuchChange(err) { | |||||
// Divide and conquer. | |||||
start, end := tfc.Height(), sfc.Height() | |||||
if !(start < end) { | |||||
panic("should not happen") | |||||
} | |||||
mid := (start + end) / 2 | |||||
_, err = ic.updateToHeight(mid) | |||||
if err != nil { | |||||
return FullCommit{}, err | |||||
} | |||||
// If we made it to mid, we retry. | |||||
continue FOR_LOOP | |||||
} | |||||
return FullCommit{}, err | |||||
} | |||||
} | } | ||||
} | |||||
// try to update to mid | |||||
mid := (start + end) / 2 | |||||
err = ic.updateToHeight(mid) | |||||
func (ic *InquiringCertifier) LastTrustedHeight() int64 { | |||||
fc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, 1<<63-1) | |||||
if err != nil { | if err != nil { | ||||
return err | |||||
panic("should not happen") | |||||
} | } | ||||
// if we made it to mid, we recurse | |||||
return ic.updateToHeight(h) | |||||
return fc.Height() | |||||
} | } |
@ -1,152 +0,0 @@ | |||||
package lite | |||||
import ( | |||||
"encoding/hex" | |||||
"sort" | |||||
"sync" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
) | |||||
type memStoreProvider struct { | |||||
mtx sync.RWMutex | |||||
// byHeight is always sorted by Height... need to support range search (nil, h] | |||||
// btree would be more efficient for larger sets | |||||
byHeight fullCommits | |||||
byHash map[string]FullCommit | |||||
sorted bool | |||||
} | |||||
// fullCommits just exists to allow easy sorting | |||||
type fullCommits []FullCommit | |||||
func (s fullCommits) Len() int { return len(s) } | |||||
func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |||||
func (s fullCommits) Less(i, j int) bool { | |||||
return s[i].Height() < s[j].Height() | |||||
} | |||||
// NewMemStoreProvider returns a new in-memory provider. | |||||
func NewMemStoreProvider() Provider { | |||||
return &memStoreProvider{ | |||||
byHeight: fullCommits{}, | |||||
byHash: map[string]FullCommit{}, | |||||
} | |||||
} | |||||
func (m *memStoreProvider) encodeHash(hash []byte) string { | |||||
return hex.EncodeToString(hash) | |||||
} | |||||
// StoreCommit stores a FullCommit after verifying it. | |||||
func (m *memStoreProvider) StoreCommit(fc FullCommit) error { | |||||
// make sure the fc is self-consistent before saving | |||||
err := fc.ValidateBasic(fc.Commit.Header.ChainID) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
// store the valid fc | |||||
key := m.encodeHash(fc.ValidatorsHash()) | |||||
m.mtx.Lock() | |||||
defer m.mtx.Unlock() | |||||
m.byHash[key] = fc | |||||
m.byHeight = append(m.byHeight, fc) | |||||
m.sorted = false | |||||
return nil | |||||
} | |||||
// GetByHeight returns the FullCommit for height h or an error if the commit is not found. | |||||
func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) { | |||||
// By heuristics, GetByHeight with linearsearch is fast enough | |||||
// for about 50 keys but after that, it needs binary search. | |||||
// See https://github.com/tendermint/tendermint/pull/1043#issue-285188242 | |||||
m.mtx.RLock() | |||||
n := len(m.byHeight) | |||||
m.mtx.RUnlock() | |||||
if n <= 50 { | |||||
return m.getByHeightLinearSearch(h) | |||||
} | |||||
return m.getByHeightBinarySearch(h) | |||||
} | |||||
func (m *memStoreProvider) sortByHeightIfNecessaryLocked() { | |||||
if !m.sorted { | |||||
sort.Sort(m.byHeight) | |||||
m.sorted = true | |||||
} | |||||
} | |||||
func (m *memStoreProvider) getByHeightLinearSearch(h int64) (FullCommit, error) { | |||||
m.mtx.Lock() | |||||
defer m.mtx.Unlock() | |||||
m.sortByHeightIfNecessaryLocked() | |||||
// search from highest to lowest | |||||
for i := len(m.byHeight) - 1; i >= 0; i-- { | |||||
if fc := m.byHeight[i]; fc.Height() <= h { | |||||
return fc, nil | |||||
} | |||||
} | |||||
return FullCommit{}, liteErr.ErrCommitNotFound() | |||||
} | |||||
func (m *memStoreProvider) getByHeightBinarySearch(h int64) (FullCommit, error) { | |||||
m.mtx.Lock() | |||||
defer m.mtx.Unlock() | |||||
m.sortByHeightIfNecessaryLocked() | |||||
low, high := 0, len(m.byHeight)-1 | |||||
var mid int | |||||
var hmid int64 | |||||
var midFC FullCommit | |||||
// Our goal is to either find: | |||||
// * item ByHeight with the query | |||||
// * greatest height with a height <= query | |||||
for low <= high { | |||||
mid = int(uint(low+high) >> 1) // Avoid an overflow | |||||
midFC = m.byHeight[mid] | |||||
hmid = midFC.Height() | |||||
switch { | |||||
case hmid == h: | |||||
return midFC, nil | |||||
case hmid < h: | |||||
low = mid + 1 | |||||
case hmid > h: | |||||
high = mid - 1 | |||||
} | |||||
} | |||||
if high >= 0 { | |||||
if highFC := m.byHeight[high]; highFC.Height() < h { | |||||
return highFC, nil | |||||
} | |||||
} | |||||
return FullCommit{}, liteErr.ErrCommitNotFound() | |||||
} | |||||
// GetByHash returns the FullCommit for the hash or an error if the commit is not found. | |||||
func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { | |||||
m.mtx.RLock() | |||||
defer m.mtx.RUnlock() | |||||
fc, ok := m.byHash[m.encodeHash(hash)] | |||||
if !ok { | |||||
return fc, liteErr.ErrCommitNotFound() | |||||
} | |||||
return fc, nil | |||||
} | |||||
// LatestCommit returns the latest FullCommit or an error if no commits exist. | |||||
func (m *memStoreProvider) LatestCommit() (FullCommit, error) { | |||||
m.mtx.Lock() | |||||
defer m.mtx.Unlock() | |||||
l := len(m.byHeight) | |||||
if l == 0 { | |||||
return FullCommit{}, liteErr.ErrCommitNotFound() | |||||
} | |||||
m.sortByHeightIfNecessaryLocked() | |||||
return m.byHeight[l-1], nil | |||||
} |
@ -0,0 +1,72 @@ | |||||
package lite | |||||
import ( | |||||
lerr "github.com/tendermint/tendermint/lite/errors" | |||||
"github.com/tendermint/tendermint/types" | |||||
) | |||||
// multiProvider allows you to place one or more caches in front of a source | |||||
// Provider. It runs through them in order until a match is found. | |||||
type multiProvider struct { | |||||
Providers []PersistentProvider | |||||
} | |||||
// NewMultiProvider returns a new provider which wraps multiple other providers. | |||||
func NewMultiProvider(providers ...PersistentProvider) multiProvider { | |||||
return multiProvider{ | |||||
Providers: providers, | |||||
} | |||||
} | |||||
// SaveFullCommit saves on all providers, and aborts on the first error. | |||||
func (mc multiProvider) SaveFullCommit(fc FullCommit) (err error) { | |||||
for _, p := range mc.Providers { | |||||
err = p.SaveFullCommit(fc) | |||||
if err != nil { | |||||
return | |||||
} | |||||
} | |||||
return | |||||
} | |||||
// LatestFullCommit loads the latest from all providers and provides | |||||
// the latest FullCommit that satisfies the conditions. | |||||
// Returns the first error encountered. | |||||
func (mc multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { | |||||
for _, p := range mc.Providers { | |||||
var fc_ FullCommit | |||||
fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight) | |||||
if lerr.IsErrCommitNotFound(err) { | |||||
err = nil | |||||
continue | |||||
} else if err != nil { | |||||
return | |||||
} | |||||
if fc == (FullCommit{}) { | |||||
fc = fc_ | |||||
} else if fc_.Height() > fc.Height() { | |||||
fc = fc_ | |||||
} | |||||
if fc.Height() == maxHeight { | |||||
return | |||||
} | |||||
} | |||||
if fc == (FullCommit{}) { | |||||
err = lerr.ErrCommitNotFound() | |||||
return | |||||
} | |||||
return | |||||
} | |||||
// ValidatorSet returns validator set at height as provided by the first | |||||
// provider which has it, or an error otherwise. | |||||
func (mc multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { | |||||
for _, p := range mc.Providers { | |||||
valset, err = p.ValidatorSet(chainID, height) | |||||
if err == nil { | |||||
// TODO Log unexpected types of errors. | |||||
return valset, nil | |||||
} | |||||
} | |||||
return nil, lerr.ErrMissingValidators(chainID, height) | |||||
} |
@ -1,365 +0,0 @@ | |||||
package lite | |||||
import ( | |||||
"fmt" | |||||
"math/rand" | |||||
"sync" | |||||
"testing" | |||||
"github.com/stretchr/testify/assert" | |||||
"github.com/stretchr/testify/require" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
) | |||||
func TestMemStoreProvidergetByHeightBinaryAndLinearSameResult(t *testing.T) { | |||||
p := NewMemStoreProvider().(*memStoreProvider) | |||||
// Store a bunch of commits at specific heights | |||||
// and then ensure that: | |||||
// * getByHeightLinearSearch | |||||
// * getByHeightBinarySearch | |||||
// both return the exact same result | |||||
// 1. Non-existent height commits | |||||
nonExistent := []int64{-1000, -1, 0, 1, 10, 11, 17, 31, 67, 1000, 1e9} | |||||
ensureNonExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, nonExistent) | |||||
ensureNonExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, nonExistent) | |||||
// 2. Save some known height commits | |||||
knownHeights := []int64{0, 1, 7, 9, 12, 13, 18, 44, 23, 16, 1024, 100, 199, 1e9} | |||||
createAndStoreCommits(t, p, knownHeights) | |||||
// 3. Now check if those heights are retrieved | |||||
ensureExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, knownHeights) | |||||
ensureExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, knownHeights) | |||||
// 4. And now for the height probing to ensure that any height | |||||
// requested returns a fullCommit of height <= requestedHeight. | |||||
comparegetByHeightAlgorithms(t, p, 0, 0) | |||||
comparegetByHeightAlgorithms(t, p, 1, 1) | |||||
comparegetByHeightAlgorithms(t, p, 2, 1) | |||||
comparegetByHeightAlgorithms(t, p, 5, 1) | |||||
comparegetByHeightAlgorithms(t, p, 7, 7) | |||||
comparegetByHeightAlgorithms(t, p, 10, 9) | |||||
comparegetByHeightAlgorithms(t, p, 12, 12) | |||||
comparegetByHeightAlgorithms(t, p, 14, 13) | |||||
comparegetByHeightAlgorithms(t, p, 19, 18) | |||||
comparegetByHeightAlgorithms(t, p, 43, 23) | |||||
comparegetByHeightAlgorithms(t, p, 45, 44) | |||||
comparegetByHeightAlgorithms(t, p, 1025, 1024) | |||||
comparegetByHeightAlgorithms(t, p, 101, 100) | |||||
comparegetByHeightAlgorithms(t, p, 1e3, 199) | |||||
comparegetByHeightAlgorithms(t, p, 1e4, 1024) | |||||
comparegetByHeightAlgorithms(t, p, 1e9, 1e9) | |||||
comparegetByHeightAlgorithms(t, p, 1e9+1, 1e9) | |||||
} | |||||
func createAndStoreCommits(t *testing.T, p Provider, heights []int64) { | |||||
chainID := "cache-best-height-binary-and-linear" | |||||
appHash := []byte("0xdeadbeef") | |||||
keys := GenValKeys(len(heights) / 2) | |||||
for _, h := range heights { | |||||
vals := keys.ToValidators(10, int64(len(heights)/2)) | |||||
fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) | |||||
err := p.StoreCommit(fc) | |||||
require.NoError(t, err, "StoreCommit height=%d", h) | |||||
} | |||||
} | |||||
func comparegetByHeightAlgorithms(t *testing.T, p *memStoreProvider, ask, expect int64) { | |||||
algos := map[string]func(int64) (FullCommit, error){ | |||||
"getHeightByLinearSearch": p.getByHeightLinearSearch, | |||||
"getHeightByBinarySearch": p.getByHeightBinarySearch, | |||||
} | |||||
for algo, fn := range algos { | |||||
fc, err := fn(ask) | |||||
// t.Logf("%s got=%v want=%d", algo, expect, fc.Height()) | |||||
require.Nil(t, err, "%s: %+v", algo, err) | |||||
if assert.Equal(t, expect, fc.Height()) { | |||||
err = p.StoreCommit(fc) | |||||
require.Nil(t, err, "%s: %+v", algo, err) | |||||
} | |||||
} | |||||
} | |||||
var blankFullCommit FullCommit | |||||
func ensureNonExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { | |||||
for i, qh := range data { | |||||
fc, err := fn(qh) | |||||
assert.NotNil(t, err, "#%d: %s: height=%d should return non-nil error", i, prefix, qh) | |||||
assert.Equal(t, fc, blankFullCommit, "#%d: %s: height=%d\ngot =%+v\nwant=%+v", i, prefix, qh, fc, blankFullCommit) | |||||
} | |||||
} | |||||
func ensureExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { | |||||
for i, qh := range data { | |||||
fc, err := fn(qh) | |||||
assert.Nil(t, err, "#%d: %s: height=%d should not return an error: %v", i, prefix, qh, err) | |||||
assert.NotEqual(t, fc, blankFullCommit, "#%d: %s: height=%d got a blankCommit", i, prefix, qh) | |||||
} | |||||
} | |||||
func BenchmarkGenCommit20(b *testing.B) { | |||||
keys := GenValKeys(20) | |||||
benchmarkGenCommit(b, keys) | |||||
} | |||||
func BenchmarkGenCommit100(b *testing.B) { | |||||
keys := GenValKeys(100) | |||||
benchmarkGenCommit(b, keys) | |||||
} | |||||
func BenchmarkGenCommitSec20(b *testing.B) { | |||||
keys := GenSecpValKeys(20) | |||||
benchmarkGenCommit(b, keys) | |||||
} | |||||
func BenchmarkGenCommitSec100(b *testing.B) { | |||||
keys := GenSecpValKeys(100) | |||||
benchmarkGenCommit(b, keys) | |||||
} | |||||
func benchmarkGenCommit(b *testing.B, keys ValKeys) { | |||||
chainID := fmt.Sprintf("bench-%d", len(keys)) | |||||
vals := keys.ToValidators(20, 10) | |||||
for i := 0; i < b.N; i++ { | |||||
h := int64(1 + i) | |||||
appHash := []byte(fmt.Sprintf("h=%d", h)) | |||||
resHash := []byte(fmt.Sprintf("res=%d", h)) | |||||
keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), resHash, 0, len(keys)) | |||||
} | |||||
} | |||||
// this benchmarks generating one key | |||||
func BenchmarkGenValKeys(b *testing.B) { | |||||
keys := GenValKeys(20) | |||||
for i := 0; i < b.N; i++ { | |||||
keys = keys.Extend(1) | |||||
} | |||||
} | |||||
// this benchmarks generating one key | |||||
func BenchmarkGenSecpValKeys(b *testing.B) { | |||||
keys := GenSecpValKeys(20) | |||||
for i := 0; i < b.N; i++ { | |||||
keys = keys.Extend(1) | |||||
} | |||||
} | |||||
func BenchmarkToValidators20(b *testing.B) { | |||||
benchmarkToValidators(b, 20) | |||||
} | |||||
func BenchmarkToValidators100(b *testing.B) { | |||||
benchmarkToValidators(b, 100) | |||||
} | |||||
// this benchmarks constructing the validator set (.PubKey() * nodes) | |||||
func benchmarkToValidators(b *testing.B, nodes int) { | |||||
keys := GenValKeys(nodes) | |||||
for i := 1; i <= b.N; i++ { | |||||
keys.ToValidators(int64(2*i), int64(i)) | |||||
} | |||||
} | |||||
func BenchmarkToValidatorsSec100(b *testing.B) { | |||||
benchmarkToValidatorsSec(b, 100) | |||||
} | |||||
// this benchmarks constructing the validator set (.PubKey() * nodes) | |||||
func benchmarkToValidatorsSec(b *testing.B, nodes int) { | |||||
keys := GenSecpValKeys(nodes) | |||||
for i := 1; i <= b.N; i++ { | |||||
keys.ToValidators(int64(2*i), int64(i)) | |||||
} | |||||
} | |||||
func BenchmarkCertifyCommit20(b *testing.B) { | |||||
keys := GenValKeys(20) | |||||
benchmarkCertifyCommit(b, keys) | |||||
} | |||||
func BenchmarkCertifyCommit100(b *testing.B) { | |||||
keys := GenValKeys(100) | |||||
benchmarkCertifyCommit(b, keys) | |||||
} | |||||
func BenchmarkCertifyCommitSec20(b *testing.B) { | |||||
keys := GenSecpValKeys(20) | |||||
benchmarkCertifyCommit(b, keys) | |||||
} | |||||
func BenchmarkCertifyCommitSec100(b *testing.B) { | |||||
keys := GenSecpValKeys(100) | |||||
benchmarkCertifyCommit(b, keys) | |||||
} | |||||
func benchmarkCertifyCommit(b *testing.B, keys ValKeys) { | |||||
chainID := "bench-certify" | |||||
vals := keys.ToValidators(20, 10) | |||||
cert := NewStaticCertifier(chainID, vals) | |||||
check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys)) | |||||
for i := 0; i < b.N; i++ { | |||||
err := cert.Certify(check) | |||||
if err != nil { | |||||
panic(err) | |||||
} | |||||
} | |||||
} | |||||
type algo bool | |||||
const ( | |||||
linearSearch = true | |||||
binarySearch = false | |||||
) | |||||
// Lazy load the commits | |||||
var fcs5, fcs50, fcs100, fcs500, fcs1000 []FullCommit | |||||
var h5, h50, h100, h500, h1000 []int64 | |||||
var commitsOnce sync.Once | |||||
func lazyGenerateFullCommits(b *testing.B) { | |||||
b.Logf("Generating FullCommits") | |||||
commitsOnce.Do(func() { | |||||
fcs5, h5 = genFullCommits(nil, nil, 5) | |||||
b.Logf("Generated 5 FullCommits") | |||||
fcs50, h50 = genFullCommits(fcs5, h5, 50) | |||||
b.Logf("Generated 50 FullCommits") | |||||
fcs100, h100 = genFullCommits(fcs50, h50, 100) | |||||
b.Logf("Generated 100 FullCommits") | |||||
fcs500, h500 = genFullCommits(fcs100, h100, 500) | |||||
b.Logf("Generated 500 FullCommits") | |||||
fcs1000, h1000 = genFullCommits(fcs500, h500, 1000) | |||||
b.Logf("Generated 1000 FullCommits") | |||||
}) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightLinearSearch5(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, linearSearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightLinearSearch50(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, linearSearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightLinearSearch100(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, linearSearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightLinearSearch500(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, linearSearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightLinearSearch1000(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, linearSearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightBinarySearch5(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, binarySearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightBinarySearch50(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, binarySearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightBinarySearch100(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, binarySearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightBinarySearch500(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, binarySearch) | |||||
} | |||||
func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { | |||||
benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) | |||||
} | |||||
var rng = rand.New(rand.NewSource(10)) | |||||
func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { | |||||
lazyGenerateFullCommits(b) | |||||
b.StopTimer() | |||||
mp := NewMemStoreProvider() | |||||
for i, fc := range fcs { | |||||
if err := mp.StoreCommit(fc); err != nil { | |||||
b.Fatalf("FullCommit #%d: err: %v", i, err) | |||||
} | |||||
} | |||||
qHeights := make([]int64, len(fHeights)) | |||||
copy(qHeights, fHeights) | |||||
// Append some non-existent heights to trigger the worst cases. | |||||
qHeights = append(qHeights, 19, -100, -10000, 1e7, -17, 31, -1e9) | |||||
memP := mp.(*memStoreProvider) | |||||
searchFn := memP.getByHeightLinearSearch | |||||
if algo == binarySearch { // nolint | |||||
searchFn = memP.getByHeightBinarySearch | |||||
} | |||||
hPerm := rng.Perm(len(qHeights)) | |||||
b.StartTimer() | |||||
b.ResetTimer() | |||||
for i := 0; i < b.N; i++ { | |||||
for _, j := range hPerm { | |||||
h := qHeights[j] | |||||
if _, err := searchFn(h); err != nil { | |||||
} | |||||
} | |||||
} | |||||
b.ReportAllocs() | |||||
} | |||||
func genFullCommits(prevFC []FullCommit, prevH []int64, want int) ([]FullCommit, []int64) { | |||||
fcs := make([]FullCommit, len(prevFC)) | |||||
copy(fcs, prevFC) | |||||
heights := make([]int64, len(prevH)) | |||||
copy(heights, prevH) | |||||
appHash := []byte("benchmarks") | |||||
chainID := "benchmarks-gen-full-commits" | |||||
n := want | |||||
keys := GenValKeys(2 + (n / 3)) | |||||
for i := 0; i < n; i++ { | |||||
vals := keys.ToValidators(10, int64(n/2)) | |||||
h := int64(20 + 10*i) | |||||
fcs = append(fcs, keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)) | |||||
heights = append(heights, h) | |||||
} | |||||
return fcs, heights | |||||
} | |||||
func TestMemStoreProviderLatestCommitAlwaysUsesSorted(t *testing.T) { | |||||
p := NewMemStoreProvider().(*memStoreProvider) | |||||
// 1. With no commits yet stored, it should return ErrCommitNotFound | |||||
got, err := p.LatestCommit() | |||||
require.Equal(t, err.Error(), liteErr.ErrCommitNotFound().Error(), "should return ErrCommitNotFound()") | |||||
require.Equal(t, got, blankFullCommit, "With no fullcommits, it should return a blank FullCommit") | |||||
// 2. Generate some full commits now and we'll add them unsorted. | |||||
genAndStoreCommitsOfHeight(t, p, 27, 100, 1, 12, 1000, 17, 91) | |||||
fc, err := p.LatestCommit() | |||||
require.Nil(t, err, "with commits saved no error expected") | |||||
require.NotEqual(t, fc, blankFullCommit, "with commits saved no blank FullCommit") | |||||
require.Equal(t, fc.Height(), int64(1000), "the latest commit i.e. the largest expected") | |||||
} | |||||
func genAndStoreCommitsOfHeight(t *testing.T, p Provider, heights ...int64) { | |||||
n := len(heights) | |||||
appHash := []byte("tests") | |||||
chainID := "tests-gen-full-commits" | |||||
keys := GenValKeys(2 + (n / 3)) | |||||
for i := 0; i < n; i++ { | |||||
h := heights[i] | |||||
vals := keys.ToValidators(10, int64(n/2)) | |||||
fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) | |||||
err := p.StoreCommit(fc) | |||||
require.NoError(t, err, "StoreCommit height=%d", h) | |||||
} | |||||
} |
@ -1,103 +1,28 @@ | |||||
package lite | package lite | ||||
// Provider is used to get more validators by other means. | |||||
// | |||||
// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.... | |||||
type Provider interface { | |||||
// StoreCommit saves a FullCommit after we have verified it, | |||||
// so we can query for it later. Important for updating our | |||||
// store of trusted commits. | |||||
StoreCommit(fc FullCommit) error | |||||
// GetByHeight returns the closest commit with height <= h. | |||||
GetByHeight(h int64) (FullCommit, error) | |||||
// GetByHash returns a commit exactly matching this validator hash. | |||||
GetByHash(hash []byte) (FullCommit, error) | |||||
// LatestCommit returns the newest commit stored. | |||||
LatestCommit() (FullCommit, error) | |||||
} | |||||
// cacheProvider allows you to place one or more caches in front of a source | |||||
// Provider. It runs through them in order until a match is found. | |||||
// So you can keep a local cache, and check with the network if | |||||
// no data is there. | |||||
type cacheProvider struct { | |||||
Providers []Provider | |||||
} | |||||
import ( | |||||
"github.com/tendermint/tendermint/types" | |||||
) | |||||
// NewCacheProvider returns a new provider which wraps multiple other providers. | |||||
func NewCacheProvider(providers ...Provider) Provider { | |||||
return cacheProvider{ | |||||
Providers: providers, | |||||
} | |||||
} | |||||
// Provider provides information for the lite client to sync validators. | |||||
// Examples: MemProvider, files.Provider, client.Provider, CacheProvider. | |||||
type Provider interface { | |||||
// StoreCommit tries to add the seed to all providers. | |||||
// | |||||
// Aborts on first error it encounters (closest provider) | |||||
func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { | |||||
for _, p := range c.Providers { | |||||
err = p.StoreCommit(fc) | |||||
if err != nil { | |||||
break | |||||
} | |||||
} | |||||
return err | |||||
} | |||||
// LatestFullCommit returns the latest commit with minHeight <= height <= | |||||
// maxHeight. | |||||
// If maxHeight is zero, returns the latest where minHeight <= height. | |||||
LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) | |||||
// GetByHeight should return the closest possible match from all providers. | |||||
// | |||||
// The Cache is usually organized in order from cheapest call (memory) | |||||
// to most expensive calls (disk/network). However, since GetByHeight returns | |||||
// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would | |||||
// give us the exact match, a naive "stop at first non-error" would hide | |||||
// the actual desired results. | |||||
// | |||||
// Thus, we query each provider in order until we find an exact match | |||||
// or we finished querying them all. If at least one returned a non-error, | |||||
// then this returns the best match (minimum h-h'). | |||||
func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) { | |||||
for _, p := range c.Providers { | |||||
var tfc FullCommit | |||||
tfc, err = p.GetByHeight(h) | |||||
if err == nil { | |||||
if tfc.Height() > fc.Height() { | |||||
fc = tfc | |||||
} | |||||
if tfc.Height() == h { | |||||
break | |||||
} | |||||
} | |||||
} | |||||
// even if the last one had an error, if any was a match, this is good | |||||
if fc.Height() > 0 { | |||||
err = nil | |||||
} | |||||
return fc, err | |||||
// Get the valset that corresponds to chainID and height and return. | |||||
// Height must be >= 1. | |||||
ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) | |||||
} | } | ||||
// GetByHash returns the FullCommit for the hash or an error if the commit is not found. | |||||
func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { | |||||
for _, p := range c.Providers { | |||||
fc, err = p.GetByHash(hash) | |||||
if err == nil { | |||||
break | |||||
} | |||||
} | |||||
return fc, err | |||||
} | |||||
// A provider that can also persist new information. | |||||
// Examples: MemProvider, files.Provider, CacheProvider. | |||||
type PersistentProvider interface { | |||||
Provider | |||||
// LatestCommit returns the latest FullCommit or an error if no commit exists. | |||||
func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { | |||||
for _, p := range c.Providers { | |||||
var tfc FullCommit | |||||
tfc, err = p.LatestCommit() | |||||
if err == nil && tfc.Height() > fc.Height() { | |||||
fc = tfc | |||||
} | |||||
} | |||||
// even if the last one had an error, if any was a match, this is good | |||||
if fc.Height() > 0 { | |||||
err = nil | |||||
} | |||||
return fc, err | |||||
// SaveFullCommit saves a FullCommit (without verification). | |||||
SaveFullCommit(fc FullCommit) error | |||||
} | } |
@ -1,22 +1,24 @@ | |||||
package proxy | package proxy | ||||
import ( | import ( | ||||
"fmt" | |||||
"github.com/pkg/errors" | |||||
cmn "github.com/tendermint/tmlibs/common" | |||||
) | ) | ||||
//-------------------------------------------- | |||||
type errNoData struct{} | |||||
var errNoData = fmt.Errorf("No data returned for query") | |||||
func (e errNoData) Error() string { | |||||
return "No data returned for query" | |||||
} | |||||
// IsNoDataErr checks whether an error is due to a query returning empty data | |||||
func IsNoDataErr(err error) bool { | |||||
return errors.Cause(err) == errNoData | |||||
// IsErrNoData checks whether an error is due to a query returning empty data | |||||
func IsErrNoData(err error) bool { | |||||
if err_, ok := err.(cmn.Error); ok { | |||||
_, ok := err_.Data().(errNoData) | |||||
return ok | |||||
} | |||||
return false | |||||
} | } | ||||
func ErrNoData() error { | func ErrNoData() error { | ||||
return errors.WithStack(errNoData) | |||||
return cmn.ErrorWrap(errNoData{}, "") | |||||
} | } | ||||
//-------------------------------------------- |
@ -1,17 +0,0 @@ | |||||
package proxy | |||||
import ( | |||||
"errors" | |||||
"testing" | |||||
"github.com/stretchr/testify/assert" | |||||
) | |||||
func TestErrorNoData(t *testing.T) { | |||||
e1 := ErrNoData() | |||||
assert.True(t, IsNoDataErr(e1)) | |||||
e2 := errors.New("foobar") | |||||
assert.False(t, IsNoDataErr(e2)) | |||||
assert.False(t, IsNoDataErr(nil)) | |||||
} |
@ -1,73 +0,0 @@ | |||||
package lite | |||||
import ( | |||||
"bytes" | |||||
"github.com/pkg/errors" | |||||
"github.com/tendermint/tendermint/types" | |||||
liteErr "github.com/tendermint/tendermint/lite/errors" | |||||
) | |||||
var _ Certifier = (*StaticCertifier)(nil) | |||||
// StaticCertifier assumes a static set of validators, set on | |||||
// initilization and checks against them. | |||||
// The signatures on every header is checked for > 2/3 votes | |||||
// against the known validator set upon Certify | |||||
// | |||||
// Good for testing or really simple chains. Building block | |||||
// to support real-world functionality. | |||||
type StaticCertifier struct { | |||||
chainID string | |||||
vSet *types.ValidatorSet | |||||
vhash []byte | |||||
} | |||||
// NewStaticCertifier returns a new certifier with a static validator set. | |||||
func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier { | |||||
return &StaticCertifier{ | |||||
chainID: chainID, | |||||
vSet: vals, | |||||
} | |||||
} | |||||
// ChainID returns the chain id. | |||||
// Implements Certifier. | |||||
func (sc *StaticCertifier) ChainID() string { | |||||
return sc.chainID | |||||
} | |||||
// Validators returns the validator set. | |||||
func (sc *StaticCertifier) Validators() *types.ValidatorSet { | |||||
return sc.vSet | |||||
} | |||||
// Hash returns the hash of the validator set. | |||||
func (sc *StaticCertifier) Hash() []byte { | |||||
if len(sc.vhash) == 0 { | |||||
sc.vhash = sc.vSet.Hash() | |||||
} | |||||
return sc.vhash | |||||
} | |||||
// Certify makes sure that the commit is valid. | |||||
// Implements Certifier. | |||||
func (sc *StaticCertifier) Certify(commit Commit) error { | |||||
// do basic sanity checks | |||||
err := commit.ValidateBasic(sc.chainID) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
// make sure it has the same validator set we have (static means static) | |||||
if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) { | |||||
return liteErr.ErrValidatorsChanged() | |||||
} | |||||
// then make sure we have the proper signatures for this | |||||
err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID, | |||||
commit.Header.Height, commit.Commit) | |||||
return errors.WithStack(err) | |||||
} |
@ -0,0 +1,13 @@ | |||||
package lite | |||||
import ( | |||||
"github.com/tendermint/tendermint/types" | |||||
) | |||||
// Certifier checks the votes to make sure the block really is signed properly. | |||||
// Certifier must know the current or recent set of validitors by some other | |||||
// means. | |||||
type Certifier interface { | |||||
Certify(sheader types.SignedHeader) error | |||||
ChainID() string | |||||
} |