diff --git a/Gopkg.lock b/Gopkg.lock index f9729ffab..9dfc2a5f9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -7,6 +7,12 @@ packages = ["quantile"] revision = "3a771d992973f24aa725d07868b467d1ddfceafb" +[[projects]] + branch = "master" + name = "github.com/brejski/hid" + packages = ["."] + revision = "06112dcfcc50a7e0e4fd06e17f9791e788fdaafc" + [[projects]] branch = "master" name = "github.com/btcsuite/btcd" @@ -289,11 +295,8 @@ [[projects]] name = "github.com/tendermint/abci" packages = [ - "client", "example/code", - "example/counter", "example/kvstore", - "server", "types" ] revision = "198dccf0ddfd1bb176f87657e3286a05a6ed9540" @@ -327,10 +330,16 @@ "flowrate", "log", "merkle", + "merkle/tmhash", "test" ] - revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38" - version = "v0.8.4" + revision = "fb7ec62b2925f48de159aeea73b254ae8c58a738" + version = "v0.9.0-rc1" + +[[projects]] + name = "github.com/zondax/ledger-goclient" + packages = ["."] + revision = "3e2146609cdb97894c064d59e9d00accd8c2b1dd" [[projects]] branch = "master" @@ -435,6 +444,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "d17038089dd6383ff5028229d4026bb92f5c7adc7e9c1cd52584237e2e5fd431" + inputs-digest = "fcc5b0344f1e328b6abefa1a937d1161e14bbaef603e6f2065e6690531bc5de1" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 4c32f3d80..d892405b0 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -69,13 +69,17 @@ name = "github.com/stretchr/testify" version = "~1.2.1" +[[constraint]] + name = "github.com/tendermint/abci" + version = "~0.12.0" + [[constraint]] name = "github.com/tendermint/go-amino" version = "~0.10.1" [[override]] name = "github.com/tendermint/tmlibs" - version = "~0.8.4" + version = "0.9.0-rc1" [[constraint]] name = "google.golang.org/grpc" diff --git a/blockchain/store_test.go b/blockchain/store_test.go index 1e0c223ad..5cb18cdcb 100644 --- a/blockchain/store_test.go +++ b/blockchain/store_test.go @@ -49,7 +49,7 @@ func TestNewBlockStore(t *testing.T) { return nil, nil }) require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data) - assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data) + assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data) } db.Set(blockStoreKey, nil) @@ -238,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { if subStr := tuple.wantPanic; subStr != "" { if panicErr == nil { t.Errorf("#%d: want a non-nil panic", i) - } else if got := panicErr.Error(); !strings.Contains(got, subStr) { + } else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) { t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr) } continue @@ -287,7 +287,7 @@ func TestLoadBlockPart(t *testing.T) { db.Set(calcBlockPartKey(height, index), []byte("Tendermint")) res, _, panicErr = doFn(loadPart) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "Error reading block part") + require.Contains(t, fmt.Sprintf("%#v", panicErr), "Error reading block part") // 3. 
A good block serialized and saved to the DB should be retrievable db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1)) @@ -316,7 +316,7 @@ func TestLoadBlockMeta(t *testing.T) { db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta")) res, _, panicErr = doFn(loadMeta) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "Error reading block meta") + require.Contains(t, fmt.Sprintf("%#v", panicErr), "Error reading block meta") // 3. A good blockMeta serialized and saved to the DB should be retrievable meta := &types.BlockMeta{} diff --git a/consensus/common_test.go b/consensus/common_test.go index b990f525c..8c5aa6c92 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path" + "reflect" "sort" "sync" "testing" @@ -325,6 +326,30 @@ func ensureNewStep(stepCh <-chan interface{}) { } } +func ensureVote(voteCh chan interface{}, height int64, round int, voteType byte) { + timer := time.NewTimer(ensureTimeout) + select { + case <-timer.C: + break + case v := <-voteCh: + edv, ok := v.(types.EventDataVote) + if !ok { + panic(fmt.Sprintf("expected a *types.Vote, got %v. wrong subscription channel?", + reflect.TypeOf(v))) + } + vote := edv.Vote + if vote.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) + } + if vote.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) + } + if vote.Type != voteType { + panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type)) + } + } +} + //------------------------------------------------------------------------------- // consensus nets diff --git a/consensus/state.go b/consensus/state.go index a12345d75..93e1f6b49 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1594,7 +1594,9 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, blockID, ok := precommits.TwoThirdsMajority() if ok { if len(blockID.Hash) == 0 { - cs.enterNewRound(height, vote.Round+1) + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + cs.enterPrecommitWait(height, vote.Round) } else { cs.enterNewRound(height, vote.Round) cs.enterPrecommit(height, vote.Round) diff --git a/consensus/state_test.go b/consensus/state_test.go index ece70dd5d..307a39931 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -718,6 +718,8 @@ func TestStateLockPOLUnlock(t *testing.T) { func TestStateLockPOLSafety1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + h := cs1.GetRoundState().Height + r := cs1.GetRoundState().Round partSize := cs1.state.ConsensusParams.BlockPartSizeBytes @@ -734,7 +736,7 @@ func TestStateLockPOLSafety1(t *testing.T) { rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) propBlock := rs.ProposalBlock - <-voteCh // prevote + ensureVote(voteCh, h, r, types.VoteTypePrevote) validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) @@ -755,6 +757,11 @@ func TestStateLockPOLSafety1(t *testing.T) { // we do see them precommit nil signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + ensureVote(voteCh, h, r, types.VoteTypePrecommit) + + <-newRoundCh + t.Log("### ONTO ROUND 1") + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockHash := propBlock.Hash() propBlockParts := propBlock.MakePartSet(partSize) @@ -765,9 +772,6 @@ func TestStateLockPOLSafety1(t *testing.T) { if err := 
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - - <-newRoundCh - t.Log("### ONTO ROUND 1") /*Round2 // we timeout and prevote our lock // a polka happened but we didn't see it! @@ -788,13 +792,13 @@ func TestStateLockPOLSafety1(t *testing.T) { } t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) // go to prevote, prevote for proposal block - <-voteCh + ensureVote(voteCh, h, r+1, types.VoteTypePrevote) validatePrevote(t, cs1, 1, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - <-voteCh // precommit + ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) // we should have precommitted validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) @@ -816,7 +820,7 @@ func TestStateLockPOLSafety1(t *testing.T) { <-timeoutProposeCh // finish prevote - <-voteCh + ensureVote(voteCh, h, r+2, types.VoteTypePrevote) // we should prevote what we're locked on validatePrevote(t, cs1, 2, vss[0], propBlockHash) diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 776e0653b..df7bc7a40 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -283,7 +283,8 @@ loop: } func (state *state) add(clientID string, q Query, ch chan<- interface{}) { - // add query if needed + + // initialize clientToChannelMap per query if needed if _, ok := state.queries[q]; !ok { state.queries[q] = make(map[string]chan<- interface{}) } diff --git a/lite/base_certifier.go b/lite/base_certifier.go new file mode 100644 index 000000000..6f2b3da94 --- /dev/null +++ b/lite/base_certifier.go @@ -0,0 +1,72 @@ +package lite + +import ( + "bytes" + + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" +) + +var _ Certifier = (*BaseCertifier)(nil) + +// BaseCertifier lets us check the validity of SignedHeaders at height or +// later, requiring sufficient votes (> 2/3) from the given valset. +// To certify blocks produced by a blockchain with mutable validator sets, +// use the InquiringCertifier. +// TODO: Handle unbonding time. +type BaseCertifier struct { + chainID string + height int64 + valset *types.ValidatorSet +} + +// NewBaseCertifier returns a new certifier initialized with a validator set at +// some height. +func NewBaseCertifier(chainID string, height int64, valset *types.ValidatorSet) *BaseCertifier { + if valset == nil || len(valset.Hash()) == 0 { + panic("NewBaseCertifier requires a valid valset") + } + return &BaseCertifier{ + chainID: chainID, + height: height, + valset: valset, + } +} + +// Implements Certifier. +func (bc *BaseCertifier) ChainID() string { + return bc.chainID +} + +// Implements Certifier. +func (bc *BaseCertifier) Certify(signedHeader types.SignedHeader) error { + + // We can't certify commits older than bc.height. + if signedHeader.Height < bc.height { + return cmn.NewError("BaseCertifier height is %v, cannot certify height %v", + bc.height, signedHeader.Height) + } + + // We can't certify with the wrong validator set. + if !bytes.Equal(signedHeader.ValidatorsHash, + bc.valset.Hash()) { + return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash()) + } + + // Do basic sanity checks. + err := signedHeader.ValidateBasic(bc.chainID) + if err != nil { + return cmn.ErrorWrap(err, "in certify") + } + + // Check commit signatures. 
+ err = bc.valset.VerifyCommit( + bc.chainID, signedHeader.Commit.BlockID, + signedHeader.Height, signedHeader.Commit) + if err != nil { + return cmn.ErrorWrap(err, "in certify") + } + + return nil +} diff --git a/lite/static_certifier_test.go b/lite/base_certifier_test.go similarity index 54% rename from lite/static_certifier_test.go rename to lite/base_certifier_test.go index 03567daa6..20342c90a 100644 --- a/lite/static_certifier_test.go +++ b/lite/base_certifier_test.go @@ -1,59 +1,58 @@ -package lite_test +package lite import ( "testing" "github.com/stretchr/testify/assert" + lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" ) -func TestStaticCert(t *testing.T) { +func TestBaseCert(t *testing.T) { // assert, require := assert.New(t), require.New(t) assert := assert.New(t) // require := require.New(t) - keys := lite.GenValKeys(4) + keys := genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! vals := keys.ToValidators(20, 10) // and a certifier based on our known set chainID := "test-static" - cert := lite.NewStaticCertifier(chainID, vals) + cert := NewBaseCertifier(chainID, 2, vals) cases := []struct { - keys lite.ValKeys + keys privKeys vals *types.ValidatorSet height int64 first, last int // who actually signs proper bool // true -> expect no error changed bool // true -> expect validator change error }{ + // height regression + {keys, vals, 1, 0, len(keys), false, false}, // perfect, signed by everyone - {keys, vals, 1, 0, len(keys), true, false}, + {keys, vals, 2, 0, len(keys), true, false}, // skip little guy is okay - {keys, vals, 2, 1, len(keys), true, false}, + {keys, vals, 3, 1, len(keys), true, false}, // but not the big guy - {keys, vals, 3, 0, len(keys) - 1, false, false}, - // even changing the power a little bit breaks the static validator - // the sigs are enough, but the validator hash is unknown - {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, + {keys, vals, 4, 0, len(keys) - 1, false, false}, + // Changing the power a little bit breaks the static validator. + // The sigs are enough, but the validator hash is unknown. 
+ {keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true}, } for _, tc := range cases { - check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, + sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals, []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(check) + err := cert.Certify(sh) if tc.proper { assert.Nil(err, "%+v", err) } else { assert.NotNil(err) if tc.changed { - assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err) + assert.True(lerr.IsErrUnexpectedValidators(err), "%+v", err) } } } - } diff --git a/lite/client/main_test.go b/lite/client/main_test.go deleted file mode 100644 index 49b194366..000000000 --- a/lite/client/main_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package client_test - -import ( - "os" - "testing" - - "github.com/tendermint/tendermint/abci/example/kvstore" - - nm "github.com/tendermint/tendermint/node" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -var node *nm.Node - -func TestMain(m *testing.M) { - // start a tendermint node (and merkleeyes) in the background to test against - app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) - code := m.Run() - - // and shut down proper at the end - node.Stop() - node.Wait() - os.Exit(code) -} diff --git a/lite/client/provider.go b/lite/client/provider.go index 5f3d72450..188ce7d0a 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -1,19 +1,19 @@ /* Package client defines a provider that uses a rpcclient to get information, which is used to get new headers -and validators directly from a node. +and validators directly from a Tendermint client. */ package client import ( - "bytes" + "fmt" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + lerr "github.com/tendermint/tendermint/lite/errors" ) // SignStatusClient combines a SignClient and StatusClient. @@ -23,119 +23,111 @@ type SignStatusClient interface { } type provider struct { - node SignStatusClient - lastHeight int64 + chainID string + client SignStatusClient } -// NewProvider can wrap any rpcclient to expose it as -// a read-only provider. -func NewProvider(node SignStatusClient) lite.Provider { - return &provider{node: node} +// NewProvider implements Provider (but not PersistentProvider). +func NewProvider(chainID string, client SignStatusClient) lite.Provider { + return &provider{chainID: chainID, client: client} } // NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. -func NewHTTPProvider(remote string) lite.Provider { +func NewHTTPProvider(chainID, remote string) lite.Provider { return &provider{ - node: rpcclient.NewHTTP(remote, "/websocket"), + chainID: chainID, + client: rpcclient.NewHTTP(remote, "/websocket"), } } -// StatusClient returns the internal node as a StatusClient +// StatusClient returns the internal client as a StatusClient func (p *provider) StatusClient() rpcclient.StatusClient { - return p.node + return p.client } -// StoreCommit is a noop, as clients can only read from the chain... 
-func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil } - -// GetHash gets the most recent validator and sees if it matches -// -// TODO: improve when the rpc interface supports more functionality -func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - var fc lite.FullCommit - vals, err := p.node.Validators(nil) - // if we get no validators, or a different height, return an error - if err != nil { - return fc, err +// LatestFullCommit implements Provider. +func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc lite.FullCommit, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return } - p.updateHeight(vals.BlockHeight) - vhash := types.NewValidatorSet(vals.Validators).Hash() - if !bytes.Equal(hash, vhash) { - return fc, liteErr.ErrCommitNotFound() + if maxHeight != 0 && maxHeight < minHeight { + err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got %v and %v", + minHeight, maxHeight) + return } - return p.seedFromVals(vals) -} - -// GetByHeight gets the validator set by height -func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) { - commit, err := p.node.Commit(&h) + commit, err := p.fetchLatestCommit(minHeight, maxHeight) if err != nil { - return fc, err + return } - return p.seedFromCommit(commit) + fc, err = p.fillFullCommit(commit.SignedHeader) + return } -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - commit, err := p.GetLatestCommit() +// fetchLatestCommit fetches the latest commit from the client. +func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes.ResultCommit, error) { + status, err := p.client.Status() if err != nil { - return fc, err + return nil, err } - return p.seedFromCommit(commit) -} - -// GetLatestCommit should return the most recent commit there is, -// which handles queries for future heights as per the semantics -// of GetByHeight. -func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { - status, err := p.node.Status() - if err != nil { + if status.SyncInfo.LatestBlockHeight < minHeight { + err = fmt.Errorf("provider is at %v but require minHeight=%v", + status.SyncInfo.LatestBlockHeight, minHeight) return nil, err } - return p.node.Commit(&status.SyncInfo.LatestBlockHeight) + if maxHeight == 0 { + maxHeight = status.SyncInfo.LatestBlockHeight + } else if status.SyncInfo.LatestBlockHeight < maxHeight { + maxHeight = status.SyncInfo.LatestBlockHeight + } + return p.client.Commit(&maxHeight) } -// CommitFromResult ... -func CommitFromResult(result *ctypes.ResultCommit) lite.Commit { - return (lite.Commit)(result.SignedHeader) +// Implements Provider. 
+func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return p.getValidatorSet(chainID, height) } -func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) { - // now get the commits and build a full commit - commit, err := p.node.Commit(&vals.BlockHeight) +func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return + } + if height < 1 { + err = fmt.Errorf("expected height >= 1, got %v", height) + return + } + heightPtr := new(int64) + *heightPtr = height + res, err := p.client.Validators(heightPtr) if err != nil { - return lite.FullCommit{}, err + // TODO pass through other types of errors. + return nil, lerr.ErrMissingValidators(chainID, height) } - fc := lite.NewFullCommit( - CommitFromResult(commit), - types.NewValidatorSet(vals.Validators), - ) - return fc, nil + valset = types.NewValidatorSet(res.Validators) + valset.TotalVotingPower() // to test deep equality. + return } -func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) { - fc.Commit = CommitFromResult(commit) +// This does no validation. +func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { + fc.SignedHeader = signedHeader - // now get the proper validators - vals, err := p.node.Validators(&commit.Header.Height) + // Get the validators. + valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) if err != nil { - return fc, err + return lite.FullCommit{}, err } + fc.Validators = valset - // make sure they match the commit (as we cannot enforce height) - vset := types.NewValidatorSet(vals.Validators) - if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { - return fc, liteErr.ErrValidatorsChanged() + // Get the next validators. + nvalset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) + if err != nil { + return lite.FullCommit{}, err + } else { + fc.NextValidators = nvalset } - p.updateHeight(commit.Header.Height) - fc.Validators = vset return fc, nil } - -func (p *provider) updateHeight(h int64) { - if h > p.lastHeight { - p.lastHeight = h - } -} diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go index 94d47da3f..2385bbbe1 100644 --- a/lite/client/provider_test.go +++ b/lite/client/provider_test.go @@ -1,63 +1,73 @@ package client import ( + "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/abci/example/kvstore" rpcclient "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) +// TODO fix tests!! 
+func TestMain(m *testing.M) { + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app) + + code := m.Run() + + node.Stop() + node.Wait() + os.Exit(code) +} + func TestProvider(t *testing.T) { assert, require := assert.New(t), require.New(t) cfg := rpctest.GetConfig() rpcAddr := cfg.RPC.ListenAddress - genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) + if err != nil { + panic(err) + } chainID := genDoc.ChainID - p := NewHTTPProvider(rpcAddr) + t.Log("chainID:", chainID) + p := NewHTTPProvider(chainID, rpcAddr) require.NotNil(t, p) // let it produce some blocks - err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) + err = rpcclient.WaitForHeight(p.(*provider).client, 6, nil) require.Nil(err) // let's get the highest block - seed, err := p.LatestCommit() + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.Nil(err, "%+v", err) - sh := seed.Height() - vhash := seed.Header.ValidatorsHash + sh := fc.Height() assert.True(sh < 5000) // let's check this is valid somehow - assert.Nil(seed.ValidateBasic(chainID)) - cert := lite.NewStaticCertifier(chainID, seed.Validators) + assert.Nil(fc.ValidateBasic(chainID)) // historical queries now work :) lower := sh - 5 - seed, err = p.GetByHeight(lower) + fc, err = p.LatestFullCommit(chainID, lower, lower) assert.Nil(err, "%+v", err) - assert.Equal(lower, seed.Height()) + assert.Equal(lower, fc.Height()) - // also get by hash (given the match) - seed, err = p.GetByHash(vhash) - require.Nil(err, "%+v", err) - require.Equal(vhash, seed.Header.ValidatorsHash) - err = cert.Certify(seed.Commit) - assert.Nil(err, "%+v", err) + /* + // also get by hash (given the match) + fc, err = p.GetByHash(vhash) + require.Nil(err, "%+v", err) + require.Equal(vhash, fc.Header.ValidatorsHash) - // get by hash fails without match - seed, err = p.GetByHash([]byte("foobar")) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // storing the seed silently ignored - err = p.StoreCommit(seed) - assert.Nil(err, "%+v", err) + // get by hash fails without match + fc, err = p.GetByHash([]byte("foobar")) + assert.NotNil(err) + assert.True(liteErr.IsCommitNotFoundErr(err)) + */ } diff --git a/lite/commit.go b/lite/commit.go index 11ae6d7ff..8449bf698 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -2,98 +2,92 @@ package lite import ( "bytes" - - "github.com/pkg/errors" + "errors" + "fmt" "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" ) -// Certifier checks the votes to make sure the block really is signed properly. -// Certifier must know the current set of validitors by some other means. -type Certifier interface { - Certify(check Commit) error - ChainID() string -} - -// Commit is basically the rpc /commit response, but extended -// -// This is the basepoint for proving anything on the blockchain. It contains -// a signed header. If the signatures are valid and > 2/3 of the known set, -// we can store this checkpoint and use it to prove any number of aspects of -// the system: such as txs, abci state, validator sets, etc... -type Commit types.SignedHeader - -// FullCommit is a commit and the actual validator set, -// the base info you need to update to a given point, -// assuming knowledge of some previous validator set +// FullCommit is a signed header (the block header and a commit that signs it), +// the validator set which signed the commit, and the next validator set. 
The +// next validator set (which is proven from the block header) allows us to +// revert to block-by-block updating of lite certifier's latest validator set, +// even in the face of arbitrarily power changes. type FullCommit struct { - Commit `json:"commit"` - Validators *types.ValidatorSet `json:"validator_set"` + SignedHeader types.SignedHeader `json:"signed_header"` + Validators *types.ValidatorSet `json:"validator_set"` + NextValidators *types.ValidatorSet `json:"next_validator_set"` } // NewFullCommit returns a new FullCommit. -func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { +func NewFullCommit(signedHeader types.SignedHeader, valset, nvalset *types.ValidatorSet) FullCommit { return FullCommit{ - Commit: commit, - Validators: vals, - } -} - -// Height returns the height of the header. -func (c Commit) Height() int64 { - if c.Header == nil { - return 0 + SignedHeader: signedHeader, + Validators: valset, + NextValidators: nvalset, } - return c.Header.Height } -// ValidatorsHash returns the hash of the validator set. -func (c Commit) ValidatorsHash() []byte { - if c.Header == nil { - return nil +// Validate the components and check for consistency. +// This also checks to make sure that Validators actually +// signed the SignedHeader.Commit. +// If > 2/3 did not sign the Commit from fc.Validators, it +// is not a valid commit! +func (fc FullCommit) ValidateBasic(chainID string) error { + // Ensure that Validators exists and matches the header. + if fc.Validators.Size() == 0 { + return errors.New("need FullCommit.Validators") } - return c.Header.ValidatorsHash -} - -// ValidateBasic does basic consistency checks and makes sure the headers -// and commits are all consistent and refer to our chain. -// -// Make sure to use a Verifier to validate the signatures actually provide -// a significantly strong proof for this header's validity. -func (c Commit) ValidateBasic(chainID string) error { - // make sure the header is reasonable - if c.Header == nil { - return errors.New("Commit missing header") + if !bytes.Equal( + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash()) { + return fmt.Errorf("header has vhash %X but valset hash is %X", + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash(), + ) } - if c.Header.ChainID != chainID { - return errors.Errorf("Header belongs to another chain '%s' not '%s'", - c.Header.ChainID, chainID) + // Ensure that NextValidators exists and matches the header. + if fc.NextValidators.Size() == 0 { + return errors.New("need FullCommit.NextValidators") } - - if c.Commit == nil { - return errors.New("Commit missing signatures") + if !bytes.Equal( + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash()) { + return fmt.Errorf("header has next vhash %X but next valset hash is %X", + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash(), + ) } - - // make sure the header and commit match (height and hash) - if c.Commit.Height() != c.Header.Height { - return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) - } - hhash := c.Header.Hash() - chash := c.Commit.BlockID.Hash - if !bytes.Equal(hhash, chash) { - return errors.Errorf("Commits sign block %X header is block %X", - chash, hhash) + // Validate the header. + err := fc.SignedHeader.ValidateBasic(chainID) + if err != nil { + return err } - - // make sure the commit is reasonable - err := c.Commit.ValidateBasic() + // Validate the signatures on the commit. 
+ hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit + err = fc.Validators.VerifyCommit( + hdr.ChainID, cmt.BlockID, + hdr.Height, cmt) if err != nil { - return errors.WithStack(err) + return err } - // looks good, we just need to make sure the signatures are really from - // empowered validators + // All good! return nil } + +// Height returns the height of the header. +func (fc FullCommit) Height() int64 { + if fc.SignedHeader.Header == nil { + panic("should not happen") + } + return fc.SignedHeader.Height +} + +// ChainID returns the chainID of the header. +func (fc FullCommit) ChainID() string { + if fc.SignedHeader.Header == nil { + panic("should not happen") + } + return fc.SignedHeader.ChainID +} diff --git a/lite/dbprovider.go b/lite/dbprovider.go new file mode 100644 index 000000000..834bab663 --- /dev/null +++ b/lite/dbprovider.go @@ -0,0 +1,168 @@ +package lite + +import ( + "fmt" + "regexp" + "strconv" + + amino "github.com/tendermint/go-amino" + crypto "github.com/tendermint/tendermint/crypto" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tmlibs/db" +) + +func signedHeaderKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) +} + +var signedHeaderKeyPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/sh`) + +func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { + submatch := signedHeaderKeyPattern.FindSubmatch(key) + if submatch == nil { + return "", 0, false + } + chainID = string(submatch[1]) + heightStr := string(submatch[2]) + heightInt, err := strconv.Atoi(heightStr) + if err != nil { + return "", 0, false + } + height = int64(heightInt) + ok = true // good! + return +} + +func validatorSetKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) +} + +type DBProvider struct { + chainID string + db dbm.DB + cdc *amino.Codec +} + +func NewDBProvider(db dbm.DB) *DBProvider { + //db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) + cdc := amino.NewCodec() + crypto.RegisterAmino(cdc) + dbp := &DBProvider{db: db, cdc: cdc} + return dbp +} + +// Implements PersistentProvider. +func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { + + batch := dbp.db.NewBatch() + + // Save the fc.validators. + // We might be overwriting what we already have, but + // it makes the logic easier for now. + vsKey := validatorSetKey(fc.ChainID(), fc.Height()) + vsBz, err := dbp.cdc.MarshalBinary(fc.Validators) + if err != nil { + return err + } + batch.Set(vsKey, vsBz) + + // Save the fc.NextValidators. + nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) + nvsBz, err := dbp.cdc.MarshalBinary(fc.NextValidators) + if err != nil { + return err + } + batch.Set(nvsKey, nvsBz) + + // Save the fc.SignedHeader + shKey := signedHeaderKey(fc.ChainID(), fc.Height()) + shBz, err := dbp.cdc.MarshalBinary(fc.SignedHeader) + if err != nil { + return err + } + batch.Set(shKey, shBz) + + // And write sync. + batch.WriteSync() + return nil +} + +// Implements Provider. 
+func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) ( + FullCommit, error) { + + if minHeight <= 0 { + minHeight = 1 + } + if maxHeight == 0 { + maxHeight = 1<<63 - 1 + } + + itr := dbp.db.ReverseIterator( + signedHeaderKey(chainID, maxHeight), + signedHeaderKey(chainID, minHeight-1), + ) + defer itr.Close() + + for itr.Valid() { + key := itr.Key() + _, _, ok := parseSignedHeaderKey(key) + if !ok { + // Skip over other keys. + itr.Next() + continue + } else { + // Found the latest full commit signed header. + shBz := itr.Value() + sh := types.SignedHeader{} + err := dbp.cdc.UnmarshalBinary(shBz, &sh) + if err != nil { + return FullCommit{}, err + } else { + return dbp.fillFullCommit(sh) + } + } + } + return FullCommit{}, lerr.ErrCommitNotFound() +} + +func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return dbp.getValidatorSet(chainID, height) +} + +func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + vsBz := dbp.db.Get(validatorSetKey(chainID, height)) + if vsBz == nil { + err = lerr.ErrMissingValidators(chainID, height) + return + } + err = dbp.cdc.UnmarshalBinary(vsBz, &valset) + if err != nil { + return + } + valset.TotalVotingPower() // to test deep equality. + return +} + +func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { + var chainID = sh.ChainID + var height = sh.Height + var valset, nvalset *types.ValidatorSet + // Load the validator set. + valset, err := dbp.getValidatorSet(chainID, height) + if err != nil { + return FullCommit{}, err + } + // Load the next validator set. + nvalset, err = dbp.getValidatorSet(chainID, height+1) + if err != nil { + return FullCommit{}, err + } + // Return filled FullCommit. + return FullCommit{ + SignedHeader: sh, + Validators: valset, + NextValidators: nvalset, + }, nil +} diff --git a/lite/doc.go b/lite/doc.go index 89dc702fc..881880f66 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -1,133 +1,139 @@ /* -Package lite allows you to securely validate headers -without a full node. +Package lite allows you to securely validate headers without a full node. -This library pulls together all the crypto and algorithms, -so given a relatively recent (< unbonding period) known -validator set, one can get indisputable proof that data is in -the chain (current state) or detect if the node is lying to -the client. +This library pulls together all the crypto and algorithms, so given a +relatively recent (< unbonding period) known validator set, one can get +indisputable proof that data is in the chain (current state) or detect if the +node is lying to the client. -Tendermint RPC exposes a lot of info, but a malicious node -could return any data it wants to queries, or even to block -headers, even making up fake signatures from non-existent -validators to justify it. This is a lot of logic to get -right, to be contained in a small, easy to use library, -that does this for you, so you can just build nice UI. +Tendermint RPC exposes a lot of info, but a malicious node could return any +data it wants to queries, or even to block headers, even making up fake +signatures from non-existent validators to justify it. This is a lot of logic +to get right, to be contained in a small, easy to use library, that does this +for you, so you can just build nice applications. -We design for clients who have no strong trust relationship -with any tendermint node, just the validator set as a whole. 
-Beyond building nice mobile or desktop applications, the -cosmos hub is another important example of a client, -that needs undeniable proof without syncing the full chain, -in order to efficiently implement IBC. +We design for clients who have no strong trust relationship with any Tendermint +node, just the blockchain and validator set as a whole. -Commits +# Data structures -There are two main data structures that we pass around - Commit -and FullCommit. Both of them mirror what information is -exposed in tendermint rpc. +## SignedHeader -Commit is a block header along with enough validator signatures -to prove its validity (> 2/3 of the voting power). A FullCommit -is a Commit along with the full validator set. When the -validator set doesn't change, the Commit is enough, but since -the block header only has a hash, we need the FullCommit to -follow any changes to the validator set. +SignedHeader is a block header along with a commit -- enough validator +precommit-vote signatures to prove its validity (> 2/3 of the voting power) +given the validator set responsible for signing that header. A FullCommit is a +SignedHeader along with the current and next validator sets. -Certifiers +The hash of the next validator set is included and signed in the SignedHeader. +This lets the lite client keep track of arbitrary changes to the validator set, +as every change to the validator set must be approved by inclusion in the +header and signed in the commit. -A Certifier validates a new Commit given the currently known -state. There are three different types of Certifiers exposed, -each one building on the last one, with additional complexity. +In the worst case, with every block changing the validators around completely, +a lite client can sync up with every block header to verify each validator set +change on the chain. In practice, most applications will not have frequent +drastic updates to the validator set, so the logic defined in this package for +lite client syncing is optimized to use intelligent bisection and +block-skipping for efficient sourcing and verification of these data structures +and updates to the validator set (see the InquiringCertifier for more +information). -Static - given the validator set upon initialization. Verifies -all signatures against that set and if the validator set -changes, it will reject all headers. +The FullCommit is also declared in this package as a convenience structure, +which includes the SignedHeader along with the full current and next +ValidatorSets. -Dynamic - This wraps Static and has the same Certify -method. However, it adds an Update method, which can be called -with a FullCommit when the validator set changes. If it can -prove this is a valid transition, it will update the validator -set. +## Certifier -Inquiring - this wraps Dynamic and implements an auto-update -strategy on top of the Dynamic update. If a call to -Certify fails as the validator set has changed, then it -attempts to find a FullCommit and Update to that header. -To get these FullCommits, it makes use of a Provider. +A Certifier validates a new SignedHeader given the currently known state. There +are two different types of Certifiers provided. -Providers +BaseCertifier - given a validator set and a height, this Certifier verifies +that > 2/3 of the voting power of the given validator set had signed the +SignedHeader, and that the SignedHeader was to be signed by the exact given +validator set, and that the height of the commit is at least height (or +greater). 
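For instance, certifying a header against a fixed validator set is a small amount of code once the certifier is constructed (a minimal sketch only; chainID, height, vals and signedHeader are assumed to be available from elsewhere):

```go
// Sketch: vals is the trusted *types.ValidatorSet, signedHeader the
// header+commit fetched from a node. Certify returns an error if fewer than
// 2/3 of vals signed the commit, if the header's validator hash differs from
// vals.Hash(), or if the header is below the certifier's height.
cert := NewBaseCertifier(chainID, height, vals)
if err := cert.Certify(signedHeader); err != nil {
	return err
}
```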
-A Provider allows us to store and retrieve the FullCommits, -to provide memory to the Inquiring Certifier. +SignedHeader.Commit may be signed by a different validator set, it can get +certified with a BaseCertifier as long as sufficient signatures from the +previous validator set are present in the commit. -NewMemStoreProvider - in-memory cache. +InquiringCertifier - this certifier implements an auto-update and persistence +strategy to certify any SignedHeader of the blockchain. -files.NewProvider - disk backed storage. +## Provider and PersistentProvider -client.NewHTTPProvider - query tendermint rpc. +A Provider allows us to store and retrieve the FullCommits. -NewCacheProvider - combine multiple providers. +```go +type Provider interface { + // LatestFullCommit returns the latest commit with + // minHeight <= height <= maxHeight. + // If maxHeight is zero, returns the latest where + // minHeight <= height. + LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) +} +``` -The suggested use for local light clients is -client.NewHTTPProvider for getting new data (Source), -and NewCacheProvider(NewMemStoreProvider(), -files.NewProvider()) to store confirmed headers (Trusted) +* client.NewHTTPProvider - query Tendermint rpc. -How We Track Validators +A PersistentProvider is a Provider that also allows for saving state. This is +used by the InquiringCertifier for persistence. -Unless you want to blindly trust the node you talk with, you -need to trace every response back to a hash in a block header -and validate the commit signatures of that block header match -the proper validator set. If there is a contant validator -set, you store it locally upon initialization of the client, +```go +type PersistentProvider interface { + Provider + + // SaveFullCommit saves a FullCommit (without verification). + SaveFullCommit(fc FullCommit) error +} +``` + +* DBProvider - persistence provider for use with any tmlibs/DB. +* MultiProvider - combine multiple providers. + +The suggested use for local light clients is client.NewHTTPProvider(...) for +getting new data (Source), and NewMultiProvider(NewDBProvider(dbm.NewMemDB()), +NewDBProvider(db.NewFileDB(...))) to store confirmed full commits (Trusted) + + +# How We Track Validators + +Unless you want to blindly trust the node you talk with, you need to trace +every response back to a hash in a block header and validate the commit +signatures of that block header match the proper validator set. If there is a +static validator set, you store it locally upon initialization of the client, and check against that every time. -Once there is a dynamic validator set, the issue of -verifying a block becomes a bit more tricky. There is -background information in a -github issue (https://github.com/tendermint/tendermint/issues/377). - -In short, if there is a block at height H with a known -(trusted) validator set V, and another block at height H' -(H' > H) with validator set V' != V, then we want a way to -safely update it. - -First, get the new (unconfirmed) validator set V' and -verify H' is internally consistent and properly signed by -this V'. Assuming it is a valid block, we check that at -least 2/3 of the validators in V also signed it, meaning -it would also be valid under our old assumptions. -That should be enough, but we can also check that the -V counts for at least 2/3 of the total votes in H' -for extra safety (we can have a discussion if this is -strictly required). 
If we can verify all this, -then we can accept H' and V' as valid and use that to -validate all blocks X > H'. - -If we cannot update directly from H -> H' because there was -too much change to the validator set, then we can look for -some Hm (H < Hm < H') with a validator set Vm. Then we try -to update H -> Hm and Hm -> H' in two separate steps. -If one of these steps doesn't work, then we continue -bisecting, until we eventually have to externally -validate the valdiator set changes at every block. - -Since we never trust any server in this protocol, only the -signatures themselves, it doesn't matter if the seed comes -from a (possibly malicious) node or a (possibly malicious) user. -We can accept it or reject it based only on our trusted -validator set and cryptographic proofs. This makes it -extremely important to verify that you have the proper -validator set when initializing the client, as that is the -root of all trust. - -Or course, this assumes that the known block is within the -unbonding period to avoid the "nothing at stake" problem. -If you haven't seen the state in a few months, you will need -to manually verify the new validator set hash using off-chain -means (the same as getting the initial hash). +If the validator set for the blockchain is dynamic, verifying block commits is +a bit more involved -- if there is a block at height H with a known (trusted) +validator set V, and another block at height H' (H' > H) with validator set V' +!= V, then we want a way to safely update it. + +First, we get the new (unconfirmed) validator set V' and verify that H' is +internally consistent and properly signed by this V'. Assuming it is a valid +block, we check that at least 2/3 of the validators in V also signed it, +meaning it would also be valid under our old assumptions. Then, we accept H' +and V' as valid and trusted and use that to validate for heights X > H' until a +more recent and updated validator set is found. + +If we cannot update directly from H -> H' because there was too much change to +the validator set, then we can look for some Hm (H < Hm < H') with a validator +set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one +of these steps doesn't work, then we continue bisecting, until we eventually +have to externally validate the valdiator set changes at every block. + +Since we never trust any server in this protocol, only the signatures +themselves, it doesn't matter if the seed comes from a (possibly malicious) +node or a (possibly malicious) user. We can accept it or reject it based only +on our trusted validator set and cryptographic proofs. This makes it extremely +important to verify that you have the proper validator set when initializing +the client, as that is the root of all trust. + +The software currently assumes that the unbonding period is infinite in +duration. If the InquiringCertifier hasn't been updated in a while, you should +manually verify the block headers using other sources. + +TODO: Update the software to handle cases around the unbonding period. 
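Putting the pieces above together, the suggested wiring looks roughly like the following (a sketch only; remoteAddr and fileDB are placeholders, and NewMultiProvider is referenced above but not shown in this file):

```go
// Source: an untrusted provider that queries a Tendermint node over RPC.
source := client.NewHTTPProvider(chainID, remoteAddr)

// Trusted: storage for full commits we have already verified, with an
// in-memory cache in front of a persistent dbm.DB.
trusted := NewMultiProvider(
	NewDBProvider(dbm.NewMemDB()),
	NewDBProvider(fileDB), // any persistent dbm.DB
)
```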
*/ package lite diff --git a/lite/dynamic_certifier.go b/lite/dynamic_certifier.go deleted file mode 100644 index 0ddace8b6..000000000 --- a/lite/dynamic_certifier.go +++ /dev/null @@ -1,96 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*DynamicCertifier)(nil) - -// DynamicCertifier uses a StaticCertifier for Certify, but adds an -// Update method to allow for a change of validators. -// -// You can pass in a FullCommit with another validator set, -// and if this is a provably secure transition (< 1/3 change, -// sufficient signatures), then it will update the -// validator set for the next Certify call. -// For security, it will only follow validator set changes -// going forward. -type DynamicCertifier struct { - cert *StaticCertifier - lastHeight int64 -} - -// NewDynamic returns a new dynamic certifier. -func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier { - return &DynamicCertifier{ - cert: NewStaticCertifier(chainID, vals), - lastHeight: height, - } -} - -// ChainID returns the chain id of this certifier. -// Implements Certifier. -func (dc *DynamicCertifier) ChainID() string { - return dc.cert.ChainID() -} - -// Validators returns the validators of this certifier. -func (dc *DynamicCertifier) Validators() *types.ValidatorSet { - return dc.cert.vSet -} - -// Hash returns the hash of this certifier. -func (dc *DynamicCertifier) Hash() []byte { - return dc.cert.Hash() -} - -// LastHeight returns the last height of this certifier. -func (dc *DynamicCertifier) LastHeight() int64 { - return dc.lastHeight -} - -// Certify will verify whether the commit is valid and will update the height if it is or return an -// error if it is not. -// Implements Certifier. -func (dc *DynamicCertifier) Certify(check Commit) error { - err := dc.cert.Certify(check) - if err == nil { - // update last seen height if input is valid - dc.lastHeight = check.Height() - } - return err -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -// -// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) -func (dc *DynamicCertifier) Update(fc FullCommit) error { - // ignore all checkpoints in the past -> only to the future - h := fc.Height() - if h <= dc.lastHeight { - return liteErr.ErrPastTime() - } - - // first, verify if the input is self-consistent.... - err := fc.ValidateBasic(dc.ChainID()) - if err != nil { - return err - } - - // now, make sure not too much change... 
meaning this commit - // would be approved by the currently known validator set - // as well as the new set - commit := fc.Commit.Commit - err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit) - if err != nil { - return liteErr.ErrTooMuchChange() - } - - // looks good, we can update - dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators) - dc.lastHeight = h - return nil -} diff --git a/lite/dynamic_certifier_test.go b/lite/dynamic_certifier_test.go deleted file mode 100644 index 88c145f95..000000000 --- a/lite/dynamic_certifier_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package lite_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/errors" -) - -// TestDynamicCert just makes sure it still works like StaticCert -func TestDynamicCert(t *testing.T) { - // assert, require := assert.New(t), require.New(t) - assert := assert.New(t) - // require := require.New(t) - - keys := lite.GenValKeys(4) - // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) - // and a certifier based on our known set - chainID := "test-dyno" - cert := lite.NewDynamicCertifier(chainID, vals, 0) - - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect validator change error - }{ - // perfect, signed by everyone - {keys, vals, 1, 0, len(keys), true, false}, - // skip little guy is okay - {keys, vals, 2, 1, len(keys), true, false}, - // but not the big guy - {keys, vals, 3, 0, len(keys) - 1, false, false}, - // even changing the power a little bit breaks the static validator - // the sigs are enough, but the validator hash is unknown - {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, - } - - for _, tc := range cases { - check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(check) - if tc.proper { - assert.Nil(err, "%+v", err) - assert.Equal(cert.LastHeight(), tc.height) - } else { - assert.NotNil(err) - if tc.changed { - assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) - } - } - } -} - -// TestDynamicUpdate makes sure we update safely and sanely -func TestDynamicUpdate(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - chainID := "test-dyno-up" - keys := lite.GenValKeys(5) - vals := keys.ToValidators(20, 0) - cert := lite.NewDynamicCertifier(chainID, vals, 40) - - // one valid block to give us a sense of time - h := int64(100) - good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), []byte("params"), []byte("results"), 0, len(keys)) - err := cert.Certify(good) - require.Nil(err, "%+v", err) - - // some new sets to try later - keys2 := keys.Extend(2) - keys3 := keys2.Extend(4) - - // we try to update with some blocks - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect too much change error - }{ - // same validator set, well signed, of course it is okay - {keys, vals, h + 10, 0, len(keys), true, false}, - // same validator set, poorly signed, fails - {keys, vals, h + 20, 2, len(keys), false, false}, - - // shift the 
power a little, works if properly signed - {keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false}, - // but not on a poor signature - {keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false}, - // and not if it was in the past - {keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false}, - - // let's try to adjust to a whole new validator set (we have 5/7 of the votes) - {keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false}, - - // properly signed but too much change, not allowed (only 7/11 validators known) - {keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true}, - } - - for _, tc := range cases { - fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Update(fc) - if tc.proper { - assert.Nil(err, "%d: %+v", tc.height, err) - // we update last seen height - assert.Equal(cert.LastHeight(), tc.height) - // and we update the proper validators - assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash()) - } else { - assert.NotNil(err, "%d", tc.height) - // we don't update the height - assert.NotEqual(cert.LastHeight(), tc.height) - if tc.changed { - assert.True(errors.IsTooMuchChangeErr(err), - "%d: %+v", tc.height, err) - } - } - } -} diff --git a/lite/errors/errors.go b/lite/errors/errors.go index 99e42a0bd..c38ecf88f 100644 --- a/lite/errors/errors.go +++ b/lite/errors/errors.go @@ -3,90 +3,110 @@ package errors import ( "fmt" - "github.com/pkg/errors" + cmn "github.com/tendermint/tmlibs/common" ) -var ( - errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier") - errCommitNotFound = fmt.Errorf("Commit not found by provider") - errTooMuchChange = fmt.Errorf("Validators change too much to safely update") - errPastTime = fmt.Errorf("Update older than certifier height") - errNoPathFound = fmt.Errorf("Cannot find a path of validators") -) +//---------------------------------------- +// Error types + +type errCommitNotFound struct{} -// IsCommitNotFoundErr checks whether an error is due to missing data -func IsCommitNotFoundErr(err error) bool { - return err != nil && (errors.Cause(err) == errCommitNotFound) +func (e errCommitNotFound) Error() string { + return "Commit not found by provider" } -// ErrCommitNotFound indicates that a the requested commit was not found. -func ErrCommitNotFound() error { - return errors.WithStack(errCommitNotFound) +type errUnexpectedValidators struct { + got []byte + want []byte } -// IsValidatorsChangedErr checks whether an error is due -// to a differing validator set. -func IsValidatorsChangedErr(err error) bool { - return err != nil && (errors.Cause(err) == errValidatorsChanged) +func (e errUnexpectedValidators) Error() string { + return fmt.Sprintf("Validator set is different. Got %X want %X", + e.got, e.want) } -// ErrValidatorsChanged indicates that the validator set was changed between two commits. -func ErrValidatorsChanged() error { - return errors.WithStack(errValidatorsChanged) +type errTooMuchChange struct{} + +func (e errTooMuchChange) Error() string { + return "Insufficient signatures to validate due to valset changes" } -// IsTooMuchChangeErr checks whether an error is due to too much change -// between these validators sets. 
-func IsTooMuchChangeErr(err error) bool { - return err != nil && (errors.Cause(err) == errTooMuchChange) +type errMissingValidators struct { + chainID string + height int64 } -// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3. -func ErrTooMuchChange() error { - return errors.WithStack(errTooMuchChange) +func (e errMissingValidators) Error() string { + return fmt.Sprintf("Validators are unknown or missing for chain %s and height %d", + e.chainID, e.height) } -// IsPastTimeErr ... -func IsPastTimeErr(err error) bool { - return err != nil && (errors.Cause(err) == errPastTime) +//---------------------------------------- +// Methods for above error types + +//----------------- +// ErrCommitNotFound + +// ErrCommitNotFound indicates that a the requested commit was not found. +func ErrCommitNotFound() error { + return cmn.ErrorWrap(errCommitNotFound{}, "") } -// ErrPastTime ... -func ErrPastTime() error { - return errors.WithStack(errPastTime) +func IsErrCommitNotFound(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errCommitNotFound) + return ok + } + return false } -// IsNoPathFoundErr checks whether an error is due to no path of -// validators in provider from where we are to where we want to be -func IsNoPathFoundErr(err error) bool { - return err != nil && (errors.Cause(err) == errNoPathFound) +//----------------- +// ErrUnexpectedValidators + +// ErrUnexpectedValidators indicates a validator set mismatch. +func ErrUnexpectedValidators(got, want []byte) error { + return cmn.ErrorWrap(errUnexpectedValidators{ + got: got, + want: want, + }, "") } -// ErrNoPathFound ... -func ErrNoPathFound() error { - return errors.WithStack(errNoPathFound) +func IsErrUnexpectedValidators(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errUnexpectedValidators) + return ok + } + return false } -//-------------------------------------------- +//----------------- +// ErrTooMuchChange -type errHeightMismatch struct { - h1, h2 int64 +// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3. +func ErrTooMuchChange() error { + return cmn.ErrorWrap(errTooMuchChange{}, "") } -func (e errHeightMismatch) Error() string { - return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2) +func IsErrTooMuchChange(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errTooMuchChange) + return ok + } + return false } -// IsHeightMismatchErr checks whether an error is due to data from different blocks -func IsHeightMismatchErr(err error) bool { - if err == nil { - return false - } - _, ok := errors.Cause(err).(errHeightMismatch) - return ok +//----------------- +// ErrMissingValidators + +// ErrMissingValidators indicates that some validator set was missing or unknown. 
+func ErrMissingValidators(chainID string, height int64) error { + return cmn.ErrorWrap(errMissingValidators{chainID, height}, "") } -// ErrHeightMismatch returns an mismatch error with stack-trace -func ErrHeightMismatch(h1, h2 int64) error { - return errors.WithStack(errHeightMismatch{h1, h2}) +func IsErrMissingValidators(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errMissingValidators) + return ok + } + return false } diff --git a/lite/errors/errors_test.go b/lite/errors/errors_test.go deleted file mode 100644 index 479215e47..000000000 --- a/lite/errors/errors_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package errors - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorHeight(t *testing.T) { - e1 := ErrHeightMismatch(2, 3) - e1.Error() - assert.True(t, IsHeightMismatchErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsHeightMismatchErr(e2)) - assert.False(t, IsHeightMismatchErr(nil)) -} diff --git a/lite/files/commit.go b/lite/files/commit.go deleted file mode 100644 index 8a7e4721e..000000000 --- a/lite/files/commit.go +++ /dev/null @@ -1,93 +0,0 @@ -package files - -import ( - "io/ioutil" - "os" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -const ( - // MaxFullCommitSize is the maximum number of bytes we will - // read in for a full commit to avoid excessive allocations - // in the deserializer - MaxFullCommitSize = 1024 * 1024 -) - -// SaveFullCommit exports the seed in binary / go-amino style -func SaveFullCommit(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.MarshalBinaryWriter(f, fc) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// SaveFullCommitJSON exports the seed in a json format -func SaveFullCommitJSON(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - bz, err := cdc.MarshalJSON(fc) - if err != nil { - return errors.WithStack(err) - } - _, err = f.Write(bz) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// LoadFullCommit loads the full commit from the file system. -func LoadFullCommit(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.UnmarshalBinaryReader(f, &fc, 0) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} - -// LoadFullCommitJSON loads the commit from the file system in JSON format. 
-func LoadFullCommitJSON(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - bz, err := ioutil.ReadAll(f) - if err != nil { - return fc, errors.WithStack(err) - } - err = cdc.UnmarshalJSON(bz, &fc) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go deleted file mode 100644 index e0235ba29..000000000 --- a/lite/files/commit_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package files - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/lite" -) - -func tmpFile() string { - suffix := cmn.RandStr(16) - return filepath.Join(os.TempDir(), "fc-test-"+suffix) -} - -func TestSerializeFullCommits(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // some constants - appHash := []byte("some crazy thing") - chainID := "ser-ial" - h := int64(25) - - // build a fc - keys := lite.GenValKeys(5) - vals := keys.ToValidators(10, 0) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - - require.Equal(h, fc.Height()) - require.Equal(vals.Hash(), fc.ValidatorsHash()) - - // try read/write with json - jfile := tmpFile() - defer os.Remove(jfile) - jseed, err := LoadFullCommitJSON(jfile) - assert.NotNil(err) - err = SaveFullCommitJSON(fc, jfile) - require.Nil(err) - jseed, err = LoadFullCommitJSON(jfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, jseed.Height()) - assert.Equal(vals.Hash(), jseed.ValidatorsHash()) - - // try read/write with binary - bfile := tmpFile() - defer os.Remove(bfile) - bseed, err := LoadFullCommit(bfile) - assert.NotNil(err) - err = SaveFullCommit(fc, bfile) - require.Nil(err) - bseed, err = LoadFullCommit(bfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, bseed.Height()) - assert.Equal(vals.Hash(), bseed.ValidatorsHash()) - - // make sure they don't read the other format (different) - _, err = LoadFullCommit(jfile) - assert.NotNil(err) - _, err = LoadFullCommitJSON(bfile) - assert.NotNil(err) -} diff --git a/lite/files/provider.go b/lite/files/provider.go deleted file mode 100644 index 327b0331a..000000000 --- a/lite/files/provider.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Package files defines a Provider that stores all data in the filesystem - -We assume the same validator hash may be reused by many different -headers/Commits, and thus store it separately. This leaves us -with three issues: - - 1. Given a validator hash, retrieve the validator set if previously stored - 2. Given a block height, find the Commit with the highest height <= h - 3. 
Given a FullCommit, store it quickly to satisfy 1 and 2 - -Note that we do not worry about caching, as that can be achieved by -pairing this with a MemStoreProvider and CacheProvider from certifiers -*/ -package files - -import ( - "encoding/hex" - "fmt" - "math" - "os" - "path/filepath" - "sort" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// nolint -const ( - Ext = ".tsd" - ValDir = "validators" - CheckDir = "checkpoints" - dirPerm = os.FileMode(0755) - //filePerm = os.FileMode(0644) -) - -type provider struct { - valDir string - checkDir string -} - -// NewProvider creates the parent dir and subdirs -// for validators and checkpoints as needed -func NewProvider(dir string) lite.Provider { - valDir := filepath.Join(dir, ValDir) - checkDir := filepath.Join(dir, CheckDir) - for _, d := range []string{valDir, checkDir} { - err := os.MkdirAll(d, dirPerm) - if err != nil { - panic(err) - } - } - return &provider{valDir: valDir, checkDir: checkDir} -} - -func (p *provider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) + Ext -} - -func (p *provider) encodeHeight(h int64) string { - // pad up to 10^12 for height... - return fmt.Sprintf("%012d%s", h, Ext) -} - -// StoreCommit saves a full commit after it has been verified. -func (p *provider) StoreCommit(fc lite.FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - paths := []string{ - filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), - filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), - } - for _, path := range paths { - err := SaveFullCommit(fc, path) - // unknown error in creating or writing immediately breaks - if err != nil { - return err - } - } - return nil -} - -// GetByHeight returns the closest commit with height <= h. -func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) { - // first we look for exact match, then search... - path := filepath.Join(p.checkDir, p.encodeHeight(h)) - fc, err := LoadFullCommit(path) - if liteErr.IsCommitNotFoundErr(err) { - path, err = p.searchForHeight(h) - if err == nil { - fc, err = LoadFullCommit(path) - } - } - return fc, err -} - -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - // Note to future: please update by 2077 to avoid rollover - return p.GetByHeight(math.MaxInt32 - 1) -} - -// search for height, looks for a file with highest height < h -// return certifiers.ErrCommitNotFound() if not there... -func (p *provider) searchForHeight(h int64) (string, error) { - d, err := os.Open(p.checkDir) - if err != nil { - return "", errors.WithStack(err) - } - files, err := d.Readdirnames(0) - - d.Close() - if err != nil { - return "", errors.WithStack(err) - } - - desired := p.encodeHeight(h) - sort.Strings(files) - i := sort.SearchStrings(files, desired) - if i == 0 { - return "", liteErr.ErrCommitNotFound() - } - found := files[i-1] - path := filepath.Join(p.checkDir, found) - return path, errors.WithStack(err) -} - -// GetByHash returns a commit exactly matching this validator hash. 
-func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - path := filepath.Join(p.valDir, p.encodeHash(hash)) - return LoadFullCommit(path) -} diff --git a/lite/files/provider_test.go b/lite/files/provider_test.go deleted file mode 100644 index 5deebb1a2..000000000 --- a/lite/files/provider_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package files_test - -import ( - "bytes" - "errors" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/lite/files" -) - -func checkEqual(stored, loaded lite.FullCommit, chainID string) error { - err := loaded.ValidateBasic(chainID) - if err != nil { - return err - } - if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { - return errors.New("Different block hashes") - } - return nil -} - -func TestFileProvider(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - dir, err := ioutil.TempDir("", "fileprovider-test") - assert.Nil(err) - defer os.RemoveAll(dir) - p := files.NewProvider(dir) - - chainID := "test-files" - appHash := []byte("some-data") - keys := lite.GenValKeys(5) - count := 10 - - // make a bunch of seeds... - seeds := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // two seeds for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... - vals := keys.ToValidators(10, int64(count/2)) - h := int64(20 + 10*i) - check := keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - seeds[i] = lite.NewFullCommit(check, vals) - } - - // check provider is empty - seed, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - seed, err = p.GetByHash(seeds[3].ValidatorsHash()) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // now add them all to the provider - for _, s := range seeds { - err = p.StoreCommit(s) - require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - // by height as well - s2, err = p.GetByHeight(s.Height()) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - } - - // make sure we get the last hash if we overstep - seed, err = p.GetByHeight(5000) - if assert.Nil(err, "%+v", err) { - assert.Equal(seeds[count-1].Height(), seed.Height()) - err = checkEqual(seeds[count-1], seed, chainID) - assert.Nil(err) - } - - // and middle ones as well - seed, err = p.GetByHeight(47) - if assert.Nil(err, "%+v", err) { - // we only step by 10, so 40 must be the one below this - assert.EqualValues(40, seed.Height()) - } - - // and proper error for too low - _, err = p.GetByHeight(5) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) -} diff --git a/lite/files/wire.go b/lite/files/wire.go deleted file mode 100644 index 3a207744a..000000000 --- a/lite/files/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package files - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) -} diff --git a/lite/helpers.go b/lite/helpers.go index 695f6fb9b..764df507b 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -4,24 +4,21 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/types" ) -// 
ValKeys is a helper for testing. +// privKeys is a helper type for testing. // -// It lets us simulate signing with many keys, either ed25519 or secp256k1. -// The main use case is to create a set, and call GenCommit -// to get properly signed header for testing. +// It lets us simulate signing with many keys. The main use case is to create +// a set, and call GenSignedHeader to get properly signed header for testing. // -// You can set different weights of validators each time you call -// ToValidators, and can optionally extend the validator set later -// with Extend or ExtendSecp -type ValKeys []crypto.PrivKey - -// GenValKeys produces an array of private keys to generate commits. -func GenValKeys(n int) ValKeys { - res := make(ValKeys, n) +// You can set different weights of validators each time you call ToValidators, +// and can optionally extend the validator set later with Extend. +type privKeys []crypto.PrivKey + +// genPrivKeys produces an array of private keys to generate commits. +func genPrivKeys(n int) privKeys { + res := make(privKeys, n) for i := range res { res[i] = crypto.GenPrivKeyEd25519() } @@ -29,56 +26,41 @@ func GenValKeys(n int) ValKeys { } // Change replaces the key at index i. -func (v ValKeys) Change(i int) ValKeys { - res := make(ValKeys, len(v)) - copy(res, v) +func (pkz privKeys) Change(i int) privKeys { + res := make(privKeys, len(pkz)) + copy(res, pkz) res[i] = crypto.GenPrivKeyEd25519() return res } // Extend adds n more keys (to remove, just take a slice). -func (v ValKeys) Extend(n int) ValKeys { - extra := GenValKeys(n) - return append(v, extra...) -} - -// GenSecpValKeys produces an array of secp256k1 private keys to generate commits. -func GenSecpValKeys(n int) ValKeys { - res := make(ValKeys, n) - for i := range res { - res[i] = crypto.GenPrivKeySecp256k1() - } - return res -} - -// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). -func (v ValKeys) ExtendSecp(n int) ValKeys { - extra := GenSecpValKeys(n) - return append(v, extra...) +func (pkz privKeys) Extend(n int) privKeys { + extra := genPrivKeys(n) + return append(pkz, extra...) } -// ToValidators produces a list of validators from the set of keys +// ToValidators produces a valset from the set of keys. // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution // (should be enough for testing). -func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { - res := make([]*types.Validator, len(v)) - for i, k := range v { +func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { + res := make([]*types.Validator, len(pkz)) + for i, k := range pkz { res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) } return types.NewValidatorSet(res) } // signHeader properly signs the header with all keys from first to last exclusive. -func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { - votes := make([]*types.Vote, len(v)) +func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit { + votes := make([]*types.Vote, len(pkz)) - // we need this list to keep the ordering... - vset := v.ToValidators(1, 0) + // We need this list to keep the ordering. + vset := pkz.ToValidators(1, 0) - // fill in the votes we want - for i := first; i < last && i < len(v); i++ { - vote := makeVote(header, vset, v[i]) + // Fill in the votes we want. 
+ for i := first; i < last && i < len(pkz); i++ { + vote := makeVote(header, vset, pkz[i]) votes[vote.ValidatorIndex] = vote } @@ -89,15 +71,15 @@ func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit return res } -func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote { +func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote { addr := key.PubKey().Address() - idx, _ := vals.GetByAddress(addr) + idx, _ := valset.GetByAddress(addr) vote := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, Height: header.Height, Round: 1, - Timestamp: time.Now().UTC(), + Timestamp: time.Now().Round(0).UTC(), Type: types.VoteTypePrecommit, BlockID: types.BlockID{Hash: header.Hash()}, } @@ -113,47 +95,46 @@ func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey return vote } -// Silences warning that vals can also be merkle.Hashable -// nolint: interfacer func genHeader(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, Height: height, - Time: time.Now(), + Time: time.Now().Round(0).UTC(), NumTxs: int64(len(txs)), TotalTxs: int64(len(txs)), // LastBlockID // LastCommitHash - ValidatorsHash: vals.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + ValidatorsHash: valset.Hash(), + NextValidatorsHash: nvalset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } -// GenCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) Commit { +// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. +func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, + valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - check := Commit{ + header := genHeader(chainID, height, txs, valset, nvalset, appHash, consHash, resHash) + check := types.SignedHeader{ Header: header, - Commit: v.signHeader(header, first, last), + Commit: pkz.signHeader(header, first, last), } return check } -// GenFullCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { +// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. 
+func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs,
+	valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit {
-	header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash)
-	commit := Commit{
+	header := genHeader(chainID, height, txs, valset, nvalset, appHash, consHash, resHash)
+	commit := types.SignedHeader{
 		Header: header,
-		Commit: v.signHeader(header, first, last),
+		Commit: pkz.signHeader(header, first, last),
 	}
-	return NewFullCommit(commit, vals)
+	return NewFullCommit(commit, valset, nvalset)
 }
diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go
index 042bd08e3..049cd728c 100644
--- a/lite/inquiring_certifier.go
+++ b/lite/inquiring_certifier.go
@@ -1,163 +1,209 @@
 package lite
 import (
+	"bytes"
+
 	"github.com/tendermint/tendermint/types"
-	liteErr "github.com/tendermint/tendermint/lite/errors"
+	lerr "github.com/tendermint/tendermint/lite/errors"
 )
 var _ Certifier = (*InquiringCertifier)(nil)
-// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call
-// to Certify fails due to a change it validator set, InquiringCertifier will try and find a
-// previous FullCommit which it can use to safely update the validator set. It uses a source
-// provider to obtain the needed FullCommits. It stores properly validated data on the local system.
+// InquiringCertifier implements an auto-updating certifier. It uses a
+// "source" provider to obtain the needed FullCommits to securely sync with
+// validator set changes. It stores properly validated data on the
+// "trusted" local system.
 type InquiringCertifier struct {
-	cert *DynamicCertifier
-	// These are only properly validated data, from local system
-	trusted Provider
-	// This is a source of new info, like a node rpc, or other import method
-	Source Provider
+	chainID string
+	// These are only properly validated data, from local system.
+	trusted PersistentProvider
+	// This is a source of new info, like a node rpc, or other import method.
+	source Provider
 }
-// NewInquiringCertifier returns a new Inquiring object. It uses the trusted provider to store
-// validated data and the source provider to obtain missing FullCommits.
+// NewInquiringCertifier returns a new InquiringCertifier. It uses the
+// trusted provider to store validated data and the source provider to
+// obtain missing data (e.g. FullCommits).
 //
-// Example: The trusted provider should a CacheProvider, MemProvider or files.Provider. The source
-// provider should be a client.HTTPProvider.
-func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider,
-	source Provider) (*InquiringCertifier, error) {
-
-	// store the data in trusted
-	err := trusted.StoreCommit(fc)
-	if err != nil {
-		return nil, err
-	}
+// The trusted provider should be a CacheProvider, MemProvider or
+// files.Provider. The source provider should be a client.HTTPProvider.
+func NewInquiringCertifier(chainID string, trusted PersistentProvider, source Provider) (
+	*InquiringCertifier, error) {
 	return &InquiringCertifier{
-		cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()),
+		chainID: chainID,
 		trusted: trusted,
-		Source: source,
+		source: source,
 	}, nil
 }
-// ChainID returns the chain id.
 // Implements Certifier.
 func (ic *InquiringCertifier) ChainID() string {
-	return ic.cert.ChainID()
+	return ic.chainID
 }
-// Validators returns the validator set.
-func (ic *InquiringCertifier) Validators() *types.ValidatorSet {
-	return ic.cert.cert.vSet
-}
-
-// LastHeight returns the last height.
-func (ic *InquiringCertifier) LastHeight() int64 {
-	return ic.cert.lastHeight
-}
-
-// Certify makes sure this is checkpoint is valid.
-//
-// If the validators have changed since the last know time, it looks
-// for a path to prove the new validators.
-//
-// On success, it will store the checkpoint in the store for later viewing
 // Implements Certifier.
-func (ic *InquiringCertifier) Certify(commit Commit) error {
-	err := ic.useClosestTrust(commit.Height())
+//
+// If the validators have changed since the last known time, it looks to
+// ic.trusted and ic.source to prove the new validators. On success, it will
+// try to store the SignedHeader in ic.trusted if the next
+// validator set can be sourced.
+func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error {
+
+	// Get the latest known full commit <= h-1 from our trusted providers.
+	// The full commit at h-1 contains the valset to sign for h.
+	h := shdr.Height - 1
+	tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h)
 	if err != nil {
 		return err
 	}
-	err = ic.cert.Certify(commit)
-	if !liteErr.IsValidatorsChangedErr(err) {
-		return err
-	}
-	err = ic.updateToHash(commit.Header.ValidatorsHash)
-	if err != nil {
-		return err
+	if tfc.Height() == h {
+		// Return error if valset doesn't match.
+		if !bytes.Equal(
+			tfc.NextValidators.Hash(),
+			shdr.Header.ValidatorsHash) {
+			return lerr.ErrUnexpectedValidators(
+				tfc.NextValidators.Hash(),
+				shdr.Header.ValidatorsHash)
+		}
+	} else {
+		// If valset doesn't match...
+		if !bytes.Equal(tfc.NextValidators.Hash(),
+			shdr.Header.ValidatorsHash) {
+			// ... update.
+			tfc, err = ic.updateToHeight(h)
+			if err != nil {
+				return err
+			}
+			// Return error if valset _still_ doesn't match.
+			if !bytes.Equal(tfc.NextValidators.Hash(),
+				shdr.Header.ValidatorsHash) {
+				return lerr.ErrUnexpectedValidators(
+					tfc.NextValidators.Hash(),
+					shdr.Header.ValidatorsHash)
+			}
+		}
 	}
-	err = ic.cert.Certify(commit)
+	// Certify the signed header using the matching valset.
+	cert := NewBaseCertifier(ic.chainID, tfc.Height()+1, tfc.NextValidators)
+	err = cert.Certify(shdr)
 	if err != nil {
 		return err
 	}
-	// store the new checkpoint
-	return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators()))
-}
-
-// Update will verify if this is a valid change and update
-// the certifying validator set if safe to do so.
-func (ic *InquiringCertifier) Update(fc FullCommit) error {
-	err := ic.useClosestTrust(fc.Height())
-	if err != nil {
+	// Get the next validator set.
+	nvalset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1)
+	if lerr.IsErrMissingValidators(err) {
+		// Ignore this error.
+		return nil
+	} else if err != nil {
 		return err
+	} else {
+		// Create filled FullCommit.
+		nfc := FullCommit{
+			SignedHeader: shdr,
+			Validators: tfc.NextValidators,
+			NextValidators: nvalset,
+		}
+		// Validate the full commit. This checks the cryptographic
+		// signatures of Commit against Validators.
+		if err := nfc.ValidateBasic(ic.chainID); err != nil {
+			return err
+		}
+		// Trust it.
+ return ic.trusted.SaveFullCommit(nfc) } - - err = ic.cert.Update(fc) - if err == nil { - err = ic.trusted.StoreCommit(fc) - } - return err } -func (ic *InquiringCertifier) useClosestTrust(h int64) error { - closest, err := ic.trusted.GetByHeight(h) +// verifyAndSave will verify if this is a valid source full commit given the +// best match trusted full commit, and if good, persist to ic.trusted. +// Returns ErrTooMuchChange when >2/3 of tfc did not sign sfc. +// Panics if tfc.Height() >= sfc.Height(). +func (ic *InquiringCertifier) verifyAndSave(tfc, sfc FullCommit) error { + if tfc.Height() >= sfc.Height() { + panic("should not happen") + } + err := tfc.NextValidators.VerifyFutureCommit( + sfc.Validators, + ic.chainID, sfc.SignedHeader.Commit.BlockID, + sfc.SignedHeader.Height, sfc.SignedHeader.Commit, + ) if err != nil { return err } - // if the best seed is not the one we currently use, - // let's just reset the dynamic validator - if closest.Height() != ic.LastHeight() { - ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height()) - } - return nil + return ic.trusted.SaveFullCommit(sfc) } -// updateToHash gets the validator hash we want to update to -// if IsTooMuchChangeErr, we try to find a path by binary search over height -func (ic *InquiringCertifier) updateToHash(vhash []byte) error { - // try to get the match, and update - fc, err := ic.Source.GetByHash(vhash) +// updateToHeight will use divide-and-conquer to find a path to h. +// Returns nil iff we successfully verify and persist a full commit +// for height h, using repeated applications of bisection if necessary. +// +// Returns ErrCommitNotFound if source provider doesn't have the commit for h. +func (ic *InquiringCertifier) updateToHeight(h int64) (FullCommit, error) { + + // Fetch latest full commit from source. + sfc, err := ic.source.LatestFullCommit(ic.chainID, h, h) if err != nil { - return err - } - err = ic.cert.Update(fc) - // handle IsTooMuchChangeErr by using divide and conquer - if liteErr.IsTooMuchChangeErr(err) { - err = ic.updateToHeight(fc.Height()) + return FullCommit{}, err } - return err -} -// updateToHeight will use divide-and-conquer to find a path to h -func (ic *InquiringCertifier) updateToHeight(h int64) error { - // try to update to this height (with checks) - fc, err := ic.Source.GetByHeight(h) - if err != nil { - return err + // Validate the full commit. This checks the cryptographic + // signatures of Commit against Validators. + if err := sfc.ValidateBasic(ic.chainID); err != nil { + return FullCommit{}, err } - start, end := ic.LastHeight(), fc.Height() - if end <= start { - return liteErr.ErrNoPathFound() + + // If sfc.Height() != h, we can't do it. + if sfc.Height() != h { + return FullCommit{}, lerr.ErrCommitNotFound() } - err = ic.Update(fc) - // we can handle IsTooMuchChangeErr specially - if !liteErr.IsTooMuchChangeErr(err) { - return err +FOR_LOOP: + for { + // Fetch latest full commit from trusted. + tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) + if err != nil { + return FullCommit{}, err + } + // Maybe we have nothing to do. + if tfc.Height() == h { + return FullCommit{}, nil + } + + // Try to update to full commit with checks. + err = ic.verifyAndSave(tfc, sfc) + if err == nil { + // All good! + return sfc, nil + } else { + // Handle special case when err is ErrTooMuchChange. + if lerr.IsErrTooMuchChange(err) { + // Divide and conquer. 
+ start, end := tfc.Height(), sfc.Height() + if !(start < end) { + panic("should not happen") + } + mid := (start + end) / 2 + _, err = ic.updateToHeight(mid) + if err != nil { + return FullCommit{}, err + } + // If we made it to mid, we retry. + continue FOR_LOOP + } + return FullCommit{}, err + } } +} - // try to update to mid - mid := (start + end) / 2 - err = ic.updateToHeight(mid) +func (ic *InquiringCertifier) LastTrustedHeight() int64 { + fc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, 1<<63-1) if err != nil { - return err + panic("should not happen") } - - // if we made it to mid, we recurse - return ic.updateToHeight(h) + return fc.Height() } diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go index db8160bdc..b3d8edea4 100644 --- a/lite/inquiring_certifier_test.go +++ b/lite/inquiring_certifier_test.go @@ -1,5 +1,4 @@ -// nolint: vetshadow -package lite_test +package lite import ( "fmt" @@ -8,166 +7,146 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" + dbm "github.com/tendermint/tmlibs/db" ) func TestInquirerValidPath(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() + trust := NewDBProvider(dbm.NewMemDB()) + source := NewDBProvider(dbm.NewMemDB()) - // set up the validators to generate test blocks + // Set up the validators to generate test blocks. var vote int64 = 10 - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) + nkeys := keys.Extend(1) - // construct a bunch of commits, each with one more height than the last + // Construct a bunch of commits, each with one more height than the last. chainID := "inquiry-test" consHash := []byte("params") resHash := []byte("results") count := 50 - commits := make([]lite.FullCommit, count) + fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) + nvals := nkeys.ToValidators(vote, 0) + h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) + fcz[i] = keys.GenFullCommit( + chainID, h, nil, + vals, nvals, + appHash, consHash, resHash, 0, len(keys)) + // Extend the keys by 1 each time. + keys = nkeys + nkeys = nkeys.Extend(1) } - // initialize a certifier with the initial state - cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source) + // Initialize a certifier with the initial state. + err := trust.SaveFullCommit(fcz[0]) + require.Nil(err) + cert, err := NewInquiringCertifier(chainID, trust, source) require.Nil(err) - // this should fail validation.... - commit := commits[count-1].Commit - err = cert.Certify(commit) + // This should fail validation: + sh := fcz[count-1].SignedHeader + err = cert.Certify(sh) require.NotNil(err) - // adding a few commits in the middle should be insufficient + // Adding a few commits in the middle should be insufficient. 
for i := 10; i < 13; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerMinimalPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "minimal-path" - consHash := []byte("other-params") - count := 12 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the validators, so we are just below 2/3 - keys = keys.Extend(len(keys)/2 - 1) - vals := keys.ToValidators(vote, 0) - h := int64(5 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // this should fail validation.... - commit := commits[count-1].Commit - err := cert.Certify(commit) - require.NotNil(err) - - // add a few seed in the middle should be insufficient - for i := 5; i < 8; i++ { - err := source.StoreCommit(commits[i]) + err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(commit) + err = cert.Certify(sh) assert.NotNil(err) - // with more info, we succeed + // With more info, we succeed. for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) + err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(commit) + err = cert.Certify(sh) assert.Nil(err, "%+v", err) } func TestInquirerVerifyHistorical(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() + trust := NewDBProvider(dbm.NewMemDB()) + source := NewDBProvider(dbm.NewMemDB()) - // set up the validators to generate test blocks + // Set up the validators to generate test blocks. var vote int64 = 10 - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) + nkeys := keys.Extend(1) - // construct a bunch of commits, each with one more height than the last + // Construct a bunch of commits, each with one more height than the last. chainID := "inquiry-test" count := 10 consHash := []byte("special-params") - commits := make([]lite.FullCommit, count) + fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) + nvals := nkeys.ToValidators(vote, 0) + h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) + fcz[i] = keys.GenFullCommit( + chainID, h, nil, + vals, nvals, + appHash, consHash, resHash, 0, len(keys)) + // Extend the keys by 1 each time. + keys = nkeys + nkeys = nkeys.Extend(1) } - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) + // Initialize a certifier with the initial state. 
+	err := trust.SaveFullCommit(fcz[0])
+	require.Nil(err)
+	cert, err := NewInquiringCertifier(chainID, trust, source)
+	require.Nil(err)
-	// store a few commits as trust
+	// Store a few full commits as trust.
 	for _, i := range []int{2, 5} {
-		trust.StoreCommit(commits[i])
+		trust.SaveFullCommit(fcz[i])
 	}
-	// let's see if we can jump forward using trusted commits
-	err := source.StoreCommit(commits[7])
+	// See if we can jump forward using trusted full commits.
+	// Source doesn't have fcz[9] so cert.LastTrustedHeight won't change.
+	err = source.SaveFullCommit(fcz[7])
+	require.Nil(err, "%+v", err)
+	sh := fcz[8].SignedHeader
+	err = cert.Certify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
+	fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
+	require.NotNil(err, "%+v", err)
+	assert.Equal(fc_, (FullCommit{}))
+
+	// With fcz[9] Certify will update last trusted height.
+	err = source.SaveFullCommit(fcz[9])
+	require.Nil(err, "%+v", err)
+	sh = fcz[8].SignedHeader
+	err = cert.Certify(sh)
 	require.Nil(err, "%+v", err)
-	check := commits[7].Commit
-	err = cert.Certify(check)
+	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
+	fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
 	require.Nil(err, "%+v", err)
-	assert.Equal(check.Height(), cert.LastHeight())
+	assert.Equal(fc_.Height(), fcz[8].Height())
-	// add access to all commits via untrusted source
+	// Add access to all full commits via untrusted source.
 	for i := 0; i < count; i++ {
-		err := source.StoreCommit(commits[i])
+		err := source.SaveFullCommit(fcz[i])
 		require.Nil(err)
 	}
-	// try to check an unknown seed in the past
-	mid := commits[3].Commit
-	err = cert.Certify(mid)
+	// Try to check an unknown seed in the past.
+	sh = fcz[3].SignedHeader
+	err = cert.Certify(sh)
 	require.Nil(err, "%+v", err)
-	assert.Equal(mid.Height(), cert.LastHeight())
+	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
-	// and jump all the way forward again
-	end := commits[count-1].Commit
-	err = cert.Certify(end)
+	// Jump all the way forward again.
+	sh = fcz[count-1].SignedHeader
+	err = cert.Certify(sh)
 	require.Nil(err, "%+v", err)
-	assert.Equal(end.Height(), cert.LastHeight())
+	assert.Equal(fcz[9].Height(), cert.LastTrustedHeight())
 }
diff --git a/lite/memprovider.go b/lite/memprovider.go
deleted file mode 100644
index ac0d83215..000000000
--- a/lite/memprovider.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package lite
-
-import (
-	"encoding/hex"
-	"sort"
-	"sync"
-
-	liteErr "github.com/tendermint/tendermint/lite/errors"
-)
-
-type memStoreProvider struct {
-	mtx sync.RWMutex
-	// byHeight is always sorted by Height... need to support range search (nil, h]
-	// btree would be more efficient for larger sets
-	byHeight fullCommits
-	byHash map[string]FullCommit
-
-	sorted bool
-}
-
-// fullCommits just exists to allow easy sorting
-type fullCommits []FullCommit
-
-func (s fullCommits) Len() int { return len(s) }
-func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s fullCommits) Less(i, j int) bool {
-	return s[i].Height() < s[j].Height()
-}
-
-// NewMemStoreProvider returns a new in-memory provider.
-func NewMemStoreProvider() Provider {
-	return &memStoreProvider{
-		byHeight: fullCommits{},
-		byHash: map[string]FullCommit{},
-	}
-}
-
-func (m *memStoreProvider) encodeHash(hash []byte) string {
-	return hex.EncodeToString(hash)
-}
-
-// StoreCommit stores a FullCommit after verifying it.
-func (m *memStoreProvider) StoreCommit(fc FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - // store the valid fc - key := m.encodeHash(fc.ValidatorsHash()) - - m.mtx.Lock() - defer m.mtx.Unlock() - m.byHash[key] = fc - m.byHeight = append(m.byHeight, fc) - m.sorted = false - return nil -} - -// GetByHeight returns the FullCommit for height h or an error if the commit is not found. -func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) { - // By heuristics, GetByHeight with linearsearch is fast enough - // for about 50 keys but after that, it needs binary search. - // See https://github.com/tendermint/tendermint/pull/1043#issue-285188242 - m.mtx.RLock() - n := len(m.byHeight) - m.mtx.RUnlock() - - if n <= 50 { - return m.getByHeightLinearSearch(h) - } - return m.getByHeightBinarySearch(h) -} - -func (m *memStoreProvider) sortByHeightIfNecessaryLocked() { - if !m.sorted { - sort.Sort(m.byHeight) - m.sorted = true - } -} - -func (m *memStoreProvider) getByHeightLinearSearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - // search from highest to lowest - for i := len(m.byHeight) - 1; i >= 0; i-- { - if fc := m.byHeight[i]; fc.Height() <= h { - return fc, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -func (m *memStoreProvider) getByHeightBinarySearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - low, high := 0, len(m.byHeight)-1 - var mid int - var hmid int64 - var midFC FullCommit - // Our goal is to either find: - // * item ByHeight with the query - // * greatest height with a height <= query - for low <= high { - mid = int(uint(low+high) >> 1) // Avoid an overflow - midFC = m.byHeight[mid] - hmid = midFC.Height() - switch { - case hmid == h: - return midFC, nil - case hmid < h: - low = mid + 1 - case hmid > h: - high = mid - 1 - } - } - - if high >= 0 { - if highFC := m.byHeight[high]; highFC.Height() < h { - return highFC, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - fc, ok := m.byHash[m.encodeHash(hash)] - if !ok { - return fc, liteErr.ErrCommitNotFound() - } - return fc, nil -} - -// LatestCommit returns the latest FullCommit or an error if no commits exist. -func (m *memStoreProvider) LatestCommit() (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - l := len(m.byHeight) - if l == 0 { - return FullCommit{}, liteErr.ErrCommitNotFound() - } - m.sortByHeightIfNecessaryLocked() - return m.byHeight[l-1], nil -} diff --git a/lite/multiprovider.go b/lite/multiprovider.go new file mode 100644 index 000000000..dcfd1318c --- /dev/null +++ b/lite/multiprovider.go @@ -0,0 +1,72 @@ +package lite + +import ( + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +// multiProvider allows you to place one or more caches in front of a source +// Provider. It runs through them in order until a match is found. +type multiProvider struct { + Providers []PersistentProvider +} + +// NewMultiProvider returns a new provider which wraps multiple other providers. 
+func NewMultiProvider(providers ...PersistentProvider) multiProvider { + return multiProvider{ + Providers: providers, + } +} + +// SaveFullCommit saves on all providers, and aborts on the first error. +func (mc multiProvider) SaveFullCommit(fc FullCommit) (err error) { + for _, p := range mc.Providers { + err = p.SaveFullCommit(fc) + if err != nil { + return + } + } + return +} + +// LatestFullCommit loads the latest from all providers and provides +// the latest FullCommit that satisfies the conditions. +// Returns the first error encountered. +func (mc multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { + for _, p := range mc.Providers { + var fc_ FullCommit + fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight) + if lerr.IsErrCommitNotFound(err) { + err = nil + continue + } else if err != nil { + return + } + if fc == (FullCommit{}) { + fc = fc_ + } else if fc_.Height() > fc.Height() { + fc = fc_ + } + if fc.Height() == maxHeight { + return + } + } + if fc == (FullCommit{}) { + err = lerr.ErrCommitNotFound() + return + } + return +} + +// ValidatorSet returns validator set at height as provided by the first +// provider which has it, or an error otherwise. +func (mc multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + for _, p := range mc.Providers { + valset, err = p.ValidatorSet(chainID, height) + if err == nil { + // TODO Log unexpected types of errors. + return valset, nil + } + } + return nil, lerr.ErrMissingValidators(chainID, height) +} diff --git a/lite/performance_test.go b/lite/performance_test.go deleted file mode 100644 index 8cd522cbb..000000000 --- a/lite/performance_test.go +++ /dev/null @@ -1,365 +0,0 @@ -package lite - -import ( - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -func TestMemStoreProvidergetByHeightBinaryAndLinearSameResult(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - - // Store a bunch of commits at specific heights - // and then ensure that: - // * getByHeightLinearSearch - // * getByHeightBinarySearch - // both return the exact same result - - // 1. Non-existent height commits - nonExistent := []int64{-1000, -1, 0, 1, 10, 11, 17, 31, 67, 1000, 1e9} - ensureNonExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, nonExistent) - ensureNonExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, nonExistent) - - // 2. Save some known height commits - knownHeights := []int64{0, 1, 7, 9, 12, 13, 18, 44, 23, 16, 1024, 100, 199, 1e9} - createAndStoreCommits(t, p, knownHeights) - - // 3. Now check if those heights are retrieved - ensureExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, knownHeights) - ensureExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, knownHeights) - - // 4. And now for the height probing to ensure that any height - // requested returns a fullCommit of height <= requestedHeight. 
- comparegetByHeightAlgorithms(t, p, 0, 0) - comparegetByHeightAlgorithms(t, p, 1, 1) - comparegetByHeightAlgorithms(t, p, 2, 1) - comparegetByHeightAlgorithms(t, p, 5, 1) - comparegetByHeightAlgorithms(t, p, 7, 7) - comparegetByHeightAlgorithms(t, p, 10, 9) - comparegetByHeightAlgorithms(t, p, 12, 12) - comparegetByHeightAlgorithms(t, p, 14, 13) - comparegetByHeightAlgorithms(t, p, 19, 18) - comparegetByHeightAlgorithms(t, p, 43, 23) - comparegetByHeightAlgorithms(t, p, 45, 44) - comparegetByHeightAlgorithms(t, p, 1025, 1024) - comparegetByHeightAlgorithms(t, p, 101, 100) - comparegetByHeightAlgorithms(t, p, 1e3, 199) - comparegetByHeightAlgorithms(t, p, 1e4, 1024) - comparegetByHeightAlgorithms(t, p, 1e9, 1e9) - comparegetByHeightAlgorithms(t, p, 1e9+1, 1e9) -} - -func createAndStoreCommits(t *testing.T, p Provider, heights []int64) { - chainID := "cache-best-height-binary-and-linear" - appHash := []byte("0xdeadbeef") - keys := GenValKeys(len(heights) / 2) - - for _, h := range heights { - vals := keys.ToValidators(10, int64(len(heights)/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} - -func comparegetByHeightAlgorithms(t *testing.T, p *memStoreProvider, ask, expect int64) { - algos := map[string]func(int64) (FullCommit, error){ - "getHeightByLinearSearch": p.getByHeightLinearSearch, - "getHeightByBinarySearch": p.getByHeightBinarySearch, - } - - for algo, fn := range algos { - fc, err := fn(ask) - // t.Logf("%s got=%v want=%d", algo, expect, fc.Height()) - require.Nil(t, err, "%s: %+v", algo, err) - if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "%s: %+v", algo, err) - } - } -} - -var blankFullCommit FullCommit - -func ensureNonExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.NotNil(t, err, "#%d: %s: height=%d should return non-nil error", i, prefix, qh) - assert.Equal(t, fc, blankFullCommit, "#%d: %s: height=%d\ngot =%+v\nwant=%+v", i, prefix, qh, fc, blankFullCommit) - } -} - -func ensureExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.Nil(t, err, "#%d: %s: height=%d should not return an error: %v", i, prefix, qh, err) - assert.NotEqual(t, fc, blankFullCommit, "#%d: %s: height=%d got a blankCommit", i, prefix, qh) - } -} - -func BenchmarkGenCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkGenCommit(b, keys) -} - -func benchmarkGenCommit(b *testing.B, keys ValKeys) { - chainID := fmt.Sprintf("bench-%d", len(keys)) - vals := keys.ToValidators(20, 10) - for i := 0; i < b.N; i++ { - h := int64(1 + i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), resHash, 0, len(keys)) - } -} - -// this benchmarks generating one key -func BenchmarkGenValKeys(b *testing.B) { - keys := GenValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} 
- -// this benchmarks generating one key -func BenchmarkGenSecpValKeys(b *testing.B) { - keys := GenSecpValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} - -func BenchmarkToValidators20(b *testing.B) { - benchmarkToValidators(b, 20) -} - -func BenchmarkToValidators100(b *testing.B) { - benchmarkToValidators(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidators(b *testing.B, nodes int) { - keys := GenValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkToValidatorsSec100(b *testing.B) { - benchmarkToValidatorsSec(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidatorsSec(b *testing.B, nodes int) { - keys := GenSecpValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkCertifyCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func benchmarkCertifyCommit(b *testing.B, keys ValKeys) { - chainID := "bench-certify" - vals := keys.ToValidators(20, 10) - cert := NewStaticCertifier(chainID, vals) - check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys)) - for i := 0; i < b.N; i++ { - err := cert.Certify(check) - if err != nil { - panic(err) - } - } - -} - -type algo bool - -const ( - linearSearch = true - binarySearch = false -) - -// Lazy load the commits -var fcs5, fcs50, fcs100, fcs500, fcs1000 []FullCommit -var h5, h50, h100, h500, h1000 []int64 -var commitsOnce sync.Once - -func lazyGenerateFullCommits(b *testing.B) { - b.Logf("Generating FullCommits") - commitsOnce.Do(func() { - fcs5, h5 = genFullCommits(nil, nil, 5) - b.Logf("Generated 5 FullCommits") - fcs50, h50 = genFullCommits(fcs5, h5, 50) - b.Logf("Generated 50 FullCommits") - fcs100, h100 = genFullCommits(fcs50, h50, 100) - b.Logf("Generated 100 FullCommits") - fcs500, h500 = genFullCommits(fcs100, h100, 500) - b.Logf("Generated 500 FullCommits") - fcs1000, h1000 = genFullCommits(fcs500, h500, 1000) - b.Logf("Generated 1000 FullCommits") - }) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, binarySearch) -} 
- -func BenchmarkMemStoreProviderGetByHeightBinarySearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) -} - -var rng = rand.New(rand.NewSource(10)) - -func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { - lazyGenerateFullCommits(b) - - b.StopTimer() - mp := NewMemStoreProvider() - for i, fc := range fcs { - if err := mp.StoreCommit(fc); err != nil { - b.Fatalf("FullCommit #%d: err: %v", i, err) - } - } - qHeights := make([]int64, len(fHeights)) - copy(qHeights, fHeights) - // Append some non-existent heights to trigger the worst cases. - qHeights = append(qHeights, 19, -100, -10000, 1e7, -17, 31, -1e9) - - memP := mp.(*memStoreProvider) - searchFn := memP.getByHeightLinearSearch - if algo == binarySearch { // nolint - searchFn = memP.getByHeightBinarySearch - } - - hPerm := rng.Perm(len(qHeights)) - b.StartTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, j := range hPerm { - h := qHeights[j] - if _, err := searchFn(h); err != nil { - } - } - } - b.ReportAllocs() -} - -func genFullCommits(prevFC []FullCommit, prevH []int64, want int) ([]FullCommit, []int64) { - fcs := make([]FullCommit, len(prevFC)) - copy(fcs, prevFC) - heights := make([]int64, len(prevH)) - copy(heights, prevH) - - appHash := []byte("benchmarks") - chainID := "benchmarks-gen-full-commits" - n := want - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - vals := keys.ToValidators(10, int64(n/2)) - h := int64(20 + 10*i) - fcs = append(fcs, keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)) - heights = append(heights, h) - } - return fcs, heights -} - -func TestMemStoreProviderLatestCommitAlwaysUsesSorted(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - // 1. With no commits yet stored, it should return ErrCommitNotFound - got, err := p.LatestCommit() - require.Equal(t, err.Error(), liteErr.ErrCommitNotFound().Error(), "should return ErrCommitNotFound()") - require.Equal(t, got, blankFullCommit, "With no fullcommits, it should return a blank FullCommit") - - // 2. Generate some full commits now and we'll add them unsorted. - genAndStoreCommitsOfHeight(t, p, 27, 100, 1, 12, 1000, 17, 91) - fc, err := p.LatestCommit() - require.Nil(t, err, "with commits saved no error expected") - require.NotEqual(t, fc, blankFullCommit, "with commits saved no blank FullCommit") - require.Equal(t, fc.Height(), int64(1000), "the latest commit i.e. the largest expected") -} - -func genAndStoreCommitsOfHeight(t *testing.T, p Provider, heights ...int64) { - n := len(heights) - appHash := []byte("tests") - chainID := "tests-gen-full-commits" - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - h := heights[i] - vals := keys.ToValidators(10, int64(n/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} diff --git a/lite/provider.go b/lite/provider.go index 22dc964a1..34ba40d44 100644 --- a/lite/provider.go +++ b/lite/provider.go @@ -1,103 +1,28 @@ package lite -// Provider is used to get more validators by other means. 
-// -// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.... -type Provider interface { - // StoreCommit saves a FullCommit after we have verified it, - // so we can query for it later. Important for updating our - // store of trusted commits. - StoreCommit(fc FullCommit) error - // GetByHeight returns the closest commit with height <= h. - GetByHeight(h int64) (FullCommit, error) - // GetByHash returns a commit exactly matching this validator hash. - GetByHash(hash []byte) (FullCommit, error) - // LatestCommit returns the newest commit stored. - LatestCommit() (FullCommit, error) -} - -// cacheProvider allows you to place one or more caches in front of a source -// Provider. It runs through them in order until a match is found. -// So you can keep a local cache, and check with the network if -// no data is there. -type cacheProvider struct { - Providers []Provider -} +import ( + "github.com/tendermint/tendermint/types" +) -// NewCacheProvider returns a new provider which wraps multiple other providers. -func NewCacheProvider(providers ...Provider) Provider { - return cacheProvider{ - Providers: providers, - } -} +// Provider provides information for the lite client to sync validators. +// Examples: MemProvider, files.Provider, client.Provider, CacheProvider. +type Provider interface { -// StoreCommit tries to add the seed to all providers. -// -// Aborts on first error it encounters (closest provider) -func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { - for _, p := range c.Providers { - err = p.StoreCommit(fc) - if err != nil { - break - } - } - return err -} + // LatestFullCommit returns the latest commit with minHeight <= height <= + // maxHeight. + // If maxHeight is zero, returns the latest where minHeight <= height. + LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) -// GetByHeight should return the closest possible match from all providers. -// -// The Cache is usually organized in order from cheapest call (memory) -// to most expensive calls (disk/network). However, since GetByHeight returns -// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would -// give us the exact match, a naive "stop at first non-error" would hide -// the actual desired results. -// -// Thus, we query each provider in order until we find an exact match -// or we finished querying them all. If at least one returned a non-error, -// then this returns the best match (minimum h-h'). -func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.GetByHeight(h) - if err == nil { - if tfc.Height() > fc.Height() { - fc = tfc - } - if tfc.Height() == h { - break - } - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err + // Get the valset that corresponds to chainID and height and return. + // Height must be >= 1. + ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) } -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { - for _, p := range c.Providers { - fc, err = p.GetByHash(hash) - if err == nil { - break - } - } - return fc, err -} +// A provider that can also persist new information. +// Examples: MemProvider, files.Provider, CacheProvider. 
+type PersistentProvider interface { + Provider -// LatestCommit returns the latest FullCommit or an error if no commit exists. -func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.LatestCommit() - if err == nil && tfc.Height() > fc.Height() { - fc = tfc - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err + // SaveFullCommit saves a FullCommit (without verification). + SaveFullCommit(fc FullCommit) error } diff --git a/lite/provider_test.go b/lite/provider_test.go index 77b5b1a85..96523d943 100644 --- a/lite/provider_test.go +++ b/lite/provider_test.go @@ -1,98 +1,88 @@ -// nolint: vetshadow -package lite_test +package lite import ( + "errors" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tmlibs/db" ) -// missingProvider doesn't store anything, always a miss -// Designed as a mock for testing +// missingProvider doesn't store anything, always a miss. +// Designed as a mock for testing. type missingProvider struct{} // NewMissingProvider returns a provider which does not store anything and always misses. -func NewMissingProvider() lite.Provider { +func NewMissingProvider() PersistentProvider { return missingProvider{} } -func (missingProvider) StoreCommit(lite.FullCommit) error { return nil } -func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) SaveFullCommit(FullCommit) error { return nil } +func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) { + return FullCommit{}, lerr.ErrCommitNotFound() } -func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() -} -func (missingProvider) LatestCommit() (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { + return nil, errors.New("missing validator set") } func TestMemProvider(t *testing.T) { - p := lite.NewMemStoreProvider() + p := NewDBProvider(dbm.NewMemDB()) checkProvider(t, p, "test-mem", "empty") } -func TestCacheProvider(t *testing.T) { - p := lite.NewCacheProvider( +func TestMultiProvider(t *testing.T) { + p := NewMultiProvider( NewMissingProvider(), - lite.NewMemStoreProvider(), + NewDBProvider(dbm.NewMemDB()), NewMissingProvider(), ) checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") } -func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { +func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { assert, require := assert.New(t), require.New(t) appHash := []byte(app) - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // make a bunch of commits... - commits := make([]lite.FullCommit, count) + // Make a bunch of full commits. + fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // two commits for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... 
vals := keys.ToValidators(10, int64(count/2)) h := int64(20 + 10*i) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) } - // check provider is empty - fc, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - fc, err = p.GetByHash(commits[3].ValidatorsHash()) + // Check that provider is initially empty. + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) + assert.True(lerr.IsErrCommitNotFound(err)) - // now add them all to the provider - for _, s := range commits { - err = p.StoreCommit(s) + // Save all full commits to the provider. + for _, fc := range fcz { + err = p.SaveFullCommit(fc) require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - assert.Equal(s, s2) - // by height as well - s2, err = p.GetByHeight(s.Height()) + // Make sure we can get it back. + fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height()) assert.Nil(err) - assert.Equal(s, s2) + assert.Equal(fc.SignedHeader, fc2.SignedHeader) + assert.Equal(fc.Validators, fc2.Validators) + assert.Equal(fc.NextValidators, fc2.NextValidators) } - // make sure we get the last hash if we overstep - fc, err = p.GetByHeight(5000) + // Make sure we get the last hash if we overstep. + fc, err = p.LatestFullCommit(chainID, 1, 5000) if assert.Nil(err) { - assert.Equal(commits[count-1].Height(), fc.Height()) - assert.Equal(commits[count-1], fc) + assert.Equal(fcz[count-1].Height(), fc.Height()) + assert.Equal(fcz[count-1], fc) } - // and middle ones as well - fc, err = p.GetByHeight(47) + // ... and middle ones as well. + fc, err = p.LatestFullCommit(chainID, 1, 47) if assert.Nil(err) { // we only step by 10, so 40 must be the one below this assert.EqualValues(40, fc.Height()) @@ -100,50 +90,49 @@ func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { } -// this will make a get height, and if it is good, set the data as well -func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) { - fc, err := p.GetByHeight(ask) - require.Nil(t, err, "GetByHeight") +// This will make a get height, and if it is good, set the data as well. +func checkLatestFullCommit(t *testing.T, p PersistentProvider, chainID string, ask, expect int64) { + fc, err := p.LatestFullCommit(chainID, 1, ask) + require.Nil(t, err) if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "StoreCommit") + err = p.SaveFullCommit(fc) + require.Nil(t, err) } } -func TestCacheGetsBestHeight(t *testing.T) { - // assert, require := assert.New(t), require.New(t) +func TestMultiLatestFullCommit(t *testing.T) { require := require.New(t) - // we will write data to the second level of the cache (p2), - // and see what gets cached, stored in - p := lite.NewMemStoreProvider() - p2 := lite.NewMemStoreProvider() - cp := lite.NewCacheProvider(p, p2) + // We will write data to the second level of the cache (p2), and see what + // gets cached/stored in. + p := NewDBProvider(dbm.NewMemDB()) + p2 := NewDBProvider(dbm.NewMemDB()) + cp := NewMultiProvider(p, p2) chainID := "cache-best-height" appHash := []byte("01234567") - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // set a bunch of commits + // Set a bunch of full commits. 
for i := 0; i < count; i++ { vals := keys.ToValidators(10, int64(count/2)) h := int64(10 * (i + 1)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p2.StoreCommit(fc) + fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) + err := p2.SaveFullCommit(fc) require.NoError(err) } - // let's get a few heights from the cache and set them proper - checkGetHeight(t, cp, 57, 50) - checkGetHeight(t, cp, 33, 30) + // Get a few heights from the cache and set them proper. + checkLatestFullCommit(t, cp, chainID, 57, 50) + checkLatestFullCommit(t, cp, chainID, 33, 30) // make sure they are set in p as well (but nothing else) - checkGetHeight(t, p, 44, 30) - checkGetHeight(t, p, 50, 50) - checkGetHeight(t, p, 99, 50) + checkLatestFullCommit(t, p, chainID, 44, 30) + checkLatestFullCommit(t, p, chainID, 50, 50) + checkLatestFullCommit(t, p, chainID, 99, 50) // now, query the cache for a higher value - checkGetHeight(t, p2, 99, 90) - checkGetHeight(t, cp, 99, 90) + checkLatestFullCommit(t, p2, chainID, 99, 90) + checkLatestFullCommit(t, cp, chainID, 99, 90) } diff --git a/lite/proxy/block.go b/lite/proxy/block.go index 4cff9ee68..663395fab 100644 --- a/lite/proxy/block.go +++ b/lite/proxy/block.go @@ -2,27 +2,24 @@ package proxy import ( "bytes" + "errors" - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - certerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" ) -func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error { +func ValidateBlockMeta(meta *types.BlockMeta, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil BlockMeta") } // TODO: check the BlockID?? - return ValidateHeader(meta.Header, check) + return ValidateHeader(meta.Header, sh) } -func ValidateBlock(meta *types.Block, check lite.Commit) error { +func ValidateBlock(meta *types.Block, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil Block") } - err := ValidateHeader(meta.Header, check) + err := ValidateHeader(meta.Header, sh) if err != nil { return err } @@ -32,17 +29,19 @@ func ValidateBlock(meta *types.Block, check lite.Commit) error { return nil } -func ValidateHeader(head *types.Header, check lite.Commit) error { +func ValidateHeader(head *types.Header, sh types.SignedHeader) error { if head == nil { return errors.New("expecting a non-nil Header") } - // make sure they are for the same height (obvious fail) - if head.Height != check.Height() { - return certerr.ErrHeightMismatch(head.Height, check.Height()) + if sh.Header == nil { + return errors.New("unexpected empty SignedHeader") + } + // Make sure they are for the same height (obvious fail). + if head.Height != sh.Height { + return errors.New("Header heights mismatched") } - // check if they are equal by using hashes - chead := check.Header - if !bytes.Equal(head.Hash(), chead.Hash()) { + // Check if they are equal by using hashes. 
+ if !bytes.Equal(head.Hash(), sh.Hash()) { return errors.New("Headers don't match") } return nil diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go index 6e319dc0d..a67654025 100644 --- a/lite/proxy/certifier.go +++ b/lite/proxy/certifier.go @@ -2,31 +2,29 @@ package proxy import ( "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" - "github.com/tendermint/tendermint/lite/files" + lclient "github.com/tendermint/tendermint/lite/client" + dbm "github.com/tendermint/tmlibs/db" ) func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) { - trust := lite.NewCacheProvider( - lite.NewMemStoreProvider(), - files.NewProvider(rootDir), + trust := lite.NewMultiProvider( + lite.NewDBProvider(dbm.NewMemDB()), + lite.NewDBProvider(dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)), ) - source := certclient.NewHTTPProvider(nodeAddr) + source := lclient.NewHTTPProvider(chainID, nodeAddr) // XXX: total insecure hack to avoid `init` - fc, err := source.LatestCommit() - /* XXX - // this gets the most recent verified commit - fc, err := trust.LatestCommit() - if certerr.IsCommitNotFoundErr(err) { - return nil, errors.New("Please run init first to establish a root of trust") - }*/ + fc, err := source.LatestFullCommit(chainID, 1, 1) + if err != nil { + return nil, err + } + err = trust.SaveFullCommit(fc) if err != nil { return nil, err } - cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source) + cert, err := lite.NewInquiringCertifier(chainID, trust, source) if err != nil { return nil, err } diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go index 5a2713e3c..9af72a54c 100644 --- a/lite/proxy/errors.go +++ b/lite/proxy/errors.go @@ -1,22 +1,24 @@ package proxy import ( - "fmt" - - "github.com/pkg/errors" + cmn "github.com/tendermint/tmlibs/common" ) -//-------------------------------------------- +type errNoData struct{} -var errNoData = fmt.Errorf("No data returned for query") +func (e errNoData) Error() string { + return "No data returned for query" +} -// IsNoDataErr checks whether an error is due to a query returning empty data -func IsNoDataErr(err error) bool { - return errors.Cause(err) == errNoData +// IsErrNoData checks whether an error is due to a query returning empty data +func IsErrNoData(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errNoData) + return ok + } + return false } func ErrNoData() error { - return errors.WithStack(errNoData) + return cmn.ErrorWrap(errNoData{}, "") } - -//-------------------------------------------- diff --git a/lite/proxy/errors_test.go b/lite/proxy/errors_test.go deleted file mode 100644 index 7f51be50f..000000000 --- a/lite/proxy/errors_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package proxy - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorNoData(t *testing.T) { - e1 := ErrNoData() - assert.True(t, IsNoDataErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsNoDataErr(e2)) - assert.False(t, IsNoDataErr(nil)) -} diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 9c9557f8f..aa25cdcf2 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -1,15 +1,16 @@ package proxy import ( + "fmt" + "github.com/pkg/errors" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/client" - certerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes 
"github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" ) // KeyProof represents a proof of existence or absence of a single key. @@ -75,12 +76,12 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // AppHash for height H is in header H+1 - commit, err := GetCertifiedCommit(resp.Height+1, node, cert) + signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert) if err != nil { return nil, nil, err } - _ = commit + _ = signedHeader return &ctypes.ResultABCIQuery{Response: resp}, nil, nil /* // TODO refactor so iavl stuff is not in tendermint core @@ -98,7 +99,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // Validate the proof against the certified header to ensure data integrity. - err = eproof.Verify(resp.Key, resp.Value, commit.Header.AppHash) + err = eproof.Verify(resp.Key, resp.Value, signedHeader.AppHash) if err != nil { return nil, nil, errors.Wrap(err, "Couldn't verify proof") } @@ -117,7 +118,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // Validate the proof against the certified header to ensure data integrity. - err = aproof.Verify(resp.Key, nil, commit.Header.AppHash) + err = aproof.Verify(resp.Key, nil, signedHeader.AppHash) if err != nil { return nil, nil, errors.Wrap(err, "Couldn't verify proof") } @@ -125,28 +126,29 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption */ } -// GetCertifiedCommit gets the signed header for a given height -// and certifies it. Returns error if unable to get a proven header. -func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) { +// GetCertifiedCommit gets the signed header for a given height and certifies +// it. Returns error if unable to get a proven header. +func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Certifier) (types.SignedHeader, error) { // FIXME: cannot use cert.GetByHeight for now, as it also requires // Validators and will fail on querying tendermint for non-current height. // When this is supported, we should use it instead... - rpcclient.WaitForHeight(node, h, nil) - cresp, err := node.Commit(&h) + rpcclient.WaitForHeight(client, h, nil) + cresp, err := client.Commit(&h) if err != nil { - return lite.Commit{}, err + return types.SignedHeader{}, err } - commit := client.CommitFromResult(cresp) - // validate downloaded checkpoint with our request and trust store. - if commit.Height() != h { - return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height()) + // Validate downloaded checkpoint with our request and trust store. + sh := cresp.SignedHeader + if sh.Height != h { + return types.SignedHeader{}, fmt.Errorf("height mismatch: want %v got %v", + h, sh.Height) } - if err = cert.Certify(commit); err != nil { - return lite.Commit{}, err + if err = cert.Certify(sh); err != nil { + return types.SignedHeader{}, err } - return commit, nil + return sh, nil } diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 38a43af2b..fcc6659af 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -19,12 +19,12 @@ import ( ) var node *nm.Node +var chainID = "tendermint_test" // TODO use from config. // TODO fix tests!! 
func TestMain(m *testing.M) { app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) code := m.Run() @@ -55,28 +55,28 @@ func _TestAppProofs(t *testing.T) { brh := br.Height // This sets up our trust on the node based on some past point. - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) + cert := lite.NewBaseCertifier("my-chain", seed.Height(), seed.Validators) client.WaitForHeight(cl, 3, nil) - latest, err := source.LatestCommit() + latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) require.NoError(err, "%+v", err) - rootHash := latest.Header.AppHash + rootHash := latest.SignedHeader.AppHash // verify a query before the tx block has no data (and valid non-exist proof) bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) fmt.Println(bs, height, proof, err) require.NotNil(err) - require.True(IsNoDataErr(err), err.Error()) + require.True(IsErrNoData(err), err.Error()) require.Nil(bs) // but given that block it is good bs, height, proof, err = GetWithProof(k, brh, cl, cert) require.NoError(err, "%+v", err) require.NotNil(proof) - require.True(height >= int64(latest.Header.Height)) + require.True(height >= int64(latest.Height())) // Alexis there is a bug here, somehow the above code gives us rootHash = nil // and proof.Verify doesn't care, while proofNotExists.Verify fails. @@ -92,7 +92,7 @@ func _TestAppProofs(t *testing.T) { // Test non-existing key. missing := []byte("my-missing-key") bs, _, proof, err = GetWithProof(missing, 0, cl, cert) - require.True(IsNoDataErr(err)) + require.True(IsErrNoData(err)) require.Nil(bs) require.NotNil(proof) err = proof.Verify(missing, nil, rootHash) @@ -114,10 +114,10 @@ func _TestTxProofs(t *testing.T) { require.EqualValues(0, br.DeliverTx.Code) brh := br.Height - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) + cert := lite.NewBaseCertifier("my-chain", seed.Height(), seed.Validators) // First let's make sure a bogus transaction hash returns a valid non-existence proof. 
key := types.Tx([]byte("bogus")).Hash() diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index 782a6aabb..af4fc26f4 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -6,7 +6,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite/proxy" "github.com/tendermint/tendermint/types" ) @@ -26,9 +25,9 @@ var hdrHeight11 = &types.Header{ func TestValidateBlock(t *testing.T) { tests := []struct { - block *types.Block - commit lite.Commit - wantErr string + block *types.Block + signedHeader types.SignedHeader + wantErr string }{ { block: nil, wantErr: "non-nil Block", @@ -37,32 +36,32 @@ func TestValidateBlock(t *testing.T) { block: &types.Block{}, wantErr: "nil Header", }, { - block: &types.Block{Header: new(types.Header)}, + block: &types.Block{Header: new(types.Header)}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - block: &types.Block{Header: &types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", + block: &types.Block{Header: &types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - block: &types.Block{Header: &types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + block: &types.Block{Header: &types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test // Start Header.Hash mismatch test { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: hdrHeight11}, }, // End Header.Hash mismatch test @@ -72,7 +71,7 @@ func TestValidateBlock(t *testing.T) { Header: &types.Header{Height: 11}, Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}}, }, @@ -83,7 +82,7 @@ func TestValidateBlock(t *testing.T) { Header: &types.Header{Height: 11, DataHash: deadBeefHash}, Data: &types.Data{Txs: deadBeefTxs}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, }, @@ -92,7 +91,7 @@ func TestValidateBlock(t *testing.T) { } for i, tt := range tests { - err := proxy.ValidateBlock(tt.block, tt.commit) + err := proxy.ValidateBlock(tt.block, tt.signedHeader) if tt.wantErr != "" { if err == nil { assert.FailNowf(t, "Unexpectedly passed", "#%d", i) @@ -108,9 +107,9 @@ func TestValidateBlock(t *testing.T) { func TestValidateBlockMeta(t *testing.T) { tests := []struct { - meta *types.BlockMeta - commit lite.Commit - wantErr string + meta *types.BlockMeta + signedHeader types.SignedHeader + wantErr string }{ { meta: nil, wantErr: "non-nil BlockMeta", @@ -119,32 +118,32 @@ func TestValidateBlockMeta(t *testing.T) { meta: &types.BlockMeta{}, wantErr: "non-nil Header", }, { - meta: &types.BlockMeta{Header: 
new(types.Header)}, + meta: &types.BlockMeta{Header: new(types.Header)}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - meta: &types.BlockMeta{Header: &types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", + meta: &types.BlockMeta{Header: &types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - meta: &types.BlockMeta{Header: &types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + meta: &types.BlockMeta{Header: &types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test // Start Headers don't match test { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: hdrHeight11}, }, { @@ -156,7 +155,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11, DataHash: deadBeefHash}, }, wantErr: "Headers don't match", @@ -170,7 +169,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint"), @@ -189,7 +188,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime2, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint-x"), @@ -203,7 +202,7 @@ func TestValidateBlockMeta(t *testing.T) { } for i, tt := range tests { - err := proxy.ValidateBlockMeta(tt.meta, tt.commit) + err := proxy.ValidateBlockMeta(tt.meta, tt.signedHeader) if tt.wantErr != "" { if err == nil { assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr) diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 5fb12a40a..83fc96a15 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -4,7 +4,6 @@ import ( cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" ) @@ -53,11 +52,11 @@ func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { return res, err } h := int64(res.Height) - check, err := GetCertifiedCommit(h, w.Client, w.cert) + sh, err := GetCertifiedCommit(h, w.Client, w.cert) if err != nil { return res, err } - err = res.Proof.Validate(check.Header.DataHash) + err = res.Proof.Validate(sh.DataHash) return res, err } @@ -74,12 +73,12 @@ func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock // go and verify every blockmeta in the result.... 
for _, meta := range r.BlockMetas { // get a checkpoint to verify from - c, err := w.Commit(&meta.Header.Height) + res, err := w.Commit(&meta.Header.Height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) - err = ValidateBlockMeta(meta, check) + sh := res.SignedHeader + err = ValidateBlockMeta(meta, sh) if err != nil { return nil, err } @@ -95,18 +94,18 @@ func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { return nil, err } // get a checkpoint to verify from - c, err := w.Commit(height) + res, err := w.Commit(height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) + sh := res.SignedHeader // now verify - err = ValidateBlockMeta(r.BlockMeta, check) + err = ValidateBlockMeta(r.BlockMeta, sh) if err != nil { return nil, err } - err = ValidateBlock(r.Block, check) + err = ValidateBlock(r.Block, sh) if err != nil { return nil, err } @@ -118,13 +117,13 @@ func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { // This is the foundation for all other verification in this module func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { rpcclient.WaitForHeight(w.Client, *height, nil) - r, err := w.Client.Commit(height) + res, err := w.Client.Commit(height) // if we got it, then certify it if err == nil { - check := certclient.CommitFromResult(r) - err = w.cert.Certify(check) + sh := res.SignedHeader + err = w.cert.Certify(sh) } - return r, err + return res, err } // // WrappedSwitch creates a websocket connection that auto-verifies any info diff --git a/lite/static_certifier.go b/lite/static_certifier.go deleted file mode 100644 index 1ec3b809a..000000000 --- a/lite/static_certifier.go +++ /dev/null @@ -1,73 +0,0 @@ -package lite - -import ( - "bytes" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*StaticCertifier)(nil) - -// StaticCertifier assumes a static set of validators, set on -// initilization and checks against them. -// The signatures on every header is checked for > 2/3 votes -// against the known validator set upon Certify -// -// Good for testing or really simple chains. Building block -// to support real-world functionality. -type StaticCertifier struct { - chainID string - vSet *types.ValidatorSet - vhash []byte -} - -// NewStaticCertifier returns a new certifier with a static validator set. -func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier { - return &StaticCertifier{ - chainID: chainID, - vSet: vals, - } -} - -// ChainID returns the chain id. -// Implements Certifier. -func (sc *StaticCertifier) ChainID() string { - return sc.chainID -} - -// Validators returns the validator set. -func (sc *StaticCertifier) Validators() *types.ValidatorSet { - return sc.vSet -} - -// Hash returns the hash of the validator set. -func (sc *StaticCertifier) Hash() []byte { - if len(sc.vhash) == 0 { - sc.vhash = sc.vSet.Hash() - } - return sc.vhash -} - -// Certify makes sure that the commit is valid. -// Implements Certifier. 
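As an aside on the certification flow the wrapper hunks above move to, here is a minimal sketch of the pattern (the helper name and packaging are hypothetical; only the signatures visible in this patch — rpcclient.Client.Commit, ResultCommit's embedded SignedHeader, and Certifier.Certify — are assumed): take the SignedHeader out of a commit result and run it through the Certifier before trusting anything derived from it.

package proxy

import (
	"github.com/tendermint/tendermint/lite"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

// certifiedHeader is a hypothetical helper: fetch the commit at the given
// height and certify the returned SignedHeader before handing it back.
func certifiedHeader(c rpcclient.Client, cert lite.Certifier, height int64) (types.SignedHeader, error) {
	res, err := c.Commit(&height)
	if err != nil {
		return types.SignedHeader{}, err
	}
	sh := res.SignedHeader
	// Only return the header if the certifier accepts its commit signatures.
	if err := cert.Certify(sh); err != nil {
		return types.SignedHeader{}, err
	}
	return sh, nil
}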
-func (sc *StaticCertifier) Certify(commit Commit) error { - // do basic sanity checks - err := commit.ValidateBasic(sc.chainID) - if err != nil { - return err - } - - // make sure it has the same validator set we have (static means static) - if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) { - return liteErr.ErrValidatorsChanged() - } - - // then make sure we have the proper signatures for this - err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID, - commit.Header.Height, commit.Commit) - return errors.WithStack(err) -} diff --git a/lite/types.go b/lite/types.go new file mode 100644 index 000000000..1f4797992 --- /dev/null +++ b/lite/types.go @@ -0,0 +1,13 @@ +package lite + +import ( + "github.com/tendermint/tendermint/types" +) + +// Certifier checks the votes to make sure the block really is signed properly. +// Certifier must know the current or recent set of validitors by some other +// means. +type Certifier interface { + Certify(sheader types.SignedHeader) error + ChainID() string +} diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index 4fc8f97fc..314101632 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -183,7 +183,7 @@ func TestDifferByTimestamp(t *testing.T) { assert.NoError(t, err, "expected no error signing proposal") signBytes := proposal.SignBytes(chainID) sig := proposal.Signature - timeStamp := clipToMS(proposal.Timestamp) + timeStamp := proposal.Timestamp // manipulate the timestamp. should get changed back proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond) @@ -207,7 +207,7 @@ func TestDifferByTimestamp(t *testing.T) { signBytes := vote.SignBytes(chainID) sig := vote.Signature - timeStamp := clipToMS(vote.Timestamp) + timeStamp := vote.Timestamp // manipulate the timestamp. 
should get changed back vote.Timestamp = vote.Timestamp.Add(time.Millisecond) @@ -242,10 +242,3 @@ func newProposal(height int64, round int, partsHeader types.PartSetHeader) *type Timestamp: time.Now().UTC(), } } - -func clipToMS(t time.Time) time.Time { - nano := t.UnixNano() - million := int64(1000000) - nano = (nano / million) * million - return time.Unix(0, nano).UTC() -} diff --git a/privval/socket_test.go b/privval/socket_test.go index fcf21e0c6..1813893af 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -119,7 +119,7 @@ func TestSocketPVAcceptDeadline(t *testing.T) { SocketPVAcceptDeadline(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestSocketPVDeadline(t *testing.T) { @@ -165,7 +165,7 @@ func TestSocketPVDeadline(t *testing.T) { time.Sleep(20 * time.Microsecond) _, err := sc.getPubKey() - assert.Equal(t, err.(cmn.Error).Cause(), ErrConnTimeout) + assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) } func TestSocketPVWait(t *testing.T) { @@ -178,7 +178,7 @@ func TestSocketPVWait(t *testing.T) { SocketPVConnWait(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestRemoteSignerRetry(t *testing.T) { @@ -221,7 +221,7 @@ func TestRemoteSignerRetry(t *testing.T) { RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnRetries(retries)(rs) - assert.Equal(t, rs.Start().(cmn.Error).Cause(), ErrDialRetryMax) + assert.Equal(t, rs.Start().(cmn.Error).Data(), ErrDialRetryMax) select { case attempts := <-attemptc: diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index a5ad5b4cb..4cf44914e 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -349,16 +349,16 @@ func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { return res, nil } -func getHeight(storeHeight int64, heightPtr *int64) (int64, error) { +func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { if heightPtr != nil { height := *heightPtr if height <= 0 { return 0, fmt.Errorf("Height must be greater than 0") } - if height > storeHeight { + if height > currentHeight { return 0, fmt.Errorf("Height must be less than or equal to the current blockchain height") } return height, nil } - return storeHeight, nil + return currentHeight, nil } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index c026cd91f..4e4c54dea 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -44,8 +44,10 @@ import ( // } // ``` func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + // The latest validator that we know is the + // NextValidator of the last block. 
+ height := consensusState.GetState().LastBlockHeight + 1 + height, err := getHeight(height, heightPtr) if err != nil { return nil, err } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 27302be13..516eced05 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -33,10 +33,8 @@ type ResultBlock struct { // Commit and Header type ResultCommit struct { - // SignedHeader is header and commit, embedded so we only have - // one level in the json output - types.SignedHeader - CanonicalCommit bool `json:"canonical"` + types.SignedHeader `json:"signed_header"` + CanonicalCommit bool `json:"canonical"` } // ABCI results from a block diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 9bdb4dffa..8d011ce55 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -55,7 +55,7 @@ func StartHTTPServer( listener, RecoverAndLogHandler(handler, logger), ) - logger.Error("RPC HTTP server stopped", "err", err) + logger.Info("RPC HTTP server stopped", "err", err) }() return listener, nil } diff --git a/scripts/install_abci_apps.sh b/scripts/install_abci_apps.sh index eb70070df..ee4b9dde4 100644 --- a/scripts/install_abci_apps.sh +++ b/scripts/install_abci_apps.sh @@ -4,8 +4,8 @@ COMMIT=$(bash scripts/dep_utils/parse.sh abci) echo "Checking out vendored commit for abci: $COMMIT" -go get -d github.com/tendermint/abci -cd "$GOPATH/src/github.com/tendermint/abci" || exit +go get -d github.com/tendermint/tendermint/abci +cd "$GOPATH/src/github.com/tendermint/tendermint/abci" || exit git checkout "$COMMIT" make get_tools make get_vendor_deps diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index c55713c7f..efcac0f05 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -2,12 +2,12 @@ package main import ( "encoding/hex" - "encoding/json" "fmt" "os" "context" + "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/rpc/grpc" ) @@ -33,7 +33,7 @@ func main() { os.Exit(1) } - bz, err := json.Marshal(res) + bz, err := amino.NewCodec().MarshalJSON(res) if err != nil { fmt.Println(err) os.Exit(1) diff --git a/types/block.go b/types/block.go index e72b5fc7b..0faa24dbb 100644 --- a/types/block.go +++ b/types/block.go @@ -360,6 +360,7 @@ func (commit *Commit) IsCommit() bool { } // ValidateBasic performs basic validation that doesn't involve state data. +// Does not actually check the cryptographic signatures. func (commit *Commit) ValidateBasic() error { if commit.BlockID.IsZero() { return errors.New("Commit cannot be for nil block") @@ -369,23 +370,23 @@ func (commit *Commit) ValidateBasic() error { } height, round := commit.Height(), commit.Round() - // validate the precommits + // Validate the precommits. for _, precommit := range commit.Precommits { // It's OK for precommits to be missing. if precommit == nil { continue } - // Ensure that all votes are precommits + // Ensure that all votes are precommits. if precommit.Type != VoteTypePrecommit { return fmt.Errorf("Invalid commit vote. Expected precommit, got %v", precommit.Type) } - // Ensure that all heights are the same + // Ensure that all heights are the same. if precommit.Height != height { return fmt.Errorf("Invalid commit precommit height. Expected %v, got %v", height, precommit.Height) } - // Ensure that all rounds are the same + // Ensure that all rounds are the same. if precommit.Round != round { return fmt.Errorf("Invalid commit precommit round. 
Expected %v, got %v", round, precommit.Round) @@ -417,19 +418,77 @@ func (commit *Commit) StringIndented(indent string) string { } return fmt.Sprintf(`Commit{ %s BlockID: %v -%s Precommits: %v +%s Precommits: +%s %v %s}#%v`, indent, commit.BlockID, - indent, strings.Join(precommitStrings, "\n"+indent+" "), + indent, + indent, strings.Join(precommitStrings, "\n"+indent+" "), indent, commit.hash) } //----------------------------------------------------------------------------- -// SignedHeader is a header along with the commits that prove it +// SignedHeader is a header along with the commits that prove it. type SignedHeader struct { - Header *Header `json:"header"` - Commit *Commit `json:"commit"` + *Header `json:"header"` + Commit *Commit `json:"commit"` +} + +// ValidateBasic does basic consistency checks and makes sure the header +// and commit are consistent. +// +// NOTE: This does not actually check the cryptographic signatures. Make +// sure to use a Certifier to validate the signatures actually provide a +// significantly strong proof for this header's validity. +func (sh SignedHeader) ValidateBasic(chainID string) error { + + // Make sure the header is consistent with the commit. + if sh.Header == nil { + return errors.New("SignedHeader missing header.") + } + if sh.Commit == nil { + return errors.New("SignedHeader missing commit (precommit votes).") + } + // Check ChainID. + if sh.ChainID != chainID { + return fmt.Errorf("Header belongs to another chain '%s' not '%s'", + sh.ChainID, chainID) + } + // Check Height. + if sh.Commit.Height() != sh.Height { + return fmt.Errorf("SignedHeader header and commit height mismatch: %v vs %v", + sh.Height, sh.Commit.Height()) + } + // Check Hash. + hhash := sh.Hash() + chash := sh.Commit.BlockID.Hash + if !bytes.Equal(hhash, chash) { + return fmt.Errorf("SignedHeader commit signs block %X, header is block %X", + chash, hhash) + } + // ValidateBasic on the Commit. + err := sh.Commit.ValidateBasic() + if err != nil { + return cmn.ErrorWrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") + } + return nil +} + +func (sh SignedHeader) String() string { + return sh.StringIndented("") +} + +// StringIndented returns a string representation of the SignedHeader. 
+func (sh SignedHeader) StringIndented(indent string) string { + return fmt.Sprintf(`SignedHeader{ +%s %v +%s %v +%s}`, + indent, sh.Header.StringIndented(indent+" "), + indent, sh.Commit.StringIndented(indent+" "), + indent) + return "" } //----------------------------------------------------------------------------- diff --git a/types/canonical_json.go b/types/canonical_json.go index 258f7714b..14881f622 100644 --- a/types/canonical_json.go +++ b/types/canonical_json.go @@ -9,7 +9,7 @@ import ( // Canonical json is amino's json for structs with fields in alphabetical order // TimeFormat is used for generating the sigs -const TimeFormat = "2006-01-02T15:04:05.000Z" +const TimeFormat = time.RFC3339Nano type CanonicalJSONBlockID struct { Hash cmn.HexBytes `json:"hash,omitempty"` @@ -110,5 +110,5 @@ func CanonicalTime(t time.Time) string { // Note that sending time over amino resets it to // local time, we need to force UTC here, so the // signatures match - return t.UTC().Format(TimeFormat) + return t.Round(0).UTC().Format(TimeFormat) } diff --git a/types/proposal.go b/types/proposal.go index 52ce8756e..964ca0caf 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -34,7 +34,7 @@ func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRou return &Proposal{ Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: time.Now().Round(0).UTC(), BlockPartsHeader: blockPartsHeader, POLRound: polRound, POLBlockID: polBlockID, diff --git a/types/validator_set.go b/types/validator_set.go index 8f085090e..dc1d0e882 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -29,48 +29,51 @@ type ValidatorSet struct { totalVotingPower int64 } -func NewValidatorSet(vals []*Validator) *ValidatorSet { - validators := make([]*Validator, len(vals)) - for i, val := range vals { +func NewValidatorSet(valz []*Validator) *ValidatorSet { + if valz != nil && len(valz) == 0 { + panic("validator set initialization slice cannot be an empty slice (but it can be nil)") + } + validators := make([]*Validator, len(valz)) + for i, val := range valz { validators[i] = val.Copy() } sort.Sort(ValidatorsByAddress(validators)) - vs := &ValidatorSet{ + vals := &ValidatorSet{ Validators: validators, } - - if vals != nil { - vs.IncrementAccum(1) + if valz != nil { + vals.IncrementAccum(1) } - return vs + return vals } // Increment Accum and update the proposer on a copy, and return it. -func (valSet *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { - copy := valSet.Copy() +func (vals *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { + copy := vals.Copy() copy.IncrementAccum(times) return copy } // Increment Accum and update the proposer. -func (valSet *ValidatorSet) IncrementAccum(times int) { +func (vals *ValidatorSet) IncrementAccum(times int) { + // Add VotingPower * times to each validator and order into heap. validatorsHeap := cmn.NewHeap() - for _, val := range valSet.Validators { - // check for overflow both multiplication and sum + for _, val := range vals.Validators { + // Check for overflow both multiplication and sum. val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times))) validatorsHeap.PushComparable(val, accumComparable{val}) } - // Decrement the validator with most accum times times + // Decrement the validator with most accum times times. 
for i := 0; i < times; i++ { mostest := validatorsHeap.Peek().(*Validator) // mind underflow - mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower()) + mostest.Accum = safeSubClip(mostest.Accum, vals.TotalVotingPower()) if i == times-1 { - valSet.Proposer = mostest + vals.Proposer = mostest } else { validatorsHeap.Update(mostest, accumComparable{mostest}) } @@ -78,36 +81,36 @@ func (valSet *ValidatorSet) IncrementAccum(times int) { } // Copy each validator into a new ValidatorSet -func (valSet *ValidatorSet) Copy() *ValidatorSet { - validators := make([]*Validator, len(valSet.Validators)) - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Copy() *ValidatorSet { + validators := make([]*Validator, len(vals.Validators)) + for i, val := range vals.Validators { // NOTE: must copy, since IncrementAccum updates in place. validators[i] = val.Copy() } return &ValidatorSet{ Validators: validators, - Proposer: valSet.Proposer, - totalVotingPower: valSet.totalVotingPower, + Proposer: vals.Proposer, + totalVotingPower: vals.totalVotingPower, } } // HasAddress returns true if address given is in the validator set, false - // otherwise. -func (valSet *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) HasAddress(address []byte) bool { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) + return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) } // GetByAddress returns an index of the validator with address and validator // itself if found. Otherwise, -1 and nil are returned. -func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) { - return idx, valSet.Validators[idx].Copy() + if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) { + return idx, vals.Validators[idx].Copy() } return -1, nil } @@ -115,45 +118,45 @@ func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Valida // GetByIndex returns the validator's address and validator itself by index. // It returns nil values if index is less than 0 or greater or equal to // len(ValidatorSet.Validators). -func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { - if index < 0 || index >= len(valSet.Validators) { +func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { + if index < 0 || index >= len(vals.Validators) { return nil, nil } - val = valSet.Validators[index] + val = vals.Validators[index] return val.Address, val.Copy() } // Size returns the length of the validator set. -func (valSet *ValidatorSet) Size() int { - return len(valSet.Validators) +func (vals *ValidatorSet) Size() int { + return len(vals.Validators) } // TotalVotingPower returns the sum of the voting powers of all validators. 
-func (valSet *ValidatorSet) TotalVotingPower() int64 { - if valSet.totalVotingPower == 0 { - for _, val := range valSet.Validators { +func (vals *ValidatorSet) TotalVotingPower() int64 { + if vals.totalVotingPower == 0 { + for _, val := range vals.Validators { // mind overflow - valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower) + vals.totalVotingPower = safeAddClip(vals.totalVotingPower, val.VotingPower) } } - return valSet.totalVotingPower + return vals.totalVotingPower } // GetProposer returns the current proposer. If the validator set is empty, nil // is returned. -func (valSet *ValidatorSet) GetProposer() (proposer *Validator) { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) GetProposer() (proposer *Validator) { + if len(vals.Validators) == 0 { return nil } - if valSet.Proposer == nil { - valSet.Proposer = valSet.findProposer() + if vals.Proposer == nil { + vals.Proposer = vals.findProposer() } - return valSet.Proposer.Copy() + return vals.Proposer.Copy() } -func (valSet *ValidatorSet) findProposer() *Validator { +func (vals *ValidatorSet) findProposer() *Validator { var proposer *Validator - for _, val := range valSet.Validators { + for _, val := range vals.Validators { if proposer == nil || !bytes.Equal(val.Address, proposer.Address) { proposer = proposer.CompareAccum(val) } @@ -163,12 +166,12 @@ func (valSet *ValidatorSet) findProposer() *Validator { // Hash returns the Merkle root hash build using validators (as leaves) in the // set. -func (valSet *ValidatorSet) Hash() []byte { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) Hash() []byte { + if len(vals.Validators) == 0 { return nil } - hashers := make([]merkle.Hasher, len(valSet.Validators)) - for i, val := range valSet.Validators { + hashers := make([]merkle.Hasher, len(vals.Validators)) + for i, val := range vals.Validators { hashers[i] = val } return merkle.SimpleHashFromHashers(hashers) @@ -176,70 +179,70 @@ func (valSet *ValidatorSet) Hash() []byte { // Add adds val to the validator set and returns true. It returns false if val // is already in the set. 
-func (valSet *ValidatorSet) Add(val *Validator) (added bool) { +func (vals *ValidatorSet) Add(val *Validator) (added bool) { val = val.Copy() - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0 + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(val.Address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) { - valSet.Validators = append(valSet.Validators, val) + if idx >= len(vals.Validators) { + vals.Validators = append(vals.Validators, val) // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true - } else if bytes.Equal(valSet.Validators[idx].Address, val.Address) { + } else if bytes.Equal(vals.Validators[idx].Address, val.Address) { return false } else { - newValidators := make([]*Validator, len(valSet.Validators)+1) - copy(newValidators[:idx], valSet.Validators[:idx]) + newValidators := make([]*Validator, len(vals.Validators)+1) + copy(newValidators[:idx], vals.Validators[:idx]) newValidators[idx] = val - copy(newValidators[idx+1:], valSet.Validators[idx:]) - valSet.Validators = newValidators + copy(newValidators[idx+1:], vals.Validators[idx:]) + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } } // Update updates val and returns true. It returns false if val is not present // in the set. -func (valSet *ValidatorSet) Update(val *Validator) (updated bool) { - index, sameVal := valSet.GetByAddress(val.Address) +func (vals *ValidatorSet) Update(val *Validator) (updated bool) { + index, sameVal := vals.GetByAddress(val.Address) if sameVal == nil { return false } - valSet.Validators[index] = val.Copy() + vals.Validators[index] = val.Copy() // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } // Remove deletes the validator with address. It returns the validator removed // and true. If returns nil and false if validator is not present in the set. -func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) { + if idx >= len(vals.Validators) || !bytes.Equal(vals.Validators[idx].Address, address) { return nil, false } - removedVal := valSet.Validators[idx] - newValidators := valSet.Validators[:idx] - if idx+1 < len(valSet.Validators) { - newValidators = append(newValidators, valSet.Validators[idx+1:]...) + removedVal := vals.Validators[idx] + newValidators := vals.Validators[:idx] + if idx+1 < len(vals.Validators) { + newValidators = append(newValidators, vals.Validators[idx+1:]...) } - valSet.Validators = newValidators + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return removedVal, true } // Iterate will run the given function over the set. 
-func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { + for i, val := range vals.Validators { stop := fn(i, val.Copy()) if stop { break @@ -247,87 +250,106 @@ func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { } } -// Verify that +2/3 of the set had signed the given signBytes -func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { - if valSet.Size() != len(commit.Precommits) { - return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits)) +// Verify that +2/3 of the set had signed the given signBytes. +func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { + if vals.Size() != len(commit.Precommits) { + return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", vals.Size(), len(commit.Precommits)) } if height != commit.Height() { return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v", + blockID, commit.BlockID) + } talliedVotingPower := int64(0) round := commit.Round() for idx, precommit := range commit.Precommits { - // may be nil if validator skipped. if precommit == nil { - continue + continue // OK, some precommits can be missing. } if precommit.Height != height { - return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height) + return fmt.Errorf("Invalid commit -- wrong height: want %v got %v", height, precommit.Height) } if precommit.Round != round { - return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) + return fmt.Errorf("Invalid commit -- wrong round: want %v got %v", round, precommit.Round) } if precommit.Type != VoteTypePrecommit { return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx) } - _, val := valSet.GetByIndex(idx) - // Validate signature + // NOTE: This will go away when we refactor Commit. + if !blockID.Equals(precommit.BlockID) { + return fmt.Errorf("Invalid commit -- wrong block id @ index %v: want %v got %v", + idx, blockID, precommit.BlockID) + } + _, val := vals.GetByIndex(idx) + // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit) } - if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count - } // Good precommit! talliedVotingPower += val.VotingPower } - if talliedVotingPower > valSet.TotalVotingPower()*2/3 { + if talliedVotingPower > vals.TotalVotingPower()*2/3 { return nil } return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v", - talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) + talliedVotingPower, (vals.TotalVotingPower()*2/3 + 1)) } -// VerifyCommitAny will check to see if the set would -// be valid with a different validator set. +// VerifyFutureCommit will check to see if the set would be valid with a different +// validator set. // -// valSet is the validator set that we know -// * over 2/3 of the power in old signed this block +// vals is the old validator set that we know. Over 2/3 of the power in old +// signed this block. 
// -// newSet is the validator set that signed this block -// * only votes from old are sufficient for 2/3 majority -// in the new set as well +// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 +// can't make arbitrary state transitions. You still need > 2/3 Byzantine to +// make arbitrary state transitions. // -// That means that: -// * 10% of the valset can't just declare themselves kings -// * If the validator set is 3x old size, we need more proof to trust -func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string, +// To preserve this property in the light client, we also require > 2/3 of the +// old vals to sign the future commit at H, that way we preserve the property +// that if they weren't being truthful about the validator set at H (block hash +// -> vals hash) or about the app state (block hash -> app hash) we can slash +// > 2/3. Otherwise, the lite client isn't providing the same security +// guarantees. +// +// Even if we added a slashing condition that if you sign a block header with +// the wrong validator set, then we would only need > 1/3 of signatures from +// the old vals on the new commit, it wouldn't be sufficient because the new +// vals can be arbitrary and commit some arbitrary app hash. +// +// newSet is the validator set that signed this block. Only votes from new are +// sufficient for 2/3 majority in the new set as well, for it to be a valid +// commit. +// +// NOTE: This doesn't check whether the commit is a future commit, because the +// current height isn't part of the ValidatorSet. Caller must check that the +// commit height is greater than the height for this validator set. +func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string, blockID BlockID, height int64, commit *Commit) error { + oldVals := vals - if newSet.Size() != len(commit.Precommits) { - return cmn.NewError("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits)) - } - if height != commit.Height() { - return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) + // Commit must be a valid commit for newSet. + err := newSet.VerifyCommit(chainID, blockID, height, commit) + if err != nil { + return err } + // Check old voting power. oldVotingPower := int64(0) - newVotingPower := int64(0) seen := map[int]bool{} round := commit.Round() for idx, precommit := range commit.Precommits { - // first check as in VerifyCommit if precommit == nil { continue } if precommit.Height != height { - // return certerr.ErrHeightMismatch(height, precommit.Height) return cmn.NewError("Blocks don't match - %d vs %d", round, precommit.Round) } if precommit.Round != round { @@ -336,54 +358,45 @@ func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string if precommit.Type != VoteTypePrecommit { return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } + // NOTE: This will go away when we refactor Commit. if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count + return fmt.Errorf("Invalid commit -- wrong block id @ index %v: want %v got %v", + idx, blockID, precommit.BlockID) } - - // we only grab by address, ignoring unknown validators - vi, ov := valSet.GetByAddress(precommit.ValidatorAddress) - if ov == nil || seen[vi] { + // See if this validator is in oldVals. + idx, val := oldVals.GetByAddress(precommit.ValidatorAddress) + if val == nil || seen[idx] { continue // missing or double vote... 
 		}
-		seen[vi] = true
+		seen[idx] = true
 
-		// Validate signature old school
+		// Validate signature.
 		precommitSignBytes := precommit.SignBytes(chainID)
-		if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
+		if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
 			return cmn.NewError("Invalid commit -- invalid signature: %v", precommit)
 		}
 		// Good precommit!
-		oldVotingPower += ov.VotingPower
-
-		// check new school
-		_, cv := newSet.GetByIndex(idx)
-		if cv.PubKey.Equals(ov.PubKey) {
-			// make sure this is properly set in the current block as well
-			newVotingPower += cv.VotingPower
-		}
+		oldVotingPower += val.VotingPower
 	}
 
-	if oldVotingPower <= valSet.TotalVotingPower()*2/3 {
+	if oldVotingPower <= oldVals.TotalVotingPower()*2/3 {
 		return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v",
-			oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
-	} else if newVotingPower <= newSet.TotalVotingPower()*2/3 {
-		return cmn.NewError("Invalid commit -- insufficient cur voting power: got %v, needed %v",
-			newVotingPower, (newSet.TotalVotingPower()*2/3 + 1))
+			oldVotingPower, (oldVals.TotalVotingPower()*2/3 + 1))
 	}
 	return nil
 }
 
-func (valSet *ValidatorSet) String() string {
-	return valSet.StringIndented("")
+func (vals *ValidatorSet) String() string {
+	return vals.StringIndented("")
 }
 
 // String
-func (valSet *ValidatorSet) StringIndented(indent string) string {
-	if valSet == nil {
+func (vals *ValidatorSet) StringIndented(indent string) string {
+	if vals == nil {
 		return "nil-ValidatorSet"
 	}
 	valStrings := []string{}
-	valSet.Iterate(func(index int, val *Validator) bool {
+	vals.Iterate(func(index int, val *Validator) bool {
 		valStrings = append(valStrings, val.String())
 		return false
 	})
@@ -392,7 +405,7 @@ func (valSet *ValidatorSet) StringIndented(indent string) string {
 %s  Validators:
 %s    %v
 %s}`,
-		indent, valSet.GetProposer().String(),
+		indent, vals.GetProposer().String(),
 		indent,
 		indent, strings.Join(valStrings, "\n"+indent+"    "),
 		indent)
@@ -405,18 +418,18 @@ func (valSet *ValidatorSet) StringIndented(indent string) string {
 // Sort validators by address
 type ValidatorsByAddress []*Validator
 
-func (vs ValidatorsByAddress) Len() int {
-	return len(vs)
+func (valz ValidatorsByAddress) Len() int {
+	return len(valz)
 }
 
-func (vs ValidatorsByAddress) Less(i, j int) bool {
-	return bytes.Compare(vs[i].Address, vs[j].Address) == -1
+func (valz ValidatorsByAddress) Less(i, j int) bool {
+	return bytes.Compare(valz[i].Address, valz[j].Address) == -1
 }
 
-func (vs ValidatorsByAddress) Swap(i, j int) {
-	it := vs[i]
-	vs[i] = vs[j]
-	vs[j] = it
+func (valz ValidatorsByAddress) Swap(i, j int) {
+	it := valz[i]
+	valz[i] = valz[j]
+	valz[j] = it
 }
 
 //-------------------------------------
@@ -440,16 +453,16 @@ func (ac accumComparable) Less(o interface{}) bool {
 // NOTE: PrivValidator are in order.
 // UNSTABLE
 func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) {
-	vals := make([]*Validator, numValidators)
+	valz := make([]*Validator, numValidators)
 	privValidators := make([]PrivValidator, numValidators)
 	for i := 0; i < numValidators; i++ {
 		val, privValidator := RandValidator(false, votingPower)
-		vals[i] = val
+		valz[i] = val
 		privValidators[i] = privValidator
 	}
-	valSet := NewValidatorSet(vals)
+	vals := NewValidatorSet(valz)
 	sort.Sort(PrivValidatorsByAddress(privValidators))
-	return valSet, privValidators
+	return vals, privValidators
 }
 
 ///////////////////////////////////////////////////////////////////////////////
diff --git a/types/vote_set.go b/types/vote_set.go
index a60d95daf..1c7fac19e 100644
--- a/types/vote_set.go
+++ b/types/vote_set.go
@@ -170,7 +170,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
 			"Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size())
 	}
 
-	// Ensure that the signer has the right address
+	// Ensure that the signer has the right address.
 	if !bytes.Equal(valAddr, lookupAddr) {
 		return false, errors.Wrapf(ErrVoteInvalidValidatorAddress,
 			"vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.",
@@ -190,7 +190,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
 		return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey)
 	}
 
-	// Add vote and get conflicting vote if any
+	// Add vote and get conflicting vote if any.
 	added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower)
 	if conflicting != nil {
 		return added, NewConflictingVoteError(val, conflicting, vote)
@@ -201,7 +201,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
 	return added, nil
 }
 
-// Returns (vote, true) if vote exists for valIndex and blockKey
+// Returns (vote, true) if vote exists for valIndex and blockKey.
 func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) {
 	if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey {
 		return existing, true
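Aside (not part of the patch): the VerifyFutureCommit doc comment above describes a light-client-style use in which the caller must itself check that the commit height is above the height of its trusted validator set before calling. The sketch below is a minimal illustration of that contract under assumptions: the function name verifySkippingCommit, the trustedHeight parameter, and the import path github.com/tendermint/tendermint/types are hypothetical, while ValidatorSet.VerifyFutureCommit and its signature are taken from the diff itself.

package example

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// verifySkippingCommit sketches how a light client might accept a commit at a
// height above its trusted validator set, per the NOTE on VerifyFutureCommit:
// the height comparison is the caller's responsibility.
func verifySkippingCommit(
	trustedHeight int64,
	trustedVals, newVals *types.ValidatorSet,
	chainID string,
	blockID types.BlockID,
	height int64,
	commit *types.Commit,
) error {
	if height <= trustedHeight {
		return fmt.Errorf("commit height %d is not above trusted height %d", height, trustedHeight)
	}
	// VerifyFutureCommit first checks that the commit is valid for newVals
	// (> 2/3 of the new voting power, via newVals.VerifyCommit), then
	// additionally requires > 2/3 of the trusted (old) voting power to have
	// signed it.
	return trustedVals.VerifyFutureCommit(newVals, chainID, blockID, height, commit)
}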