From 321061125f8d2549737f2f309966d0b488a93e81 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 5 Oct 2017 12:02:02 +0400 Subject: [PATCH 01/19] add app_options to GenesisDoc (Refs #564) --- types/genesis.go | 3 ++- types/genesis_test.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/types/genesis.go b/types/genesis.go index f1b2736f8..797aff9cc 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" - "github.com/tendermint/go-crypto" + crypto "github.com/tendermint/go-crypto" "github.com/tendermint/go-wire/data" cmn "github.com/tendermint/tmlibs/common" ) @@ -29,6 +29,7 @@ type GenesisDoc struct { ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators"` AppHash data.Bytes `json:"app_hash"` + AppOptions *json.RawMessage `json:"app_options,omitempty"` } // SaveAs is a utility method for saving GenensisDoc as a JSON file. diff --git a/types/genesis_test.go b/types/genesis_test.go index 0ffce4b53..fb0d4ca23 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -30,7 +30,7 @@ func TestGenesis(t *testing.T) { } // test a good one by raw json - genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"ed25519","data":"961EAB8752E51A03618502F55C2B6E09C38C65635C64CCF3173ED452CF86C957"},"power":10,"name":""}],"app_hash":""}`) + genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"ed25519","data":"961EAB8752E51A03618502F55C2B6E09C38C65635C64CCF3173ED452CF86C957"},"power":10,"name":""}],"app_hash":"","app_options":{"account_owner": "Bob"}}`) _, err := GenesisDocFromJSON(genDocBytes) assert.NoError(t, err, "expected no error for good genDoc json") From b26f81239931f07afd7dfe6a0bbee76eca044153 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 5 Oct 2017 12:05:12 +0400 Subject: [PATCH 02/19] update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c48d0850b..603c35a4a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,12 @@ BUG FIXES: - Graceful handling/recovery for apps that have non-determinism or fail to halt - Graceful handling/recovery for violations of safety, or liveness +## 0.12.0 (TBD) + +IMPROVEMENTS: + - genesis doc: added `app_options` field, which, along with the other fields, + will be returned upon `/genesis` rpc call. + ## 0.11.1 (October 10, 2017) IMPROVEMENTS: From 616b07ff6bd7c1a4888bbb82c56ade25024e7f4d Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 13 Oct 2017 13:34:47 +0400 Subject: [PATCH 03/19] make AppOptions an interface{} --- types/genesis.go | 2 +- types/validator_set_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/types/genesis.go b/types/genesis.go index 797aff9cc..e33f60258 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -29,7 +29,7 @@ type GenesisDoc struct { ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators"` AppHash data.Bytes `json:"app_hash"` - AppOptions *json.RawMessage `json:"app_options,omitempty"` + AppOptions interface{} `json:"app_options,omitempty"` } // SaveAs is a utility method for saving GenensisDoc as a JSON file. 
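With `AppOptions` declared as `interface{}`, the standard JSON decoder leaves whatever was under `app_options` as a `map[string]interface{}`, so an application normally re-marshals it into its own options struct. A minimal sketch of that pattern, assuming a hypothetical `AppOptions` struct on the application side (the `account_owner` key mirrors the genesis test above; nothing here is part of the patch itself):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// AppOptions is a hypothetical application-defined options struct.
type AppOptions struct {
	AccountOwner string `json:"account_owner"`
}

func appOptionsFromGenesis(genDocBytes []byte) (*AppOptions, error) {
	genDoc, err := types.GenesisDocFromJSON(genDocBytes)
	if err != nil {
		return nil, err
	}
	if genDoc.AppOptions == nil {
		return nil, nil // no app_options in this genesis doc
	}
	// GenesisDoc.AppOptions is an interface{} holding whatever the JSON
	// decoder produced (usually map[string]interface{}); round-trip it
	// through JSON to fill the application's own struct.
	raw, err := json.Marshal(genDoc.AppOptions)
	if err != nil {
		return nil, err
	}
	var opts AppOptions
	if err := json.Unmarshal(raw, &opts); err != nil {
		return nil, err
	}
	return &opts, nil
}

func main() {
	genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"ed25519","data":"961EAB8752E51A03618502F55C2B6E09C38C65635C64CCF3173ED452CF86C957"},"power":10,"name":""}],"app_hash":"","app_options":{"account_owner": "Bob"}}`)
	opts, err := appOptionsFromGenesis(genDocBytes)
	fmt.Println(opts, err)
}
```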
diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 71a1993e7..69f25d620 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/go-crypto" + cmn "github.com/tendermint/tmlibs/common" ) func randPubKey() crypto.PubKey { From 0a7b2ab52c3cf80187eaf839d70db875f7403419 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 20 Oct 2017 21:56:10 +0400 Subject: [PATCH 04/19] fix invalid memory address or nil pointer dereference error (Refs #762) https://github.com/tendermint/tendermint/issues/762#issuecomment-338276055 ``` E[10-19|04:52:38.969] Stopping peer for error module=p2p peer="Peer{MConn{178.62.46.14:46656} B14916FAF38A out}" err="Error: runtime error: invalid memory address or nil pointer dereference\nStack: goroutine 529485 [running]:\nruntime/debug.Stack(0xc4355cfb38, 0xb463e0, 0x11b1c30)\n\t/usr/local/go/src/runtime/debug/stack.go:24 +0xa7\ngithub.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p.(*MConnection)._recover(0xc439a28870)\n\t/home/ubuntu/go/src/github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p/connection.go:206 +0x6e\npanic(0xb463e0, 0x11b1c30)\n\t/usr/local/go/src/runtime/panic.go:491 +0x283\ngithub.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/blockchain.(*bpPeer).decrPending(0x0, 0x381)\n\t/home/ubuntu/go/src/github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/blockchain/pool.go:376 +0x22\ngithub.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/blockchain.(*BlockPool).AddBlock(0xc4200e4000, 0xc4266d1f00, 0x40, 0xc432ac9640, 0x381)\n\t/home/ubuntu/go/src/github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/blockchain/pool.go:215 +0x139\ngithub.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/blockchain.(*BlockchainReactor).Receive(0xc42050a780, 0xc420257740, 0x1171be0, 0xc42ff302d0, 0xc4384b2000, 0x381, 0x1000)\n\t/home/ubuntu/go/src/github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/blockchain/reactor.go:160 +0x712\ngithub.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p.createMConnection.func1(0x11e5040, 0xc4384b2000, 0x381, 0x1000)\n\t/home/ubuntu/go/src/github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p/peer.go:334 +0xbd\ngithub.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p.(*MConnection).recvRoutine(0xc439a28870)\n\t/home/ubuntu/go/src/github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p/connection.go:475 +0x4a3\ncreated by github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p.(*MConnection).OnStart\n\t/home/ubuntu/go/src/github.com/cosmos/gaia/vendor/github.com/tendermint/tendermint/p2p/connection.go:170 +0x187\n" ``` --- blockchain/pool.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/blockchain/pool.go b/blockchain/pool.go index bd52e280f..ca15c9910 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -212,7 +212,9 @@ func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int if requester.setBlock(block, peerID) { pool.numPending-- peer := pool.peers[peerID] - peer.decrPending(blockSize) + if peer != nil { + peer.decrPending(blockSize) + } } else { // Bad peer? 
} From d64a48e0ee3419c48c1a2c91c20c34ccc48c53d2 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 20 Oct 2017 23:56:21 +0400 Subject: [PATCH 05/19] set logger on blockchain pool --- blockchain/pool.go | 4 +--- blockchain/reactor.go | 11 +++++++++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/blockchain/pool.go b/blockchain/pool.go index ca15c9910..aac4c77c3 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -69,9 +69,7 @@ func (pool *BlockPool) OnStart() error { return nil } -func (pool *BlockPool) OnStop() { - pool.BaseService.OnStop() -} +func (pool *BlockPool) OnStop() {} // Run spawns requesters as needed. func (pool *BlockPool) makeRequestersRoutine() { diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 9cc01fbac..6ff010038 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -12,6 +12,7 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tmlibs/log" ) const ( @@ -79,7 +80,13 @@ func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, return bcR } -// OnStart implements BaseService +// SetLogger implements cmn.Service by setting the logger on reactor and pool. +func (bcR *BlockchainReactor) SetLogger(l log.Logger) { + bcR.BaseService.Logger = l + bcR.pool.Logger = l +} + +// OnStart implements cmn.Service. func (bcR *BlockchainReactor) OnStart() error { bcR.BaseReactor.OnStart() if bcR.fastSync { @@ -92,7 +99,7 @@ func (bcR *BlockchainReactor) OnStart() error { return nil } -// OnStop implements BaseService +// OnStop implements cmn.Service. func (bcR *BlockchainReactor) OnStop() { bcR.BaseReactor.OnStop() bcR.pool.Stop() From 0bbf38141a6498f661d6c956c9f45066c312d90e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 20 Oct 2017 21:19:25 -0400 Subject: [PATCH 06/19] blockchain/pool: some comments and small changes --- blockchain/pool.go | 23 ++++++++++++++++++++--- blockchain/reactor.go | 4 ++-- p2p/pex_reactor.go | 1 + 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/blockchain/pool.go b/blockchain/pool.go index aac4c77c3..348ba09b3 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -11,11 +11,25 @@ import ( "github.com/tendermint/tmlibs/log" ) +/* + +eg, L = latency = 0.1s + P = num peers = 10 + FN = num full nodes + BS = 1kB block size + CB = 1 Mbit/s = 128 kB/s + CB/P = 12.8 kB + B/S = CB/P/BS = 12.8 blocks/s + + 12.8 * 0.1 = 1.28 blocks on conn + +*/ + const ( requestIntervalMS = 250 maxTotalRequesters = 300 maxPendingRequests = maxTotalRequesters - maxPendingRequestsPerPeer = 75 + maxPendingRequestsPerPeer = 10 minRecvRate = 10240 // 10Kb/s ) @@ -186,15 +200,16 @@ func (pool *BlockPool) PopRequest() { // Remove the peer and redo request from others. func (pool *BlockPool) RedoRequest(height int) { pool.mtx.Lock() + defer pool.mtx.Unlock() + request := pool.requesters[height] - pool.mtx.Unlock() if request.block == nil { cmn.PanicSanity("Expected block to be non-nil") } // RemovePeer will redo all requesters associated with this peer. // TODO: record this malfeasance - pool.RemovePeer(request.peerID) + pool.removePeer(request.peerID) } // TODO: ensure that blocks come in order for each peer. @@ -204,6 +219,8 @@ func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int requester := pool.requesters[block.Height] if requester == nil { + // a block we didn't expect. 
+ // TODO:if height is too far ahead, punish peer return } diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 6ff010038..b46ad40fa 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -221,9 +221,9 @@ FOR_LOOP: // ask for status updates go bcR.BroadcastStatusRequest() case <-switchToConsensusTicker.C: - height, numPending, _ := bcR.pool.GetStatus() + height, numPending, lenRequesters := bcR.pool.GetStatus() outbound, inbound, _ := bcR.Switch.NumPeers() - bcR.Logger.Info("Consensus ticker", "numPending", numPending, "total", len(bcR.pool.requesters), + bcR.Logger.Info("Consensus ticker", "numPending", numPending, "total", lenRequesters, "outbound", outbound, "inbound", inbound) if bcR.pool.IsCaughtUp() { bcR.Logger.Info("Time to switch to consensus reactor!", "height", height) diff --git a/p2p/pex_reactor.go b/p2p/pex_reactor.go index 69ab55cc9..54c2d06b5 100644 --- a/p2p/pex_reactor.go +++ b/p2p/pex_reactor.go @@ -139,6 +139,7 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { switch msg := msg.(type) { case *pexRequestMessage: // src requested some peers. + // NOTE: we might send an empty selection r.SendAddrs(src, r.book.GetSelection()) case *pexAddrsMessage: // We received some peer addresses from src. From 2802a06a080852cdc810c4e7a0ce028ecdcd960d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 23 Oct 2017 19:46:14 -0400 Subject: [PATCH 07/19] blockchain/store: comment about panics --- blockchain/store.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/blockchain/store.go b/blockchain/store.go index 79edfeaf5..5bf854775 100644 --- a/blockchain/store.go +++ b/blockchain/store.go @@ -25,7 +25,8 @@ Currently the precommit signatures are duplicated in the Block parts as well as the Commit. In the future this may change, perhaps by moving the Commit data outside the Block. -Panics indicate probable corruption in the data +// NOTE: BlockStore methods will panic if they encounter errors +// deserializing loaded data, indicating probable corruption on disk. */ type BlockStore struct { db dbm.DB From 6a5254c4758df3107aa0769ae9c057d4c0d3683f Mon Sep 17 00:00:00 2001 From: Matt Bell Date: Thu, 28 Sep 2017 17:41:44 -0700 Subject: [PATCH 08/19] Added local blockchain sync benchmark script --- benchmarks/blockchain/.gitignore | 2 + benchmarks/blockchain/localsync.sh | 80 ++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 benchmarks/blockchain/.gitignore create mode 100755 benchmarks/blockchain/localsync.sh diff --git a/benchmarks/blockchain/.gitignore b/benchmarks/blockchain/.gitignore new file mode 100644 index 000000000..9e67bd47d --- /dev/null +++ b/benchmarks/blockchain/.gitignore @@ -0,0 +1,2 @@ +data + diff --git a/benchmarks/blockchain/localsync.sh b/benchmarks/blockchain/localsync.sh new file mode 100755 index 000000000..e181c5655 --- /dev/null +++ b/benchmarks/blockchain/localsync.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +DATA=$GOPATH/src/github.com/tendermint/tendermint/benchmarks/blockchain/data +if [ ! -d $DATA ]; then + echo "no data found, generating a chain... 
(this only has to happen once)" + + tendermint init --home $DATA + cp $DATA/config.toml $DATA/config2.toml + echo " + [consensus] + timeout_commit = 0 + " >> $DATA/config.toml + + echo "starting node" + tendermint node \ + --home $DATA \ + --proxy_app dummy \ + --p2p.laddr tcp://127.0.0.1:56656 \ + --rpc.laddr tcp://127.0.0.1:56657 \ + --log_level error & + + echo "making blocks for 60s" + sleep 60 + + mv $DATA/config2.toml $DATA/config.toml + + kill %1 + + echo "done generating chain." +fi + +# validator node +HOME1=$TMPDIR$RANDOM$RANDOM +cp -R $DATA $HOME1 +echo "starting validator node" +tendermint node \ + --home $HOME1 \ + --proxy_app dummy \ + --p2p.laddr tcp://127.0.0.1:56656 \ + --rpc.laddr tcp://127.0.0.1:56657 \ + --log_level error & +sleep 1 + +# downloader node +HOME2=$TMPDIR$RANDOM$RANDOM +tendermint init --home $HOME2 +cp $HOME1/genesis.json $HOME2 +printf "starting downloader node" +tendermint node \ + --home $HOME2 \ + --proxy_app dummy \ + --p2p.laddr tcp://127.0.0.1:56666 \ + --rpc.laddr tcp://127.0.0.1:56667 \ + --p2p.seeds 127.0.0.1:56656 \ + --log_level error & + +# wait for node to start up so we only count time where we are actually syncing +sleep 0.5 +while curl localhost:56667/status 2> /dev/null | grep "\"latest_block_height\": 0," > /dev/null +do + printf '.' + sleep 0.2 +done +echo + +echo "syncing blockchain for 10s" +for i in {1..10} +do + sleep 1 + HEIGHT="$(curl localhost:56667/status 2> /dev/null \ + | grep 'latest_block_height' \ + | grep -o ' [0-9]*' \ + | xargs)" + let 'RATE = HEIGHT / i' + echo "height: $HEIGHT, blocks/sec: $RATE" +done + +kill %1 +kill %2 +rm -rf $HOME1 $HOME2 From e06bbaf3036d529b84b737dfd3513999085aa493 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 24 Oct 2017 15:32:01 +0400 Subject: [PATCH 09/19] refactor TestNoBlockMessageResponse to eliminate a race condition --- blockchain/reactor_test.go | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 633cae169..584aadf39 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -1,12 +1,11 @@ package blockchain import ( - "bytes" "testing" wire "github.com/tendermint/go-wire" cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" cfg "github.com/tendermint/tendermint/config" @@ -15,28 +14,24 @@ import ( "github.com/tendermint/tendermint/types" ) -func newBlockchainReactor(logger log.Logger, maxBlockHeight int) *BlockchainReactor { - config := cfg.ResetTestRoot("node_node_test") +func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor { + logger := log.TestingLogger() + config := cfg.ResetTestRoot("blockchain_reactor_test") - blockStoreDB := db.NewDB("blockstore", config.DBBackend, config.DBDir()) - blockStore := NewBlockStore(blockStoreDB) - - stateLogger := logger.With("module", "state") + blockStore := NewBlockStore(dbm.NewMemDB()) // Get State - stateDB := db.NewDB("state", config.DBBackend, config.DBDir()) - state, _ := sm.GetState(stateDB, config.GenesisFile()) - - state.SetLogger(stateLogger) + state, _ := sm.GetState(dbm.NewMemDB(), config.GenesisFile()) + state.SetLogger(logger.With("module", "state")) state.Save() // Make the blockchainReactor itself fastSync := true bcReactor := NewBlockchainReactor(state.Copy(), nil, blockStore, fastSync) + bcReactor.SetLogger(logger.With("module", "blockchain")) // Next: we need to set a switch in order for 
peers to be added in bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig()) - bcReactor.SetLogger(logger.With("module", "blockchain")) // Lastly: let's add some blocks in for blockHeight := 1; blockHeight <= maxBlockHeight; blockHeight++ { @@ -50,12 +45,10 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int) *BlockchainReac } func TestNoBlockMessageResponse(t *testing.T) { - logBuf := new(bytes.Buffer) - logger := log.NewTMLogger(logBuf) maxBlockHeight := 20 - bcr := newBlockchainReactor(logger, maxBlockHeight) - go bcr.OnStart() + bcr := newBlockchainReactor(maxBlockHeight) + bcr.Start() defer bcr.Stop() // Add some peers in From 01be6fa3098786b57120083597ba51fea29995cf Mon Sep 17 00:00:00 2001 From: Silas Davis Date: Tue, 24 Oct 2017 13:20:49 +0100 Subject: [PATCH 10/19] Fix WSClient blocking in the readRoutine after Stop() as it tries to write to ResultsCh --- rpc/lib/client/ws_client.go | 20 +++++++++++++++----- rpc/lib/client/ws_client_test.go | 23 +++++++++++++++++++++++ 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index d233004b6..62dce4d44 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -48,10 +48,11 @@ type WSClient struct { onReconnect func() // internal channels - send chan types.RPCRequest // user requests - backlog chan types.RPCRequest // stores a single user request received during a conn failure - reconnectAfter chan error // reconnect requests - readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine + send chan types.RPCRequest // user requests + backlog chan types.RPCRequest // stores a single user request received during a conn failure + reconnectAfter chan error // reconnect requests + readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine + writeRoutineQuit chan struct{} // a way for writeRoutine to close readRoutine (on <-BaseService.Quit) wg sync.WaitGroup @@ -282,6 +283,7 @@ func (c *WSClient) reconnect() error { func (c *WSClient) startReadWriteRoutines() { c.wg.Add(2) c.readRoutineQuit = make(chan struct{}) + c.writeRoutineQuit = make(chan struct{}) go c.readRoutine() go c.writeRoutine() } @@ -387,6 +389,9 @@ func (c *WSClient) writeRoutine() { case <-c.readRoutineQuit: return case <-c.Quit: + // We need to fan out the quit message from the single BaseService Quit Channel to the readRoutine + // Use a non-blocking close rather than a send in case readRoutine is in the process of quitting + close(c.writeRoutineQuit) c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) return } @@ -441,7 +446,12 @@ func (c *WSClient) readRoutine() { continue } c.Logger.Info("got response", "resp", response.Result) - c.ResultsCh <- *response.Result + // Combine a non-blocking read on writeRoutineQuit with a non-blocking write on ResultsCh to avoid blocking + // c.wg.Wait() in c.Stop() + select { + case <-c.writeRoutineQuit: + case c.ResultsCh <- *response.Result: + } } } diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index f5aa027fe..049301f33 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -162,6 +162,29 @@ func TestWSClientReconnectFailure(t *testing.T) { } } +func TestNotBlockingOnStop(t *testing.T) { + timeout := 2 *time.Second + s := httptest.NewServer(&myHandler{}) + c := startClient(t, s.Listener.Addr()) + c.Call(context.Background(), "a", make(map[string]interface{})) + // Let the 
readRoutine get around to blocking + time.Sleep(time.Second) + passCh := make(chan struct{}) + go func() { + // Unless we have a non-blocking write to ResultsCh from readRoutine + // this blocks forever ont the waitgroup + c.Stop() + passCh <- struct{}{} + }() + select { + case <-passCh: + // Pass + case <-time.After(timeout): + t.Fatalf("WSClient did failed to stop within %v seconds - is one of the read/write routines blocking?", + timeout.Seconds()) + } +} + func startClient(t *testing.T, addr net.Addr) *WSClient { c := NewWSClient(addr.String(), "/websocket") _, err := c.Start() From f6adddb4a8e611c2c7d1f8ca64266bda6674e330 Mon Sep 17 00:00:00 2001 From: Silas Davis Date: Tue, 24 Oct 2017 17:38:12 +0100 Subject: [PATCH 11/19] Replace ResultsCh with ResponsesCh --- benchmarks/simu/counter.go | 2 +- rpc/client/httpclient.go | 12 +++++----- rpc/lib/client/ws_client.go | 22 ++++++------------ rpc/lib/client/ws_client_test.go | 22 +++++++++--------- rpc/lib/rpc_test.go | 38 ++++++++++++++++++-------------- 5 files changed, 47 insertions(+), 49 deletions(-) diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go index ff5b14c0d..c6b4c161c 100644 --- a/benchmarks/simu/counter.go +++ b/benchmarks/simu/counter.go @@ -21,7 +21,7 @@ func main() { // Read a bunch of responses go func() { for { - _, ok := <-wsc.ResultsCh + _, ok := <-wsc.ResponsesCh if !ok { break } diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 393250673..e63fcd4ba 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -318,16 +318,18 @@ func (w *WSEvents) redoSubscriptions() { func (w *WSEvents) eventListener() { for { select { - case res := <-w.ws.ResultsCh: + case resp := <-w.ws.ResponsesCh: // res is json.RawMessage - err := w.parseEvent(res) + if resp.Error != nil { + // FIXME: better logging/handling of errors?? + fmt.Printf("ws err: %+v\n", resp.Error.Error()) + continue + } + err := w.parseEvent(*resp.Result) if err != nil { // FIXME: better logging/handling of errors?? fmt.Printf("ws result: %+v\n", err) } - case err := <-w.ws.ErrorsCh: - // FIXME: better logging/handling of errors?? - fmt.Printf("ws err: %+v\n", err) case <-w.quit: // send a message so we can wait for the routine to exit // before cleaning up the w.ws stuff diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 62dce4d44..a15aca2bb 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -40,9 +40,8 @@ type WSClient struct { // https://godoc.org/github.com/rcrowley/go-metrics#Timer. PingPongLatencyTimer metrics.Timer - // user facing channels, closed only when the client is being stopped. - ResultsCh chan json.RawMessage - ErrorsCh chan error + // Single user facing channel to read RPCResponses from, closed only when the client is being stopped. + ResponsesCh chan types.RPCResponse // Callback, which will be called each time after successful reconnect. 
onReconnect func() @@ -149,8 +148,7 @@ func (c *WSClient) OnStart() error { return err } - c.ResultsCh = make(chan json.RawMessage) - c.ErrorsCh = make(chan error) + c.ResponsesCh = make(chan types.RPCResponse) c.send = make(chan types.RPCRequest) // 1 additional error may come from the read/write @@ -175,8 +173,7 @@ func (c *WSClient) Stop() bool { success := c.BaseService.Stop() // only close user-facing channels when we can't write to them c.wg.Wait() - close(c.ResultsCh) - close(c.ErrorsCh) + close(c.ResponsesCh) return success } @@ -193,7 +190,7 @@ func (c *WSClient) IsActive() bool { } // Send the given RPC request to the server. Results will be available on -// ResultsCh, errors, if any, on ErrorsCh. Will block until send succeeds or +// ResponsesCh, errors, if any, on ErrorsCh. Will block until send succeeds or // ctx.Done is closed. func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { select { @@ -438,19 +435,14 @@ func (c *WSClient) readRoutine() { err = json.Unmarshal(data, &response) if err != nil { c.Logger.Error("failed to parse response", "err", err, "data", string(data)) - c.ErrorsCh <- err - continue - } - if response.Error != nil { - c.ErrorsCh <- response.Error continue } c.Logger.Info("got response", "resp", response.Result) - // Combine a non-blocking read on writeRoutineQuit with a non-blocking write on ResultsCh to avoid blocking + // Combine a non-blocking read on writeRoutineQuit with a non-blocking write on ResponsesCh to avoid blocking // c.wg.Wait() in c.Stop() select { case <-c.writeRoutineQuit: - case c.ResultsCh <- *response.Result: + case c.ResponsesCh <- response: } } } diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index 049301f33..23f19dc00 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -125,8 +125,7 @@ func TestWSClientReconnectFailure(t *testing.T) { go func() { for { select { - case <-c.ResultsCh: - case <-c.ErrorsCh: + case <-c.ResponsesCh: case <-c.Quit: return } @@ -163,7 +162,7 @@ func TestWSClientReconnectFailure(t *testing.T) { } func TestNotBlockingOnStop(t *testing.T) { - timeout := 2 *time.Second + timeout := 2 * time.Second s := httptest.NewServer(&myHandler{}) c := startClient(t, s.Listener.Addr()) c.Call(context.Background(), "a", make(map[string]interface{})) @@ -171,10 +170,10 @@ func TestNotBlockingOnStop(t *testing.T) { time.Sleep(time.Second) passCh := make(chan struct{}) go func() { - // Unless we have a non-blocking write to ResultsCh from readRoutine + // Unless we have a non-blocking write to ResponsesCh from readRoutine // this blocks forever ont the waitgroup - c.Stop() - passCh <- struct{}{} + c.Stop() + passCh <- struct{}{} }() select { case <-passCh: @@ -201,13 +200,12 @@ func call(t *testing.T, method string, c *WSClient) { func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { for { select { - case res := <-c.ResultsCh: - if res != nil { - wg.Done() + case resp := <-c.ResponsesCh: + if resp.Error != nil { + t.Fatalf("unexpected error: %v", resp.Error) } - case err := <-c.ErrorsCh: - if err != nil { - t.Fatalf("unexpected error: %v", err) + if *resp.Result != nil { + wg.Done() } case <-c.Quit: return diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 4e83d23ef..2ec3014d5 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -217,15 +217,17 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) { } select { - case msg := <-cl.ResultsCh: + case msg := <-cl.ResponsesCh: + if 
msg.Error != nil { + return "", err + + } result := new(ResultEcho) - err = json.Unmarshal(msg, result) + err = json.Unmarshal(*msg.Result, result) if err != nil { return "", nil } return result.Value, nil - case err := <-cl.ErrorsCh: - return "", err } } @@ -239,15 +241,17 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { } select { - case msg := <-cl.ResultsCh: + case msg := <-cl.ResponsesCh: + if msg.Error != nil { + return []byte{}, msg.Error + + } result := new(ResultEchoBytes) - err = json.Unmarshal(msg, result) + err = json.Unmarshal(*msg.Result, result) if err != nil { return []byte{}, nil } return result.Value, nil - case err := <-cl.ErrorsCh: - return []byte{}, err } } @@ -319,14 +323,15 @@ func TestWSNewWSRPCFunc(t *testing.T) { require.Nil(t, err) select { - case msg := <-cl.ResultsCh: + case msg := <-cl.ResponsesCh: + if msg.Error != nil { + t.Fatal(err) + } result := new(ResultEcho) - err = json.Unmarshal(msg, result) + err = json.Unmarshal(*msg.Result, result) require.Nil(t, err) got := result.Value assert.Equal(t, got, val) - case err := <-cl.ErrorsCh: - t.Fatal(err) } } @@ -343,14 +348,15 @@ func TestWSHandlesArrayParams(t *testing.T) { require.Nil(t, err) select { - case msg := <-cl.ResultsCh: + case msg := <-cl.ResponsesCh: + if msg.Error != nil { + t.Fatalf("%+v", err) + } result := new(ResultEcho) - err = json.Unmarshal(msg, result) + err = json.Unmarshal(*msg.Result, result) require.Nil(t, err) got := result.Value assert.Equal(t, got, val) - case err := <-cl.ErrorsCh: - t.Fatalf("%+v", err) } } From 45e18a183227bd9285529f59ffb4db8208c5d8bd Mon Sep 17 00:00:00 2001 From: Eric Mann Date: Tue, 24 Oct 2017 14:13:35 -0700 Subject: [PATCH 12/19] Change log level to Info when proposal block hashing fails due to partially complete block --- consensus/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/state.go b/consensus/state.go index f0fbad811..f221960ea 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1130,7 +1130,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) { if !cs.ProposalBlock.HashesTo(blockID.Hash) { // TODO: this happens every time if we're not a validator (ugly logs) // TODO: ^^ wait, why does it matter that we're a validator? - cs.Logger.Error("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash) + cs.Logger.Info("Attempt to finalize failed. 
We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash) return } From ae538337bac2a9663765dea79de10ce9190e9671 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 25 Oct 2017 11:19:53 +0400 Subject: [PATCH 13/19] fix panic: failed to determine gopath: exec: "go" (Refs #782) ``` -bash-4.2$ tendermint show_validators panic: failed to determine gopath: exec: "go": executable file not found in $PATH goroutine 1 [running]: github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.gopath(0xc4200632c0, 0x18) /var/lib/jenkins/workspace/03.Build.Package/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/os.go:26 +0x1b5 github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.init() /var/lib/jenkins/workspace/03.Build.Package/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/os.go:17 +0x13c github.com/tendermint/tendermint/vendor/github.com/tendermint/go-wire.init() /var/lib/jenkins/workspace/03.Build.Package/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-wire/wire.go:165 +0x50 github.com/tendermint/tendermint/vendor/github.com/tendermint/go-wire/data.init() /var/lib/jenkins/workspace/03.Build.Package/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-wire/data/wrapper.go:89 +0x50 github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/cli.init() /var/lib/jenkins/workspace/03.Build.Package/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/cli/setup.go:190 +0x76 main.init() /var/lib/jenkins/workspace/03.Build.Package/go/src/github.com/tendermint/tendermint/cmd/tendermint/main.go:42 +0x49``` An error message instead would be nice. ``` Now GoPath() is a function instead of a variable. --- consensus/replay_test.go | 2 +- glide.lock | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 6d1102b60..86c61035a 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -44,7 +44,7 @@ func init() { // after running it (eg. sometimes small_block2 will have 5 block parts, sometimes 6). // It should only have to be re-run if there is some breaking change to the consensus data structures (eg. blocks, votes) // or to the behaviour of the app (eg. 
computes app hash differently) -var data_dir = path.Join(cmn.GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data") +var data_dir = path.Join(cmn.GoPath(), "src/github.com/tendermint/tendermint/consensus", "test_data") //------------------------------------------------------------------------------------------ // WAL Tests diff --git a/glide.lock b/glide.lock index a806e1f96..a8e571de5 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 9867fa4543ca4daea1a96a3883a7f483819c067ca34ed6d3aa67aace4a289e93 -updated: 2017-10-23T10:01:08.326324082-04:00 +updated: 2017-10-25T07:15:06.075544403Z imports: - name: github.com/btcsuite/btcd version: c7588cbf7690cd9f047a28efa2dcd8f2435a4e5e @@ -122,7 +122,7 @@ imports: subpackages: - iavl - name: github.com/tendermint/tmlibs - version: 8e5266a9ef2527e68a1571f932db8228a331b556 + version: 0a652499ead7cd20a57a6a592f0491a2b493bb85 subpackages: - autofile - cli From 4cb02d0bf21cc4c37b62a9071741134ae21cc810 Mon Sep 17 00:00:00 2001 From: Silas Davis Date: Tue, 24 Oct 2017 21:53:42 +0100 Subject: [PATCH 14/19] Exploit the fact the BaseService's closed Quit channel will keep emitting quit signals to close both readRoutine and writeRoutine --- rpc/lib/client/ws_client.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index a15aca2bb..1d99dd25f 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -51,7 +51,6 @@ type WSClient struct { backlog chan types.RPCRequest // stores a single user request received during a conn failure reconnectAfter chan error // reconnect requests readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine - writeRoutineQuit chan struct{} // a way for writeRoutine to close readRoutine (on <-BaseService.Quit) wg sync.WaitGroup @@ -280,7 +279,6 @@ func (c *WSClient) reconnect() error { func (c *WSClient) startReadWriteRoutines() { c.wg.Add(2) c.readRoutineQuit = make(chan struct{}) - c.writeRoutineQuit = make(chan struct{}) go c.readRoutine() go c.writeRoutine() } @@ -386,9 +384,6 @@ func (c *WSClient) writeRoutine() { case <-c.readRoutineQuit: return case <-c.Quit: - // We need to fan out the quit message from the single BaseService Quit Channel to the readRoutine - // Use a non-blocking close rather than a send in case readRoutine is in the process of quitting - close(c.writeRoutineQuit) c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) return } @@ -438,10 +433,11 @@ func (c *WSClient) readRoutine() { continue } c.Logger.Info("got response", "resp", response.Result) - // Combine a non-blocking read on writeRoutineQuit with a non-blocking write on ResponsesCh to avoid blocking - // c.wg.Wait() in c.Stop() + // Combine a non-blocking read on BaseService.Quit with a non-blocking write on ResponsesCh to avoid blocking + // c.wg.Wait() in c.Stop(). 
Note we rely on Quit being closed so that it sends unlimited Quit signals to stop + // both readRoutine and writeRoutine select { - case <-c.writeRoutineQuit: + case <-c.Quit: case c.ResponsesCh <- response: } } From b4fd6e876e591bfe27f92ac2d3b771fa0d2294ce Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Tue, 24 Oct 2017 12:34:36 +0200 Subject: [PATCH 15/19] Copy certifiers from light-client --- certifiers/checkpoint.go | 101 +++++++++++++++++ certifiers/client/main_test.go | 25 +++++ certifiers/client/provider.go | 129 +++++++++++++++++++++ certifiers/client/provider_test.go | 62 +++++++++++ certifiers/doc.go | 133 ++++++++++++++++++++++ certifiers/dynamic.go | 173 +++++++++++++++++++++++++++++ certifiers/dynamic_test.go | 130 ++++++++++++++++++++++ certifiers/errors/errors.go | 86 ++++++++++++++ certifiers/errors/errors_test.go | 18 +++ certifiers/files/commit.go | 77 +++++++++++++ certifiers/files/commit_test.go | 66 +++++++++++ certifiers/files/provider.go | 134 ++++++++++++++++++++++ certifiers/files/provider_test.go | 96 ++++++++++++++++ certifiers/helper.go | 149 +++++++++++++++++++++++++ certifiers/inquirer.go | 142 +++++++++++++++++++++++ certifiers/inquirer_test.go | 165 +++++++++++++++++++++++++++ certifiers/memprovider.go | 78 +++++++++++++ certifiers/performance_test.go | 116 +++++++++++++++++++ certifiers/provider.go | 125 +++++++++++++++++++++ certifiers/provider_test.go | 128 +++++++++++++++++++++ certifiers/static.go | 66 +++++++++++ certifiers/static_test.go | 59 ++++++++++ 22 files changed, 2258 insertions(+) create mode 100644 certifiers/checkpoint.go create mode 100644 certifiers/client/main_test.go create mode 100644 certifiers/client/provider.go create mode 100644 certifiers/client/provider_test.go create mode 100644 certifiers/doc.go create mode 100644 certifiers/dynamic.go create mode 100644 certifiers/dynamic_test.go create mode 100644 certifiers/errors/errors.go create mode 100644 certifiers/errors/errors_test.go create mode 100644 certifiers/files/commit.go create mode 100644 certifiers/files/commit_test.go create mode 100644 certifiers/files/provider.go create mode 100644 certifiers/files/provider_test.go create mode 100644 certifiers/helper.go create mode 100644 certifiers/inquirer.go create mode 100644 certifiers/inquirer_test.go create mode 100644 certifiers/memprovider.go create mode 100644 certifiers/performance_test.go create mode 100644 certifiers/provider.go create mode 100644 certifiers/provider_test.go create mode 100644 certifiers/static.go create mode 100644 certifiers/static_test.go diff --git a/certifiers/checkpoint.go b/certifiers/checkpoint.go new file mode 100644 index 000000000..3c37b8599 --- /dev/null +++ b/certifiers/checkpoint.go @@ -0,0 +1,101 @@ +package certifiers + +import ( + "bytes" + + "github.com/pkg/errors" + + rtypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +// Certifier checks the votes to make sure the block really is signed properly. +// Certifier must know the current set of validitors by some other means. +type Certifier interface { + Certify(check *Commit) error + ChainID() string +} + +// *Commit is basically the rpc /commit response, but extended +// +// This is the basepoint for proving anything on the blockchain. It contains +// a signed header. 
If the signatures are valid and > 2/3 of the known set, +// we can store this checkpoint and use it to prove any number of aspects of +// the system: such as txs, abci state, validator sets, etc... +type Commit rtypes.ResultCommit + +// FullCommit is a commit and the actual validator set, +// the base info you need to update to a given point, +// assuming knowledge of some previous validator set +type FullCommit struct { + *Commit `json:"commit"` + Validators *types.ValidatorSet `json:"validator_set"` +} + +func NewFullCommit(commit *Commit, vals *types.ValidatorSet) FullCommit { + return FullCommit{ + Commit: commit, + Validators: vals, + } +} + +func CommitFromResult(commit *rtypes.ResultCommit) *Commit { + return (*Commit)(commit) +} + +func (c *Commit) Height() int { + if c == nil || c.Header == nil { + return 0 + } + return c.Header.Height +} + +func (c *Commit) ValidatorsHash() []byte { + if c == nil || c.Header == nil { + return nil + } + return c.Header.ValidatorsHash +} + +// ValidateBasic does basic consistency checks and makes sure the headers +// and commits are all consistent and refer to our chain. +// +// Make sure to use a Verifier to validate the signatures actually provide +// a significantly strong proof for this header's validity. +func (c *Commit) ValidateBasic(chainID string) error { + // make sure the header is reasonable + if c.Header == nil { + return errors.New("Commit missing header") + } + if c.Header.ChainID != chainID { + return errors.Errorf("Header belongs to another chain '%s' not '%s'", + c.Header.ChainID, chainID) + } + + if c.Commit == nil { + return errors.New("Commit missing signatures") + } + + // make sure the header and commit match (height and hash) + if c.Commit.Height() != c.Header.Height { + return certerr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) + } + hhash := c.Header.Hash() + chash := c.Commit.BlockID.Hash + if !bytes.Equal(hhash, chash) { + return errors.Errorf("Commits sign block %X header is block %X", + chash, hhash) + } + + // make sure the commit is reasonable + err := c.Commit.ValidateBasic() + if err != nil { + return errors.WithStack(err) + } + + // looks good, we just need to make sure the signatures are really from + // empowered validators + return nil +} diff --git a/certifiers/client/main_test.go b/certifiers/client/main_test.go new file mode 100644 index 000000000..ab9867680 --- /dev/null +++ b/certifiers/client/main_test.go @@ -0,0 +1,25 @@ +package client_test + +import ( + "os" + "testing" + + "github.com/tendermint/abci/example/dummy" + + nm "github.com/tendermint/tendermint/node" + rpctest "github.com/tendermint/tendermint/rpc/test" +) + +var node *nm.Node + +func TestMain(m *testing.M) { + // start a tendermint node (and merkleeyes) in the background to test against + app := dummy.NewDummyApplication() + node = rpctest.StartTendermint(app) + code := m.Run() + + // and shut down proper at the end + node.Stop() + node.Wait() + os.Exit(code) +} diff --git a/certifiers/client/provider.go b/certifiers/client/provider.go new file mode 100644 index 000000000..d4c170ce9 --- /dev/null +++ b/certifiers/client/provider.go @@ -0,0 +1,129 @@ +/* +Package client defines a provider that uses a rpcclient +to get information, which is used to get new headers +and validators directly from a node. 
+*/ +package client + +import ( + "bytes" + + rpcclient "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +type SignStatusClient interface { + rpcclient.SignClient + rpcclient.StatusClient +} + +type provider struct { + node SignStatusClient + lastHeight int +} + +// NewProvider can wrap any rpcclient to expose it as +// a read-only provider. +func NewProvider(node SignStatusClient) certifiers.Provider { + return &provider{node: node} +} + +// NewProvider can connects to a tendermint json-rpc endpoint +// at the given url, and uses that as a read-only provider. +func NewHTTPProvider(remote string) certifiers.Provider { + return &provider{ + node: rpcclient.NewHTTP(remote, "/websocket"), + } +} + +// StoreCommit is a noop, as clients can only read from the chain... +func (p *provider) StoreCommit(_ certifiers.FullCommit) error { return nil } + +// GetHash gets the most recent validator and sees if it matches +// +// TODO: improve when the rpc interface supports more functionality +func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) { + var fc certifiers.FullCommit + vals, err := p.node.Validators(nil) + // if we get no validators, or a different height, return an error + if err != nil { + return fc, err + } + p.updateHeight(vals.BlockHeight) + vhash := types.NewValidatorSet(vals.Validators).Hash() + if !bytes.Equal(hash, vhash) { + return fc, certerr.ErrCommitNotFound() + } + return p.seedFromVals(vals) +} + +// GetByHeight gets the validator set by height +func (p *provider) GetByHeight(h int) (fc certifiers.FullCommit, err error) { + commit, err := p.node.Commit(&h) + if err != nil { + return fc, err + } + return p.seedFromCommit(commit) +} + +func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) { + commit, err := p.GetLatestCommit() + if err != nil { + return fc, err + } + return p.seedFromCommit(commit) +} + +// GetLatestCommit should return the most recent commit there is, +// which handles queries for future heights as per the semantics +// of GetByHeight. 
+func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { + status, err := p.node.Status() + if err != nil { + return nil, err + } + return p.node.Commit(&status.LatestBlockHeight) +} + +func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (certifiers.FullCommit, error) { + // now get the commits and build a full commit + commit, err := p.node.Commit(&vals.BlockHeight) + if err != nil { + return certifiers.FullCommit{}, err + } + fc := certifiers.NewFullCommit( + certifiers.CommitFromResult(commit), + types.NewValidatorSet(vals.Validators), + ) + return fc, nil +} + +func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.FullCommit, err error) { + fc.Commit = certifiers.CommitFromResult(commit) + + // now get the proper validators + vals, err := p.node.Validators(&commit.Header.Height) + if err != nil { + return fc, err + } + + // make sure they match the commit (as we cannot enforce height) + vset := types.NewValidatorSet(vals.Validators) + if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { + return fc, certerr.ErrValidatorsChanged() + } + + p.updateHeight(commit.Header.Height) + fc.Validators = vset + return fc, nil +} + +func (p *provider) updateHeight(h int) { + if h > p.lastHeight { + p.lastHeight = h + } +} diff --git a/certifiers/client/provider_test.go b/certifiers/client/provider_test.go new file mode 100644 index 000000000..c63cd6a1e --- /dev/null +++ b/certifiers/client/provider_test.go @@ -0,0 +1,62 @@ +package client_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + rpctest "github.com/tendermint/tendermint/rpc/test" + + "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/certifiers/client" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +func TestProvider(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + cfg := rpctest.GetConfig() + rpcAddr := cfg.RPC.ListenAddress + chainID := cfg.ChainID + p := client.NewHTTPProvider(rpcAddr) + require.NotNil(t, p) + + // let it produce some blocks + time.Sleep(500 * time.Millisecond) + + // let's get the highest block + seed, err := p.LatestCommit() + + require.Nil(err, "%+v", err) + sh := seed.Height() + vhash := seed.Header.ValidatorsHash + assert.True(sh < 5000) + + // let's check this is valid somehow + assert.Nil(seed.ValidateBasic(chainID)) + cert := certifiers.NewStatic(chainID, seed.Validators) + + // historical queries now work :) + lower := sh - 5 + seed, err = p.GetByHeight(lower) + assert.Nil(err, "%+v", err) + assert.Equal(lower, seed.Height()) + + // also get by hash (given the match) + seed, err = p.GetByHash(vhash) + require.Nil(err, "%+v", err) + require.Equal(vhash, seed.Header.ValidatorsHash) + err = cert.Certify(seed.Commit) + assert.Nil(err, "%+v", err) + + // get by hash fails without match + seed, err = p.GetByHash([]byte("foobar")) + assert.NotNil(err) + assert.True(certerr.IsCommitNotFoundErr(err)) + + // storing the seed silently ignored + err = p.StoreCommit(seed) + assert.Nil(err, "%+v", err) +} diff --git a/certifiers/doc.go b/certifiers/doc.go new file mode 100644 index 000000000..7566405bd --- /dev/null +++ b/certifiers/doc.go @@ -0,0 +1,133 @@ +/* +Package certifiers allows you to securely validate headers +without a full node. 
+ +This library pulls together all the crypto and algorithms, +so given a relatively recent (< unbonding period) known +validator set, one can get indisputable proof that data is in +the chain (current state) or detect if the node is lying to +the client. + +Tendermint RPC exposes a lot of info, but a malicious node +could return any data it wants to queries, or even to block +headers, even making up fake signatures from non-existent +validators to justify it. This is a lot of logic to get +right, to be contained in a small, easy to use library, +that does this for you, so you can just build nice UI. + +We design for clients who have no strong trust relationship +with any tendermint node, just the validator set as a whole. +Beyond building nice mobile or desktop applications, the +cosmos hub is another important example of a client, +that needs undeniable proof without syncing the full chain, +in order to efficiently implement IBC. + +Commits + +There are two main data structures that we pass around - Commit +and FullCommit. Both of them mirror what information is +exposed in tendermint rpc. + +Commit is a block header along with enough validator signatures +to prove its validity (> 2/3 of the voting power). A FullCommit +is a Commit along with the full validator set. When the +validator set doesn't change, the Commit is enough, but since +the block header only has a hash, we need the FullCommit to +follow any changes to the validator set. + +Certifiers + +A Certifier validates a new Commit given the currently known +state. There are three different types of Certifiers exposed, +each one building on the last one, with additional complexity. + +Static - given the validator set upon initialization. Verifies +all signatures against that set and if the validator set +changes, it will reject all headers. + +Dynamic - This wraps Static and has the same Certify +method. However, it adds an Update method, which can be called +with a FullCommit when the validator set changes. If it can +prove this is a valid transition, it will update the validator +set. + +Inquiring - this wraps Dynamic and implements an auto-update +strategy on top of the Dynamic update. If a call to +Certify fails as the validator set has changed, then it +attempts to find a FullCommit and Update to that header. +To get these FullCommits, it makes use of a Provider. + +Providers + +A Provider allows us to store and retrieve the FullCommits, +to provide memory to the Inquiring Certifier. + +NewMemStoreProvider - in-memory cache. + +files.NewProvider - disk backed storage. + +client.NewHTTPProvider - query tendermint rpc. + +NewCacheProvider - combine multiple providers. + +The suggested use for local light clients is +client.NewHTTPProvider for getting new data (Source), +and NewCacheProvider(NewMemStoreProvider(), +files.NewProvider()) to store confirmed headers (Trusted) + +How We Track Validators + +Unless you want to blindly trust the node you talk with, you +need to trace every response back to a hash in a block header +and validate the commit signatures of that block header match +the proper validator set. If there is a contant validator +set, you store it locally upon initialization of the client, +and check against that every time. + +Once there is a dynamic validator set, the issue of +verifying a block becomes a bit more tricky. There is +background information in a +github issue (https://github.com/tendermint/tendermint/issues/377). 
+ +In short, if there is a block at height H with a known +(trusted) validator set V, and another block at height H' +(H' > H) with validator set V' != V, then we want a way to +safely update it. + +First, get the new (unconfirmed) validator set V' and +verify H' is internally consistent and properly signed by +this V'. Assuming it is a valid block, we check that at +least 2/3 of the validators in V also signed it, meaning +it would also be valid under our old assumptions. +That should be enough, but we can also check that the +V counts for at least 2/3 of the total votes in H' +for extra safety (we can have a discussion if this is +strictly required). If we can verify all this, +then we can accept H' and V' as valid and use that to +validate all blocks X > H'. + +If we cannot update directly from H -> H' because there was +too much change to the validator set, then we can look for +some Hm (H < Hm < H') with a validator set Vm. Then we try +to update H -> Hm and Hm -> H' in two separate steps. +If one of these steps doesn't work, then we continue +bisecting, until we eventually have to externally +validate the valdiator set changes at every block. + +Since we never trust any server in this protocol, only the +signatures themselves, it doesn't matter if the seed comes +from a (possibly malicious) node or a (possibly malicious) user. +We can accept it or reject it based only on our trusted +validator set and cryptographic proofs. This makes it +extremely important to verify that you have the proper +validator set when initializing the client, as that is the +root of all trust. + +Or course, this assumes that the known block is within the +unbonding period to avoid the "nothing at stake" problem. +If you haven't seen the state in a few months, you will need +to manually verify the new validator set hash using off-chain +means (the same as getting the initial hash). + +*/ +package certifiers diff --git a/certifiers/dynamic.go b/certifiers/dynamic.go new file mode 100644 index 000000000..38f4bbb95 --- /dev/null +++ b/certifiers/dynamic.go @@ -0,0 +1,173 @@ +package certifiers + +import ( + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +var _ Certifier = &Dynamic{} + +// Dynamic uses a Static for Certify, but adds an +// Update method to allow for a change of validators. +// +// You can pass in a FullCommit with another validator set, +// and if this is a provably secure transition (< 1/3 change, +// sufficient signatures), then it will update the +// validator set for the next Certify call. +// For security, it will only follow validator set changes +// going forward. +type Dynamic struct { + cert *Static + lastHeight int +} + +func NewDynamic(chainID string, vals *types.ValidatorSet, height int) *Dynamic { + return &Dynamic{ + cert: NewStatic(chainID, vals), + lastHeight: height, + } +} + +func (c *Dynamic) ChainID() string { + return c.cert.ChainID() +} + +func (c *Dynamic) Validators() *types.ValidatorSet { + return c.cert.vSet +} + +func (c *Dynamic) Hash() []byte { + return c.cert.Hash() +} + +func (c *Dynamic) LastHeight() int { + return c.lastHeight +} + +// Certify handles this with +func (c *Dynamic) Certify(check *Commit) error { + err := c.cert.Certify(check) + if err == nil { + // update last seen height if input is valid + c.lastHeight = check.Height() + } + return err +} + +// Update will verify if this is a valid change and update +// the certifying validator set if safe to do so. 
+// +// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) +func (c *Dynamic) Update(fc FullCommit) error { + // ignore all checkpoints in the past -> only to the future + h := fc.Height() + if h <= c.lastHeight { + return certerr.ErrPastTime() + } + + // first, verify if the input is self-consistent.... + err := fc.ValidateBasic(c.ChainID()) + if err != nil { + return err + } + + // now, make sure not too much change... meaning this commit + // would be approved by the currently known validator set + // as well as the new set + commit := fc.Commit.Commit + err = VerifyCommitAny(c.Validators(), fc.Validators, c.ChainID(), + commit.BlockID, h, commit) + if err != nil { + return certerr.ErrTooMuchChange() + } + + // looks good, we can update + c.cert = NewStatic(c.ChainID(), fc.Validators) + c.lastHeight = h + return nil +} + +// VerifyCommitAny will check to see if the set would +// be valid with a different validator set. +// +// old is the validator set that we know +// * over 2/3 of the power in old signed this block +// +// cur is the validator set that signed this block +// * only votes from old are sufficient for 2/3 majority +// in the new set as well +// +// That means that: +// * 10% of the valset can't just declare themselves kings +// * If the validator set is 3x old size, we need more proof to trust +// +// *** TODO: move this. +// It belongs in tendermint/types/validator_set.go: VerifyCommitAny +func VerifyCommitAny(old, cur *types.ValidatorSet, chainID string, + blockID types.BlockID, height int, commit *types.Commit) error { + + if cur.Size() != len(commit.Precommits) { + return errors.Errorf("Invalid commit -- wrong set size: %v vs %v", cur.Size(), len(commit.Precommits)) + } + if height != commit.Height() { + return errors.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) + } + + oldVotingPower := int64(0) + curVotingPower := int64(0) + seen := map[int]bool{} + round := commit.Round() + + for idx, precommit := range commit.Precommits { + // first check as in VerifyCommit + if precommit == nil { + continue + } + if precommit.Height != height { + return certerr.ErrHeightMismatch(height, precommit.Height) + } + if precommit.Round != round { + return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) + } + if precommit.Type != types.VoteTypePrecommit { + return errors.Errorf("Invalid commit -- not precommit @ index %v", idx) + } + if !blockID.Equals(precommit.BlockID) { + continue // Not an error, but doesn't count + } + + // we only grab by address, ignoring unknown validators + vi, ov := old.GetByAddress(precommit.ValidatorAddress) + if ov == nil || seen[vi] { + continue // missing or double vote... + } + seen[vi] = true + + // Validate signature old school + precommitSignBytes := types.SignBytes(chainID, precommit) + if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { + return errors.Errorf("Invalid commit -- invalid signature: %v", precommit) + } + // Good precommit! 
+ oldVotingPower += ov.VotingPower + + // check new school + _, cv := cur.GetByIndex(idx) + if cv.PubKey.Equals(ov.PubKey) { + // make sure this is properly set in the current block as well + curVotingPower += cv.VotingPower + } + } + + if oldVotingPower <= old.TotalVotingPower()*2/3 { + return errors.Errorf("Invalid commit -- insufficient old voting power: got %v, needed %v", + oldVotingPower, (old.TotalVotingPower()*2/3 + 1)) + } else if curVotingPower <= cur.TotalVotingPower()*2/3 { + return errors.Errorf("Invalid commit -- insufficient cur voting power: got %v, needed %v", + curVotingPower, (cur.TotalVotingPower()*2/3 + 1)) + } + return nil +} diff --git a/certifiers/dynamic_test.go b/certifiers/dynamic_test.go new file mode 100644 index 000000000..2c921099f --- /dev/null +++ b/certifiers/dynamic_test.go @@ -0,0 +1,130 @@ +package certifiers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/certifiers/errors" +) + +// TestDynamicCert just makes sure it still works like StaticCert +func TestDynamicCert(t *testing.T) { + // assert, require := assert.New(t), require.New(t) + assert := assert.New(t) + // require := require.New(t) + + keys := certifiers.GenValKeys(4) + // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! + vals := keys.ToValidators(20, 10) + // and a certifier based on our known set + chainID := "test-dyno" + cert := certifiers.NewDynamic(chainID, vals, 0) + + cases := []struct { + keys certifiers.ValKeys + vals *types.ValidatorSet + height int + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect validator change error + }{ + // perfect, signed by everyone + {keys, vals, 1, 0, len(keys), true, false}, + // skip little guy is okay + {keys, vals, 2, 1, len(keys), true, false}, + // but not the big guy + {keys, vals, 3, 0, len(keys) - 1, false, false}, + // even changing the power a little bit breaks the static validator + // the sigs are enough, but the validator hash is unknown + {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, + } + + for _, tc := range cases { + check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, + []byte("bar"), tc.first, tc.last) + err := cert.Certify(check) + if tc.proper { + assert.Nil(err, "%+v", err) + assert.Equal(cert.LastHeight(), tc.height) + } else { + assert.NotNil(err) + if tc.changed { + assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) + } + } + } +} + +// TestDynamicUpdate makes sure we update safely and sanely +func TestDynamicUpdate(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + chainID := "test-dyno-up" + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(20, 0) + cert := certifiers.NewDynamic(chainID, vals, 40) + + // one valid block to give us a sense of time + h := 100 + good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), 0, len(keys)) + err := cert.Certify(good) + require.Nil(err, "%+v", err) + + // some new sets to try later + keys2 := keys.Extend(2) + keys3 := keys2.Extend(4) + + // we try to update with some blocks + cases := []struct { + keys certifiers.ValKeys + vals *types.ValidatorSet + height int + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect too much change error + }{ + // same validator set, well signed, of course it is okay 
+ {keys, vals, h + 10, 0, len(keys), true, false}, + // same validator set, poorly signed, fails + {keys, vals, h + 20, 2, len(keys), false, false}, + + // shift the power a little, works if properly signed + {keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false}, + // but not on a poor signature + {keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false}, + // and not if it was in the past + {keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false}, + + // let's try to adjust to a whole new validator set (we have 5/7 of the votes) + {keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false}, + + // properly signed but too much change, not allowed (only 7/11 validators known) + {keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true}, + } + + for _, tc := range cases { + fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals, + []byte("bar"), tc.first, tc.last) + err := cert.Update(fc) + if tc.proper { + assert.Nil(err, "%d: %+v", tc.height, err) + // we update last seen height + assert.Equal(cert.LastHeight(), tc.height) + // and we update the proper validators + assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash()) + } else { + assert.NotNil(err, "%d", tc.height) + // we don't update the height + assert.NotEqual(cert.LastHeight(), tc.height) + if tc.changed { + assert.True(errors.IsTooMuchChangeErr(err), + "%d: %+v", tc.height, err) + } + } + } +} diff --git a/certifiers/errors/errors.go b/certifiers/errors/errors.go new file mode 100644 index 000000000..c716c8fc0 --- /dev/null +++ b/certifiers/errors/errors.go @@ -0,0 +1,86 @@ +package errors + +import ( + "fmt" + + "github.com/pkg/errors" +) + +var ( + errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier") + errCommitNotFound = fmt.Errorf("Commit not found by provider") + errTooMuchChange = fmt.Errorf("Validators change too much to safely update") + errPastTime = fmt.Errorf("Update older than certifier height") + errNoPathFound = fmt.Errorf("Cannot find a path of validators") +) + +// IsCommitNotFoundErr checks whether an error is due to missing data +func IsCommitNotFoundErr(err error) bool { + return err != nil && (errors.Cause(err) == errCommitNotFound) +} + +func ErrCommitNotFound() error { + return errors.WithStack(errCommitNotFound) +} + +// IsValidatorsChangedErr checks whether an error is due +// to a differing validator set +func IsValidatorsChangedErr(err error) bool { + return err != nil && (errors.Cause(err) == errValidatorsChanged) +} + +func ErrValidatorsChanged() error { + return errors.WithStack(errValidatorsChanged) +} + +// IsTooMuchChangeErr checks whether an error is due to too much change +// between these validators sets +func IsTooMuchChangeErr(err error) bool { + return err != nil && (errors.Cause(err) == errTooMuchChange) +} + +func ErrTooMuchChange() error { + return errors.WithStack(errTooMuchChange) +} + +func IsPastTimeErr(err error) bool { + return err != nil && (errors.Cause(err) == errPastTime) +} + +func ErrPastTime() error { + return errors.WithStack(errPastTime) +} + +// IsNoPathFoundErr checks whether an error is due to no path of +// validators in provider from where we are to where we want to be +func IsNoPathFoundErr(err error) bool { + return err != nil && (errors.Cause(err) == errNoPathFound) +} + +func ErrNoPathFound() error { + return errors.WithStack(errNoPathFound) +} + +//-------------------------------------------- + +type errHeightMismatch struct { + h1, h2 int +} + +func (e 
errHeightMismatch) Error() string { + return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2) +} + +// IsHeightMismatchErr checks whether an error is due to data from different blocks +func IsHeightMismatchErr(err error) bool { + if err == nil { + return false + } + _, ok := errors.Cause(err).(errHeightMismatch) + return ok +} + +// ErrHeightMismatch returns an mismatch error with stack-trace +func ErrHeightMismatch(h1, h2 int) error { + return errors.WithStack(errHeightMismatch{h1, h2}) +} diff --git a/certifiers/errors/errors_test.go b/certifiers/errors/errors_test.go new file mode 100644 index 000000000..479215e47 --- /dev/null +++ b/certifiers/errors/errors_test.go @@ -0,0 +1,18 @@ +package errors + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorHeight(t *testing.T) { + e1 := ErrHeightMismatch(2, 3) + e1.Error() + assert.True(t, IsHeightMismatchErr(e1)) + + e2 := errors.New("foobar") + assert.False(t, IsHeightMismatchErr(e2)) + assert.False(t, IsHeightMismatchErr(nil)) +} diff --git a/certifiers/files/commit.go b/certifiers/files/commit.go new file mode 100644 index 000000000..18994f0fc --- /dev/null +++ b/certifiers/files/commit.go @@ -0,0 +1,77 @@ +package files + +import ( + "encoding/json" + "os" + + "github.com/pkg/errors" + + wire "github.com/tendermint/go-wire" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +const ( + // MaxFullCommitSize is the maximum number of bytes we will + // read in for a full commit to avoid excessive allocations + // in the deserializer + MaxFullCommitSize = 1024 * 1024 +) + +// SaveFullCommit exports the seed in binary / go-wire style +func SaveFullCommit(fc certifiers.FullCommit, path string) error { + f, err := os.Create(path) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + + var n int + wire.WriteBinary(fc, f, &n, &err) + return errors.WithStack(err) +} + +// SaveFullCommitJSON exports the seed in a json format +func SaveFullCommitJSON(fc certifiers.FullCommit, path string) error { + f, err := os.Create(path) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + stream := json.NewEncoder(f) + err = stream.Encode(fc) + return errors.WithStack(err) +} + +func LoadFullCommit(path string) (certifiers.FullCommit, error) { + var fc certifiers.FullCommit + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return fc, certerr.ErrCommitNotFound() + } + return fc, errors.WithStack(err) + } + defer f.Close() + + var n int + wire.ReadBinaryPtr(&fc, f, MaxFullCommitSize, &n, &err) + return fc, errors.WithStack(err) +} + +func LoadFullCommitJSON(path string) (certifiers.FullCommit, error) { + var fc certifiers.FullCommit + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return fc, certerr.ErrCommitNotFound() + } + return fc, errors.WithStack(err) + } + defer f.Close() + + stream := json.NewDecoder(f) + err = stream.Decode(&fc) + return fc, errors.WithStack(err) +} diff --git a/certifiers/files/commit_test.go b/certifiers/files/commit_test.go new file mode 100644 index 000000000..934ab7b66 --- /dev/null +++ b/certifiers/files/commit_test.go @@ -0,0 +1,66 @@ +package files + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tmlibs/common" + + "github.com/tendermint/tendermint/certifiers" +) + +func tmpFile() string { + suffix := cmn.RandStr(16) + 
return filepath.Join(os.TempDir(), "fc-test-"+suffix) +} + +func TestSerializeFullCommits(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // some constants + appHash := []byte("some crazy thing") + chainID := "ser-ial" + h := 25 + + // build a fc + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(10, 0) + fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) + + require.Equal(h, fc.Height()) + require.Equal(vals.Hash(), fc.ValidatorsHash()) + + // try read/write with json + jfile := tmpFile() + defer os.Remove(jfile) + jseed, err := LoadFullCommitJSON(jfile) + assert.NotNil(err) + err = SaveFullCommitJSON(fc, jfile) + require.Nil(err) + jseed, err = LoadFullCommitJSON(jfile) + assert.Nil(err, "%+v", err) + assert.Equal(h, jseed.Height()) + assert.Equal(vals.Hash(), jseed.ValidatorsHash()) + + // try read/write with binary + bfile := tmpFile() + defer os.Remove(bfile) + bseed, err := LoadFullCommit(bfile) + assert.NotNil(err) + err = SaveFullCommit(fc, bfile) + require.Nil(err) + bseed, err = LoadFullCommit(bfile) + assert.Nil(err, "%+v", err) + assert.Equal(h, bseed.Height()) + assert.Equal(vals.Hash(), bseed.ValidatorsHash()) + + // make sure they don't read the other format (different) + _, err = LoadFullCommit(jfile) + assert.NotNil(err) + _, err = LoadFullCommitJSON(bfile) + assert.NotNil(err) +} diff --git a/certifiers/files/provider.go b/certifiers/files/provider.go new file mode 100644 index 000000000..8b5c23de6 --- /dev/null +++ b/certifiers/files/provider.go @@ -0,0 +1,134 @@ +/* +Package files defines a Provider that stores all data in the filesystem + +We assume the same validator hash may be reused by many different +headers/*Commits, and thus store it separately. This leaves us +with three issues: + + 1. Given a validator hash, retrieve the validator set if previously stored + 2. Given a block height, find the *Commit with the highest height <= h + 3. Given a FullCommit, store it quickly to satisfy 1 and 2 + +Note that we do not worry about caching, as that can be achieved by +pairing this with a MemStoreProvider and CacheProvider from certifiers +*/ +package files + +import ( + "encoding/hex" + "fmt" + "math" + "os" + "path/filepath" + "sort" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +const ( + Ext = ".tsd" + ValDir = "validators" + CheckDir = "checkpoints" + dirPerm = os.FileMode(0755) + filePerm = os.FileMode(0644) +) + +type provider struct { + valDir string + checkDir string +} + +// NewProvider creates the parent dir and subdirs +// for validators and checkpoints as needed +func NewProvider(dir string) certifiers.Provider { + valDir := filepath.Join(dir, ValDir) + checkDir := filepath.Join(dir, CheckDir) + for _, d := range []string{valDir, checkDir} { + err := os.MkdirAll(d, dirPerm) + if err != nil { + panic(err) + } + } + return &provider{valDir: valDir, checkDir: checkDir} +} + +func (p *provider) encodeHash(hash []byte) string { + return hex.EncodeToString(hash) + Ext +} + +func (p *provider) encodeHeight(h int) string { + // pad up to 10^12 for height... 
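+	// e.g. height 1234 becomes "000000001234.tsd"; zero-padding keeps the
+	// lexicographic order of filenames equal to the numeric order of heights,
+	// which searchForHeight relies on when it sorts the directory listing.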
+ return fmt.Sprintf("%012d%s", h, Ext) +} + +func (p *provider) StoreCommit(fc certifiers.FullCommit) error { + // make sure the fc is self-consistent before saving + err := fc.ValidateBasic(fc.Commit.Header.ChainID) + if err != nil { + return err + } + + paths := []string{ + filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), + filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), + } + for _, path := range paths { + err := SaveFullCommit(fc, path) + // unknown error in creating or writing immediately breaks + if err != nil { + return err + } + } + return nil +} + +func (p *provider) GetByHeight(h int) (certifiers.FullCommit, error) { + // first we look for exact match, then search... + path := filepath.Join(p.checkDir, p.encodeHeight(h)) + fc, err := LoadFullCommit(path) + if certerr.IsCommitNotFoundErr(err) { + path, err = p.searchForHeight(h) + if err == nil { + fc, err = LoadFullCommit(path) + } + } + return fc, err +} + +func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) { + // Note to future: please update by 2077 to avoid rollover + return p.GetByHeight(math.MaxInt32 - 1) +} + +// search for height, looks for a file with highest height < h +// return certifiers.ErrCommitNotFound() if not there... +func (p *provider) searchForHeight(h int) (string, error) { + d, err := os.Open(p.checkDir) + if err != nil { + return "", errors.WithStack(err) + } + files, err := d.Readdirnames(0) + + d.Close() + if err != nil { + return "", errors.WithStack(err) + } + + desired := p.encodeHeight(h) + sort.Strings(files) + i := sort.SearchStrings(files, desired) + if i == 0 { + return "", certerr.ErrCommitNotFound() + } + found := files[i-1] + path := filepath.Join(p.checkDir, found) + return path, errors.WithStack(err) +} + +func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) { + path := filepath.Join(p.valDir, p.encodeHash(hash)) + return LoadFullCommit(path) +} diff --git a/certifiers/files/provider_test.go b/certifiers/files/provider_test.go new file mode 100644 index 000000000..05e8f59d2 --- /dev/null +++ b/certifiers/files/provider_test.go @@ -0,0 +1,96 @@ +package files_test + +import ( + "bytes" + "errors" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/certifiers/files" +) + +func checkEqual(stored, loaded certifiers.FullCommit, chainID string) error { + err := loaded.ValidateBasic(chainID) + if err != nil { + return err + } + if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { + return errors.New("Different block hashes") + } + return nil +} + +func TestFileProvider(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + dir, err := ioutil.TempDir("", "fileprovider-test") + assert.Nil(err) + defer os.RemoveAll(dir) + p := files.NewProvider(dir) + + chainID := "test-files" + appHash := []byte("some-data") + keys := certifiers.GenValKeys(5) + count := 10 + + // make a bunch of seeds... + seeds := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // two seeds for each validator, to check how we handle dups + // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... 
+ vals := keys.ToValidators(10, int64(count/2)) + h := 20 + 10*i + check := keys.GenCommit(chainID, h, nil, vals, appHash, 0, 5) + seeds[i] = certifiers.NewFullCommit(check, vals) + } + + // check provider is empty + seed, err := p.GetByHeight(20) + require.NotNil(err) + assert.True(certerr.IsCommitNotFoundErr(err)) + + seed, err = p.GetByHash(seeds[3].ValidatorsHash()) + require.NotNil(err) + assert.True(certerr.IsCommitNotFoundErr(err)) + + // now add them all to the provider + for _, s := range seeds { + err = p.StoreCommit(s) + require.Nil(err) + // and make sure we can get it back + s2, err := p.GetByHash(s.ValidatorsHash()) + assert.Nil(err) + err = checkEqual(s, s2, chainID) + assert.Nil(err) + // by height as well + s2, err = p.GetByHeight(s.Height()) + err = checkEqual(s, s2, chainID) + assert.Nil(err) + } + + // make sure we get the last hash if we overstep + seed, err = p.GetByHeight(5000) + if assert.Nil(err, "%+v", err) { + assert.Equal(seeds[count-1].Height(), seed.Height()) + err = checkEqual(seeds[count-1], seed, chainID) + assert.Nil(err) + } + + // and middle ones as well + seed, err = p.GetByHeight(47) + if assert.Nil(err, "%+v", err) { + // we only step by 10, so 40 must be the one below this + assert.Equal(40, seed.Height()) + } + + // and proper error for too low + _, err = p.GetByHeight(5) + assert.NotNil(err) + assert.True(certerr.IsCommitNotFoundErr(err)) +} diff --git a/certifiers/helper.go b/certifiers/helper.go new file mode 100644 index 000000000..09c60775e --- /dev/null +++ b/certifiers/helper.go @@ -0,0 +1,149 @@ +package certifiers + +import ( + "time" + + crypto "github.com/tendermint/go-crypto" + + "github.com/tendermint/tendermint/types" +) + +// ValKeys is a helper for testing. +// +// It lets us simulate signing with many keys, either ed25519 or secp256k1. +// The main use case is to create a set, and call GenCommit +// to get propely signed header for testing. +// +// You can set different weights of validators each time you call +// ToValidators, and can optionally extend the validator set later +// with Extend or ExtendSecp +type ValKeys []crypto.PrivKey + +// GenValKeys produces an array of private keys to generate commits +func GenValKeys(n int) ValKeys { + res := make(ValKeys, n) + for i := range res { + res[i] = crypto.GenPrivKeyEd25519().Wrap() + } + return res +} + +// Change replaces the key at index i +func (v ValKeys) Change(i int) ValKeys { + res := make(ValKeys, len(v)) + copy(res, v) + res[i] = crypto.GenPrivKeyEd25519().Wrap() + return res +} + +// Extend adds n more keys (to remove, just take a slice) +func (v ValKeys) Extend(n int) ValKeys { + extra := GenValKeys(n) + return append(v, extra...) +} + +// GenSecpValKeys produces an array of secp256k1 private keys to generate commits +func GenSecpValKeys(n int) ValKeys { + res := make(ValKeys, n) + for i := range res { + res[i] = crypto.GenPrivKeySecp256k1().Wrap() + } + return res +} + +// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice) +func (v ValKeys) ExtendSecp(n int) ValKeys { + extra := GenSecpValKeys(n) + return append(v, extra...) 
+} + +// ToValidators produces a list of validators from the set of keys +// The first key has weight `init` and it increases by `inc` every step +// so we can have all the same weight, or a simple linear distribution +// (should be enough for testing) +func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { + res := make([]*types.Validator, len(v)) + for i, k := range v { + res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) + } + return types.NewValidatorSet(res) +} + +// signHeader properly signs the header with all keys from first to last exclusive +func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { + votes := make([]*types.Vote, len(v)) + + // we need this list to keep the ordering... + vset := v.ToValidators(1, 0) + + // fill in the votes we want + for i := first; i < last; i++ { + vote := makeVote(header, vset, v[i]) + votes[vote.ValidatorIndex] = vote + } + + res := &types.Commit{ + BlockID: types.BlockID{Hash: header.Hash()}, + Precommits: votes, + } + return res +} + +func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote { + addr := key.PubKey().Address() + idx, _ := vals.GetByAddress(addr) + vote := &types.Vote{ + ValidatorAddress: addr, + ValidatorIndex: idx, + Height: header.Height, + Round: 1, + Type: types.VoteTypePrecommit, + BlockID: types.BlockID{Hash: header.Hash()}, + } + // Sign it + signBytes := types.SignBytes(header.ChainID, vote) + vote.Signature = key.Sign(signBytes) + return vote +} + +func genHeader(chainID string, height int, txs types.Txs, + vals *types.ValidatorSet, appHash []byte) *types.Header { + + return &types.Header{ + ChainID: chainID, + Height: height, + Time: time.Now(), + NumTxs: len(txs), + // LastBlockID + // LastCommitHash + ValidatorsHash: vals.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + } +} + +// GenCommit calls genHeader and signHeader and combines them into a *Commit +func (v ValKeys) GenCommit(chainID string, height int, txs types.Txs, + vals *types.ValidatorSet, appHash []byte, first, last int) *Commit { + + header := genHeader(chainID, height, txs, vals, appHash) + check := &Commit{ + Header: header, + Commit: v.signHeader(header, first, last), + CanonicalCommit: true, + } + return check +} + +// GenFullCommit calls genHeader and signHeader and combines them into a *Commit +func (v ValKeys) GenFullCommit(chainID string, height int, txs types.Txs, + vals *types.ValidatorSet, appHash []byte, first, last int) FullCommit { + + header := genHeader(chainID, height, txs, vals, appHash) + commit := &Commit{ + Header: header, + Commit: v.signHeader(header, first, last), + CanonicalCommit: true, + } + return NewFullCommit(commit, vals) +} diff --git a/certifiers/inquirer.go b/certifiers/inquirer.go new file mode 100644 index 000000000..9dd44e89b --- /dev/null +++ b/certifiers/inquirer.go @@ -0,0 +1,142 @@ +package certifiers + +import ( + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +type Inquiring struct { + cert *Dynamic + // These are only properly validated data, from local system + trusted Provider + // This is a source of new info, like a node rpc, or other import method + Source Provider +} + +func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provider) *Inquiring { + // store the data in trusted + trusted.StoreCommit(fc) + + return &Inquiring{ + cert: NewDynamic(chainID, fc.Validators, fc.Height()), + trusted: trusted, + Source: source, + } +} + +func 
(c *Inquiring) ChainID() string { + return c.cert.ChainID() +} + +func (c *Inquiring) Validators() *types.ValidatorSet { + return c.cert.cert.vSet +} + +func (c *Inquiring) LastHeight() int { + return c.cert.lastHeight +} + +// Certify makes sure this is checkpoint is valid. +// +// If the validators have changed since the last know time, it looks +// for a path to prove the new validators. +// +// On success, it will store the checkpoint in the store for later viewing +func (c *Inquiring) Certify(commit *Commit) error { + err := c.useClosestTrust(commit.Height()) + if err != nil { + return err + } + + err = c.cert.Certify(commit) + if !certerr.IsValidatorsChangedErr(err) { + return err + } + err = c.updateToHash(commit.Header.ValidatorsHash) + if err != nil { + return err + } + + err = c.cert.Certify(commit) + if err != nil { + return err + } + + // store the new checkpoint + c.trusted.StoreCommit( + NewFullCommit(commit, c.Validators())) + return nil +} + +func (c *Inquiring) Update(fc FullCommit) error { + err := c.useClosestTrust(fc.Height()) + if err != nil { + return err + } + + err = c.cert.Update(fc) + if err == nil { + c.trusted.StoreCommit(fc) + } + return err +} + +func (c *Inquiring) useClosestTrust(h int) error { + closest, err := c.trusted.GetByHeight(h) + if err != nil { + return err + } + + // if the best seed is not the one we currently use, + // let's just reset the dynamic validator + if closest.Height() != c.LastHeight() { + c.cert = NewDynamic(c.ChainID(), closest.Validators, closest.Height()) + } + return nil +} + +// updateToHash gets the validator hash we want to update to +// if IsTooMuchChangeErr, we try to find a path by binary search over height +func (c *Inquiring) updateToHash(vhash []byte) error { + // try to get the match, and update + fc, err := c.Source.GetByHash(vhash) + if err != nil { + return err + } + err = c.cert.Update(fc) + // handle IsTooMuchChangeErr by using divide and conquer + if certerr.IsTooMuchChangeErr(err) { + err = c.updateToHeight(fc.Height()) + } + return err +} + +// updateToHeight will use divide-and-conquer to find a path to h +func (c *Inquiring) updateToHeight(h int) error { + // try to update to this height (with checks) + fc, err := c.Source.GetByHeight(h) + if err != nil { + return err + } + start, end := c.LastHeight(), fc.Height() + if end <= start { + return certerr.ErrNoPathFound() + } + err = c.Update(fc) + + // we can handle IsTooMuchChangeErr specially + if !certerr.IsTooMuchChangeErr(err) { + return err + } + + // try to update to mid + mid := (start + end) / 2 + err = c.updateToHeight(mid) + if err != nil { + return err + } + + // if we made it to mid, we recurse + return c.updateToHeight(h) +} diff --git a/certifiers/inquirer_test.go b/certifiers/inquirer_test.go new file mode 100644 index 000000000..2a0ee5551 --- /dev/null +++ b/certifiers/inquirer_test.go @@ -0,0 +1,165 @@ +package certifiers_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/certifiers" +) + +func TestInquirerValidPath(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := certifiers.NewMemStoreProvider() + source := certifiers.NewMemStoreProvider() + + // set up the validators to generate test blocks + var vote int64 = 10 + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(vote, 0) + + // construct a bunch of commits, each with one more height than the last + chainID := "inquiry-test" + count := 50 + commits := 
make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // extend the keys by 1 each time + keys = keys.Extend(1) + vals = keys.ToValidators(vote, 0) + h := 20 + 10*i + appHash := []byte(fmt.Sprintf("h=%d", h)) + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } + + // initialize a certifier with the initial state + cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + + // this should fail validation.... + commit := commits[count-1].Commit + err := cert.Certify(commit) + require.NotNil(err) + + // add a few seed in the middle should be insufficient + for i := 10; i < 13; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.NotNil(err) + + // with more info, we succeed + for i := 0; i < count; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.Nil(err, "%+v", err) +} + +func TestInquirerMinimalPath(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := certifiers.NewMemStoreProvider() + source := certifiers.NewMemStoreProvider() + + // set up the validators to generate test blocks + var vote int64 = 10 + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(vote, 0) + + // construct a bunch of commits, each with one more height than the last + chainID := "minimal-path" + count := 12 + commits := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // extend the validators, so we are just below 2/3 + keys = keys.Extend(len(keys)/2 - 1) + vals = keys.ToValidators(vote, 0) + h := 5 + 10*i + appHash := []byte(fmt.Sprintf("h=%d", h)) + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } + + // initialize a certifier with the initial state + cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + + // this should fail validation.... 
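+	// (the certifier only trusts the validator set from commits[0]; the last
+	// commit is signed by a much larger set, and the empty source provides
+	// no intermediate commits to build a path to it)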
+ commit := commits[count-1].Commit + err := cert.Certify(commit) + require.NotNil(err) + + // add a few seed in the middle should be insufficient + for i := 5; i < 8; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.NotNil(err) + + // with more info, we succeed + for i := 0; i < count; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.Nil(err, "%+v", err) +} + +func TestInquirerVerifyHistorical(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := certifiers.NewMemStoreProvider() + source := certifiers.NewMemStoreProvider() + + // set up the validators to generate test blocks + var vote int64 = 10 + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(vote, 0) + + // construct a bunch of commits, each with one more height than the last + chainID := "inquiry-test" + count := 10 + commits := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // extend the keys by 1 each time + keys = keys.Extend(1) + vals = keys.ToValidators(vote, 0) + h := 20 + 10*i + appHash := []byte(fmt.Sprintf("h=%d", h)) + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } + + // initialize a certifier with the initial state + cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + + // store a few commits as trust + for _, i := range []int{2, 5} { + trust.StoreCommit(commits[i]) + } + + // let's see if we can jump forward using trusted commits + err := source.StoreCommit(commits[7]) + require.Nil(err, "%+v", err) + check := commits[7].Commit + err = cert.Certify(check) + require.Nil(err, "%+v", err) + assert.Equal(check.Height(), cert.LastHeight()) + + // add access to all commits via untrusted source + for i := 0; i < count; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + + // try to check an unknown seed in the past + mid := commits[3].Commit + err = cert.Certify(mid) + require.Nil(err, "%+v", err) + assert.Equal(mid.Height(), cert.LastHeight()) + + // and jump all the way forward again + end := commits[count-1].Commit + err = cert.Certify(end) + require.Nil(err, "%+v", err) + assert.Equal(end.Height(), cert.LastHeight()) +} diff --git a/certifiers/memprovider.go b/certifiers/memprovider.go new file mode 100644 index 000000000..cdad75e49 --- /dev/null +++ b/certifiers/memprovider.go @@ -0,0 +1,78 @@ +package certifiers + +import ( + "encoding/hex" + "sort" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +type memStoreProvider struct { + // byHeight is always sorted by Height... 
need to support range search (nil, h] + // btree would be more efficient for larger sets + byHeight fullCommits + byHash map[string]FullCommit +} + +// fullCommits just exists to allow easy sorting +type fullCommits []FullCommit + +func (s fullCommits) Len() int { return len(s) } +func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s fullCommits) Less(i, j int) bool { + return s[i].Height() < s[j].Height() +} + +func NewMemStoreProvider() Provider { + return &memStoreProvider{ + byHeight: fullCommits{}, + byHash: map[string]FullCommit{}, + } +} + +func (m *memStoreProvider) encodeHash(hash []byte) string { + return hex.EncodeToString(hash) +} + +func (m *memStoreProvider) StoreCommit(fc FullCommit) error { + // make sure the fc is self-consistent before saving + err := fc.ValidateBasic(fc.Commit.Header.ChainID) + if err != nil { + return err + } + + // store the valid fc + key := m.encodeHash(fc.ValidatorsHash()) + m.byHash[key] = fc + m.byHeight = append(m.byHeight, fc) + sort.Sort(m.byHeight) + return nil +} + +func (m *memStoreProvider) GetByHeight(h int) (FullCommit, error) { + // search from highest to lowest + for i := len(m.byHeight) - 1; i >= 0; i-- { + fc := m.byHeight[i] + if fc.Height() <= h { + return fc, nil + } + } + return FullCommit{}, certerr.ErrCommitNotFound() +} + +func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { + var err error + fc, ok := m.byHash[m.encodeHash(hash)] + if !ok { + err = certerr.ErrCommitNotFound() + } + return fc, err +} + +func (m *memStoreProvider) LatestCommit() (FullCommit, error) { + l := len(m.byHeight) + if l == 0 { + return FullCommit{}, certerr.ErrCommitNotFound() + } + return m.byHeight[l-1], nil +} diff --git a/certifiers/performance_test.go b/certifiers/performance_test.go new file mode 100644 index 000000000..2a6c6ced8 --- /dev/null +++ b/certifiers/performance_test.go @@ -0,0 +1,116 @@ +package certifiers_test + +import ( + "fmt" + "testing" + + "github.com/tendermint/tendermint/certifiers" +) + +func BenchmarkGenCommit20(b *testing.B) { + keys := certifiers.GenValKeys(20) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommit100(b *testing.B) { + keys := certifiers.GenValKeys(100) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommitSec20(b *testing.B) { + keys := certifiers.GenSecpValKeys(20) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommitSec100(b *testing.B) { + keys := certifiers.GenSecpValKeys(100) + benchmarkGenCommit(b, keys) +} + +func benchmarkGenCommit(b *testing.B, keys certifiers.ValKeys) { + chainID := fmt.Sprintf("bench-%d", len(keys)) + vals := keys.ToValidators(20, 10) + for i := 0; i < b.N; i++ { + h := 1 + i + appHash := []byte(fmt.Sprintf("h=%d", h)) + keys.GenCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } +} + +// this benchmarks generating one key +func BenchmarkGenValKeys(b *testing.B) { + keys := certifiers.GenValKeys(20) + for i := 0; i < b.N; i++ { + keys = keys.Extend(1) + } +} + +// this benchmarks generating one key +func BenchmarkGenSecpValKeys(b *testing.B) { + keys := certifiers.GenSecpValKeys(20) + for i := 0; i < b.N; i++ { + keys = keys.Extend(1) + } +} + +func BenchmarkToValidators20(b *testing.B) { + benchmarkToValidators(b, 20) +} + +func BenchmarkToValidators100(b *testing.B) { + benchmarkToValidators(b, 100) +} + +// this benchmarks constructing the validator set (.PubKey() * nodes) +func benchmarkToValidators(b *testing.B, nodes int) { + keys := certifiers.GenValKeys(nodes) + for i := 1; i <= b.N; i++ { + 
keys.ToValidators(int64(2*i), int64(i)) + } +} + +func BenchmarkToValidatorsSec100(b *testing.B) { + benchmarkToValidatorsSec(b, 100) +} + +// this benchmarks constructing the validator set (.PubKey() * nodes) +func benchmarkToValidatorsSec(b *testing.B, nodes int) { + keys := certifiers.GenSecpValKeys(nodes) + for i := 1; i <= b.N; i++ { + keys.ToValidators(int64(2*i), int64(i)) + } +} + +func BenchmarkCertifyCommit20(b *testing.B) { + keys := certifiers.GenValKeys(20) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommit100(b *testing.B) { + keys := certifiers.GenValKeys(100) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommitSec20(b *testing.B) { + keys := certifiers.GenSecpValKeys(20) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommitSec100(b *testing.B) { + keys := certifiers.GenSecpValKeys(100) + benchmarkCertifyCommit(b, keys) +} + +func benchmarkCertifyCommit(b *testing.B, keys certifiers.ValKeys) { + chainID := "bench-certify" + vals := keys.ToValidators(20, 10) + cert := certifiers.NewStatic(chainID, vals) + check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), 0, len(keys)) + for i := 0; i < b.N; i++ { + err := cert.Certify(check) + if err != nil { + panic(err) + } + } + +} diff --git a/certifiers/provider.go b/certifiers/provider.go new file mode 100644 index 000000000..64b4212d8 --- /dev/null +++ b/certifiers/provider.go @@ -0,0 +1,125 @@ +package certifiers + +import ( + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +// Provider is used to get more validators by other means +// +// Examples: MemProvider, files.Provider, client.Provider.... +type Provider interface { + // StoreCommit saves a FullCommit after we have verified it, + // so we can query for it later. Important for updating our + // store of trusted commits + StoreCommit(fc FullCommit) error + // GetByHeight returns the closest commit with height <= h + GetByHeight(h int) (FullCommit, error) + // GetByHash returns a commit exactly matching this validator hash + GetByHash(hash []byte) (FullCommit, error) + // LatestCommit returns the newest commit stored + LatestCommit() (FullCommit, error) +} + +// cacheProvider allows you to place one or more caches in front of a source +// Provider. It runs through them in order until a match is found. +// So you can keep a local cache, and check with the network if +// no data is there. +type cacheProvider struct { + Providers []Provider +} + +func NewCacheProvider(providers ...Provider) Provider { + return cacheProvider{ + Providers: providers, + } +} + +// StoreCommit tries to add the seed to all providers. +// +// Aborts on first error it encounters (closest provider) +func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { + for _, p := range c.Providers { + err = p.StoreCommit(fc) + if err != nil { + break + } + } + return err +} + +/* +GetByHeight should return the closest possible match from all providers. + +The Cache is usually organized in order from cheapest call (memory) +to most expensive calls (disk/network). However, since GetByHeight returns +a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would +give us the exact match, a naive "stop at first non-error" would hide +the actual desired results. + +Thus, we query each provider in order until we find an exact match +or we finished querying them all. If at least one returned a non-error, +then this returns the best match (minimum h-h'). 
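
For example, if a memory cache knows heights {10, 30} and a disk store
knows {10, 30, 40}, GetByHeight(35) returns the commit at height 30
(memory already had the best available match), while GetByHeight(45)
returns the commit at height 40 found on disk.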
+*/ +func (c cacheProvider) GetByHeight(h int) (fc FullCommit, err error) { + for _, p := range c.Providers { + var tfc FullCommit + tfc, err = p.GetByHeight(h) + if err == nil { + if tfc.Height() > fc.Height() { + fc = tfc + } + if tfc.Height() == h { + break + } + } + } + // even if the last one had an error, if any was a match, this is good + if fc.Height() > 0 { + err = nil + } + return fc, err +} + +func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { + for _, p := range c.Providers { + fc, err = p.GetByHash(hash) + if err == nil { + break + } + } + return fc, err +} + +func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { + for _, p := range c.Providers { + var tfc FullCommit + tfc, err = p.LatestCommit() + if err == nil && tfc.Height() > fc.Height() { + fc = tfc + } + } + // even if the last one had an error, if any was a match, this is good + if fc.Height() > 0 { + err = nil + } + return fc, err +} + +// missingProvider doens't store anything, always a miss +// Designed as a mock for testing +type missingProvider struct{} + +func NewMissingProvider() Provider { + return missingProvider{} +} + +func (missingProvider) StoreCommit(_ FullCommit) error { return nil } +func (missingProvider) GetByHeight(_ int) (FullCommit, error) { + return FullCommit{}, certerr.ErrCommitNotFound() +} +func (missingProvider) GetByHash(_ []byte) (FullCommit, error) { + return FullCommit{}, certerr.ErrCommitNotFound() +} +func (missingProvider) LatestCommit() (FullCommit, error) { + return FullCommit{}, certerr.ErrCommitNotFound() +} diff --git a/certifiers/provider_test.go b/certifiers/provider_test.go new file mode 100644 index 000000000..c1e9ae514 --- /dev/null +++ b/certifiers/provider_test.go @@ -0,0 +1,128 @@ +package certifiers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/certifiers/errors" +) + +func TestMemProvider(t *testing.T) { + p := certifiers.NewMemStoreProvider() + checkProvider(t, p, "test-mem", "empty") +} + +func TestCacheProvider(t *testing.T) { + p := certifiers.NewCacheProvider( + certifiers.NewMissingProvider(), + certifiers.NewMemStoreProvider(), + certifiers.NewMissingProvider(), + ) + checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") +} + +func checkProvider(t *testing.T, p certifiers.Provider, chainID, app string) { + assert, require := assert.New(t), require.New(t) + appHash := []byte(app) + keys := certifiers.GenValKeys(5) + count := 10 + + // make a bunch of commits... + commits := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // two commits for each validator, to check how we handle dups + // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... 
+ vals := keys.ToValidators(10, int64(count/2)) + h := 20 + 10*i + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) + } + + // check provider is empty + fc, err := p.GetByHeight(20) + require.NotNil(err) + assert.True(errors.IsCommitNotFoundErr(err)) + + fc, err = p.GetByHash(commits[3].ValidatorsHash()) + require.NotNil(err) + assert.True(errors.IsCommitNotFoundErr(err)) + + // now add them all to the provider + for _, s := range commits { + err = p.StoreCommit(s) + require.Nil(err) + // and make sure we can get it back + s2, err := p.GetByHash(s.ValidatorsHash()) + assert.Nil(err) + assert.Equal(s, s2) + // by height as well + s2, err = p.GetByHeight(s.Height()) + assert.Nil(err) + assert.Equal(s, s2) + } + + // make sure we get the last hash if we overstep + fc, err = p.GetByHeight(5000) + if assert.Nil(err) { + assert.Equal(commits[count-1].Height(), fc.Height()) + assert.Equal(commits[count-1], fc) + } + + // and middle ones as well + fc, err = p.GetByHeight(47) + if assert.Nil(err) { + // we only step by 10, so 40 must be the one below this + assert.Equal(40, fc.Height()) + } + +} + +// this will make a get height, and if it is good, set the data as well +func checkGetHeight(t *testing.T, p certifiers.Provider, ask, expect int) { + fc, err := p.GetByHeight(ask) + require.Nil(t, err, "%+v", err) + if assert.Equal(t, expect, fc.Height()) { + err = p.StoreCommit(fc) + require.Nil(t, err, "%+v", err) + } +} + +func TestCacheGetsBestHeight(t *testing.T) { + // assert, require := assert.New(t), require.New(t) + require := require.New(t) + + // we will write data to the second level of the cache (p2), + // and see what gets cached, stored in + p := certifiers.NewMemStoreProvider() + p2 := certifiers.NewMemStoreProvider() + cp := certifiers.NewCacheProvider(p, p2) + + chainID := "cache-best-height" + appHash := []byte("01234567") + keys := certifiers.GenValKeys(5) + count := 10 + + // set a bunch of commits + for i := 0; i < count; i++ { + vals := keys.ToValidators(10, int64(count/2)) + h := 10 * (i + 1) + fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) + err := p2.StoreCommit(fc) + require.NoError(err) + } + + // let's get a few heights from the cache and set them proper + checkGetHeight(t, cp, 57, 50) + checkGetHeight(t, cp, 33, 30) + + // make sure they are set in p as well (but nothing else) + checkGetHeight(t, p, 44, 30) + checkGetHeight(t, p, 50, 50) + checkGetHeight(t, p, 99, 50) + + // now, query the cache for a higher value + checkGetHeight(t, p2, 99, 90) + checkGetHeight(t, cp, 99, 90) +} diff --git a/certifiers/static.go b/certifiers/static.go new file mode 100644 index 000000000..a2b9b8ae7 --- /dev/null +++ b/certifiers/static.go @@ -0,0 +1,66 @@ +package certifiers + +import ( + "bytes" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +var _ Certifier = &Static{} + +// Static assumes a static set of validators, set on +// initilization and checks against them. +// The signatures on every header is checked for > 2/3 votes +// against the known validator set upon Certify +// +// Good for testing or really simple chains. Building block +// to support real-world functionality. 
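+//
+// Typical use (illustrative sketch; knownVals stands for whatever
+// validator set you obtained out of band):
+//
+//	cert := NewStatic(chainID, knownVals)
+//	err := cert.Certify(commit) // nil only if the header carries knownVals' hash
+//	                            // and >2/3 of their power signed the commit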
+type Static struct { + chainID string + vSet *types.ValidatorSet + vhash []byte +} + +func NewStatic(chainID string, vals *types.ValidatorSet) *Static { + return &Static{ + chainID: chainID, + vSet: vals, + } +} + +func (c *Static) ChainID() string { + return c.chainID +} + +func (c *Static) Validators() *types.ValidatorSet { + return c.vSet +} + +func (c *Static) Hash() []byte { + if len(c.vhash) == 0 { + c.vhash = c.vSet.Hash() + } + return c.vhash +} + +func (c *Static) Certify(commit *Commit) error { + // do basic sanity checks + err := commit.ValidateBasic(c.chainID) + if err != nil { + return err + } + + // make sure it has the same validator set we have (static means static) + if !bytes.Equal(c.Hash(), commit.Header.ValidatorsHash) { + return certerr.ErrValidatorsChanged() + } + + // then make sure we have the proper signatures for this + err = c.vSet.VerifyCommit(c.chainID, commit.Commit.BlockID, + commit.Header.Height, commit.Commit) + return errors.WithStack(err) +} diff --git a/certifiers/static_test.go b/certifiers/static_test.go new file mode 100644 index 000000000..f1f40c6c9 --- /dev/null +++ b/certifiers/static_test.go @@ -0,0 +1,59 @@ +package certifiers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tendermint/certifiers" + errors "github.com/tendermint/tendermint/certifiers/errors" +) + +func TestStaticCert(t *testing.T) { + // assert, require := assert.New(t), require.New(t) + assert := assert.New(t) + // require := require.New(t) + + keys := certifiers.GenValKeys(4) + // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! + vals := keys.ToValidators(20, 10) + // and a certifier based on our known set + chainID := "test-static" + cert := certifiers.NewStatic(chainID, vals) + + cases := []struct { + keys certifiers.ValKeys + vals *types.ValidatorSet + height int + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect validator change error + }{ + // perfect, signed by everyone + {keys, vals, 1, 0, len(keys), true, false}, + // skip little guy is okay + {keys, vals, 2, 1, len(keys), true, false}, + // but not the big guy + {keys, vals, 3, 0, len(keys) - 1, false, false}, + // even changing the power a little bit breaks the static validator + // the sigs are enough, but the validator hash is unknown + {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, + } + + for _, tc := range cases { + check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, + []byte("foo"), tc.first, tc.last) + err := cert.Certify(check) + if tc.proper { + assert.Nil(err, "%+v", err) + } else { + assert.NotNil(err) + if tc.changed { + assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) + } + } + } + +} From 94b36bb65eba66f5d5398deb4a9125b42177acf9 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Tue, 24 Oct 2017 12:40:50 +0200 Subject: [PATCH 16/19] Move VerifyCommitAny into the types package --- certifiers/dynamic.go | 86 +--------------------------------- types/validator_set.go | 103 +++++++++++++++++++++++++++++++---------- 2 files changed, 80 insertions(+), 109 deletions(-) diff --git a/certifiers/dynamic.go b/certifiers/dynamic.go index 38f4bbb95..a9c1a54f0 100644 --- a/certifiers/dynamic.go +++ b/certifiers/dynamic.go @@ -1,8 +1,6 @@ package certifiers import ( - "github.com/pkg/errors" - "github.com/tendermint/tendermint/types" certerr "github.com/tendermint/tendermint/certifiers/errors" @@ -78,7 +76,7 @@ func (c 
*Dynamic) Update(fc FullCommit) error { // would be approved by the currently known validator set // as well as the new set commit := fc.Commit.Commit - err = VerifyCommitAny(c.Validators(), fc.Validators, c.ChainID(), + err = c.Validators().VerifyCommitAny(fc.Validators, c.ChainID(), commit.BlockID, h, commit) if err != nil { return certerr.ErrTooMuchChange() @@ -89,85 +87,3 @@ func (c *Dynamic) Update(fc FullCommit) error { c.lastHeight = h return nil } - -// VerifyCommitAny will check to see if the set would -// be valid with a different validator set. -// -// old is the validator set that we know -// * over 2/3 of the power in old signed this block -// -// cur is the validator set that signed this block -// * only votes from old are sufficient for 2/3 majority -// in the new set as well -// -// That means that: -// * 10% of the valset can't just declare themselves kings -// * If the validator set is 3x old size, we need more proof to trust -// -// *** TODO: move this. -// It belongs in tendermint/types/validator_set.go: VerifyCommitAny -func VerifyCommitAny(old, cur *types.ValidatorSet, chainID string, - blockID types.BlockID, height int, commit *types.Commit) error { - - if cur.Size() != len(commit.Precommits) { - return errors.Errorf("Invalid commit -- wrong set size: %v vs %v", cur.Size(), len(commit.Precommits)) - } - if height != commit.Height() { - return errors.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) - } - - oldVotingPower := int64(0) - curVotingPower := int64(0) - seen := map[int]bool{} - round := commit.Round() - - for idx, precommit := range commit.Precommits { - // first check as in VerifyCommit - if precommit == nil { - continue - } - if precommit.Height != height { - return certerr.ErrHeightMismatch(height, precommit.Height) - } - if precommit.Round != round { - return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) - } - if precommit.Type != types.VoteTypePrecommit { - return errors.Errorf("Invalid commit -- not precommit @ index %v", idx) - } - if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count - } - - // we only grab by address, ignoring unknown validators - vi, ov := old.GetByAddress(precommit.ValidatorAddress) - if ov == nil || seen[vi] { - continue // missing or double vote... - } - seen[vi] = true - - // Validate signature old school - precommitSignBytes := types.SignBytes(chainID, precommit) - if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { - return errors.Errorf("Invalid commit -- invalid signature: %v", precommit) - } - // Good precommit! 
- oldVotingPower += ov.VotingPower - - // check new school - _, cv := cur.GetByIndex(idx) - if cv.PubKey.Equals(ov.PubKey) { - // make sure this is properly set in the current block as well - curVotingPower += cv.VotingPower - } - } - - if oldVotingPower <= old.TotalVotingPower()*2/3 { - return errors.Errorf("Invalid commit -- insufficient old voting power: got %v, needed %v", - oldVotingPower, (old.TotalVotingPower()*2/3 + 1)) - } else if curVotingPower <= cur.TotalVotingPower()*2/3 { - return errors.Errorf("Invalid commit -- insufficient cur voting power: got %v, needed %v", - curVotingPower, (cur.TotalVotingPower()*2/3 + 1)) - } - return nil -} diff --git a/types/validator_set.go b/types/validator_set.go index 0e20417a7..61e7eab2e 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -6,6 +6,7 @@ import ( "sort" "strings" + "github.com/pkg/errors" "github.com/tendermint/go-wire" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/merkle" @@ -268,30 +269,84 @@ func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height } } -// Verify that +2/3 of this set had signed the given signBytes. -// Unlike VerifyCommit(), this function can verify commits with differeent sets. -func (valSet *ValidatorSet) VerifyCommitAny(chainID string, blockID BlockID, height int, commit *Commit) error { - panic("Not yet implemented") - /* - Start like: - - FOR_LOOP: - for _, val := range vals { - if len(precommits) == 0 { - break FOR_LOOP - } - next := precommits[0] - switch bytes.Compare(val.Address(), next.ValidatorAddress) { - case -1: - continue FOR_LOOP - case 0: - signBytes := tm.SignBytes(next) - ... - case 1: - ... // error? - } - } - */ +// VerifyCommitAny will check to see if the set would +// be valid with a different validator set. 
+//
+// valSet is the validator set that we know
+// * over 2/3 of the power in valSet signed this block
+//
+// newSet is the validator set that signed this block
+// * only votes from valSet are sufficient for 2/3 majority
+//   in newSet as well
+//
+// That means that:
+// * 10% of the valset can't just declare themselves kings
+// * If the validator set is 3x old size, we need more proof to trust
+func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string,
+	blockID BlockID, height int, commit *Commit) error {
+
+	if newSet.Size() != len(commit.Precommits) {
+		return errors.Errorf("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits))
+	}
+	if height != commit.Height() {
+		return errors.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
+	}
+
+	oldVotingPower := int64(0)
+	newVotingPower := int64(0)
+	seen := map[int]bool{}
+	round := commit.Round()
+
+	for idx, precommit := range commit.Precommits {
+		// first check as in VerifyCommit
+		if precommit == nil {
+			continue
+		}
+		if precommit.Height != height {
+			// return certerr.ErrHeightMismatch(height, precommit.Height)
+			return errors.Errorf("Blocks don't match - %d vs %d", height, precommit.Height)
+		}
+		if precommit.Round != round {
+			return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
+		}
+		if precommit.Type != VoteTypePrecommit {
+			return errors.Errorf("Invalid commit -- not precommit @ index %v", idx)
+		}
+		if !blockID.Equals(precommit.BlockID) {
+			continue // Not an error, but doesn't count
+		}
+
+		// we only grab by address, ignoring unknown validators
+		vi, ov := valSet.GetByAddress(precommit.ValidatorAddress)
+		if ov == nil || seen[vi] {
+			continue // missing or double vote...
+		}
+		seen[vi] = true
+
+		// Validate signature old school
+		precommitSignBytes := SignBytes(chainID, precommit)
+		if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
+			return errors.Errorf("Invalid commit -- invalid signature: %v", precommit)
+		}
+		// Good precommit!
+ oldVotingPower += ov.VotingPower + + // check new school + _, cv := newSet.GetByIndex(idx) + if cv.PubKey.Equals(ov.PubKey) { + // make sure this is properly set in the current block as well + newVotingPower += cv.VotingPower + } + } + + if oldVotingPower <= valSet.TotalVotingPower()*2/3 { + return errors.Errorf("Invalid commit -- insufficient old voting power: got %v, needed %v", + oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) + } else if newVotingPower <= newSet.TotalVotingPower()*2/3 { + return errors.Errorf("Invalid commit -- insufficient cur voting power: got %v, needed %v", + newVotingPower, (newSet.TotalVotingPower()*2/3 + 1)) + } + return nil } func (valSet *ValidatorSet) ToBytes() []byte { From 0396b6d521f86190e736c58939da48ee7cfd762f Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 25 Oct 2017 16:12:58 +0200 Subject: [PATCH 17/19] Rename checkpoint.go --- certifiers/{checkpoint.go => commit.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename certifiers/{checkpoint.go => commit.go} (100%) diff --git a/certifiers/checkpoint.go b/certifiers/commit.go similarity index 100% rename from certifiers/checkpoint.go rename to certifiers/commit.go From f653ba63bf27bc6b5d702db30dd17a797d1ae9aa Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 25 Oct 2017 16:43:18 +0200 Subject: [PATCH 18/19] Separated out certifiers.Commit from rpc structs --- certifiers/client/provider.go | 8 ++++++-- certifiers/commit.go | 25 ++++++++++--------------- certifiers/dynamic.go | 2 +- certifiers/files/provider.go | 4 ++-- certifiers/helper.go | 20 +++++++++----------- certifiers/inquirer.go | 2 +- certifiers/static.go | 2 +- rpc/core/blocks.go | 6 +++--- rpc/core/types/responses.go | 21 ++++++++++++++++++--- types/block.go | 8 ++++++++ 10 files changed, 59 insertions(+), 39 deletions(-) diff --git a/certifiers/client/provider.go b/certifiers/client/provider.go index d4c170ce9..6240da11c 100644 --- a/certifiers/client/provider.go +++ b/certifiers/client/provider.go @@ -89,6 +89,10 @@ func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { return p.node.Commit(&status.LatestBlockHeight) } +func CommitFromResult(result *ctypes.ResultCommit) certifiers.Commit { + return (certifiers.Commit)(result.SignedHeader) +} + func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (certifiers.FullCommit, error) { // now get the commits and build a full commit commit, err := p.node.Commit(&vals.BlockHeight) @@ -96,14 +100,14 @@ func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (certifiers.FullC return certifiers.FullCommit{}, err } fc := certifiers.NewFullCommit( - certifiers.CommitFromResult(commit), + CommitFromResult(commit), types.NewValidatorSet(vals.Validators), ) return fc, nil } func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.FullCommit, err error) { - fc.Commit = certifiers.CommitFromResult(commit) + fc.Commit = CommitFromResult(commit) // now get the proper validators vals, err := p.node.Validators(&commit.Header.Height) diff --git a/certifiers/commit.go b/certifiers/commit.go index 3c37b8599..464a48ba2 100644 --- a/certifiers/commit.go +++ b/certifiers/commit.go @@ -5,7 +5,6 @@ import ( "github.com/pkg/errors" - rtypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" certerr "github.com/tendermint/tendermint/certifiers/errors" @@ -14,46 +13,42 @@ import ( // Certifier checks the votes to make sure the block really is signed properly. 
// Certifier must know the current set of validators by some other means.
 type Certifier interface {
-	Certify(check *Commit) error
+	Certify(check Commit) error
 	ChainID() string
 }
 
-// *Commit is basically the rpc /commit response, but extended
+// Commit is basically the rpc /commit response, but extended
 //
 // This is the basepoint for proving anything on the blockchain. It contains
 // a signed header.  If the signatures are valid and > 2/3 of the known set,
 // we can store this checkpoint and use it to prove any number of aspects of
 // the system: such as txs, abci state, validator sets, etc...
-type Commit rtypes.ResultCommit
+type Commit types.SignedHeader
 
 // FullCommit is a commit and the actual validator set,
 // the base info you need to update to a given point,
 // assuming knowledge of some previous validator set
 type FullCommit struct {
-	*Commit    `json:"commit"`
+	Commit     `json:"commit"`
 	Validators *types.ValidatorSet `json:"validator_set"`
 }
 
-func NewFullCommit(commit *Commit, vals *types.ValidatorSet) FullCommit {
+func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit {
 	return FullCommit{
 		Commit:     commit,
 		Validators: vals,
 	}
 }
 
-func CommitFromResult(commit *rtypes.ResultCommit) *Commit {
-	return (*Commit)(commit)
-}
-
-func (c *Commit) Height() int {
-	if c == nil || c.Header == nil {
+func (c Commit) Height() int {
+	if c.Header == nil {
 		return 0
 	}
 	return c.Header.Height
 }
 
-func (c *Commit) ValidatorsHash() []byte {
-	if c == nil || c.Header == nil {
+func (c Commit) ValidatorsHash() []byte {
+	if c.Header == nil {
 		return nil
 	}
 	return c.Header.ValidatorsHash
@@ -64,7 +59,7 @@ func (c *Commit) ValidatorsHash() []byte {
 //
 // Make sure to use a Verifier to validate the signatures actually provide
 // a significantly strong proof for this header's validity.
-func (c *Commit) ValidateBasic(chainID string) error {
+func (c Commit) ValidateBasic(chainID string) error {
 	// make sure the header is reasonable
 	if c.Header == nil {
 		return errors.New("Commit missing header")
diff --git a/certifiers/dynamic.go b/certifiers/dynamic.go
index a9c1a54f0..b40177949 100644
--- a/certifiers/dynamic.go
+++ b/certifiers/dynamic.go
@@ -46,7 +46,7 @@ func (c *Dynamic) LastHeight() int {
 }
 
 // Certify handles this with
-func (c *Dynamic) Certify(check *Commit) error {
+func (c *Dynamic) Certify(check Commit) error {
 	err := c.cert.Certify(check)
 	if err == nil {
 		// update last seen height if input is valid
diff --git a/certifiers/files/provider.go b/certifiers/files/provider.go
index 8b5c23de6..9dcfb1691 100644
--- a/certifiers/files/provider.go
+++ b/certifiers/files/provider.go
@@ -2,11 +2,11 @@
 Package files defines a Provider that stores all data in the filesystem
 
 We assume the same validator hash may be reused by many different
-headers/*Commits, and thus store it separately. This leaves us
+headers/Commits, and thus store it separately. This leaves us
 with three issues:
 
   1. Given a validator hash, retrieve the validator set if previously stored
-  2. Given a block height, find the *Commit with the highest height <= h
+  2. Given a block height, find the Commit with the highest height <= h
   3. Given a FullCommit, store it quickly to satisfy 1 and 2
 
 Note that we do not worry about caching, as that can be achieved by
diff --git a/certifiers/helper.go b/certifiers/helper.go
index 09c60775e..6f2daa63b 100644
--- a/certifiers/helper.go
+++ b/certifiers/helper.go
@@ -122,28 +122,26 @@ func genHeader(chainID string, height int, txs types.Txs,
 	}
 }
 
-// GenCommit calls genHeader and signHeader and combines them into a *Commit
+// GenCommit calls genHeader and signHeader and combines them into a Commit
 func (v ValKeys) GenCommit(chainID string, height int, txs types.Txs,
-	vals *types.ValidatorSet, appHash []byte, first, last int) *Commit {
+	vals *types.ValidatorSet, appHash []byte, first, last int) Commit {
 
 	header := genHeader(chainID, height, txs, vals, appHash)
-	check := &Commit{
-		Header:          header,
-		Commit:          v.signHeader(header, first, last),
-		CanonicalCommit: true,
+	check := Commit{
+		Header: header,
+		Commit: v.signHeader(header, first, last),
 	}
 	return check
 }
 
-// GenFullCommit calls genHeader and signHeader and combines them into a *Commit
+// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit
 func (v ValKeys) GenFullCommit(chainID string, height int, txs types.Txs,
 	vals *types.ValidatorSet, appHash []byte, first, last int) FullCommit {
 
 	header := genHeader(chainID, height, txs, vals, appHash)
-	commit := &Commit{
-		Header:          header,
-		Commit:          v.signHeader(header, first, last),
-		CanonicalCommit: true,
+	commit := Commit{
+		Header: header,
+		Commit: v.signHeader(header, first, last),
 	}
 	return NewFullCommit(commit, vals)
 }
diff --git a/certifiers/inquirer.go b/certifiers/inquirer.go
index 9dd44e89b..460b622ab 100644
--- a/certifiers/inquirer.go
+++ b/certifiers/inquirer.go
@@ -43,7 +43,7 @@ func (c *Inquiring) LastHeight() int {
 // for a path to prove the new validators.
// // On success, it will store the checkpoint in the store for later viewing -func (c *Inquiring) Certify(commit *Commit) error { +func (c *Inquiring) Certify(commit Commit) error { err := c.useClosestTrust(commit.Height()) if err != nil { return err diff --git a/certifiers/static.go b/certifiers/static.go index a2b9b8ae7..787aecb3f 100644 --- a/certifiers/static.go +++ b/certifiers/static.go @@ -47,7 +47,7 @@ func (c *Static) Hash() []byte { return c.vhash } -func (c *Static) Certify(commit *Commit) error { +func (c *Static) Certify(commit Commit) error { // do basic sanity checks err := commit.ValidateBasic(c.chainID) if err != nil { diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index ad00060f4..6b5e2166f 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -280,7 +280,7 @@ func Commit(heightPtr *int) (*ctypes.ResultCommit, error) { height := blockStore.Height() header := blockStore.LoadBlockMeta(height).Header commit := blockStore.LoadSeenCommit(height) - return &ctypes.ResultCommit{header, commit, false}, nil + return ctypes.NewResultCommit(header, commit, false), nil } height := *heightPtr @@ -298,10 +298,10 @@ func Commit(heightPtr *int) (*ctypes.ResultCommit, error) { // use a non-canonical commit if height == storeHeight { commit := blockStore.LoadSeenCommit(height) - return &ctypes.ResultCommit{header, commit, false}, nil + return ctypes.NewResultCommit(header, commit, false), nil } // Return the canonical commit (comes from the block at height+1) commit := blockStore.LoadBlockCommit(height) - return &ctypes.ResultCommit{header, commit, true}, nil + return ctypes.NewResultCommit(header, commit, true), nil } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index a5ed6f5a8..874e351d3 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -26,9 +26,24 @@ type ResultBlock struct { } type ResultCommit struct { - Header *types.Header `json:"header"` - Commit *types.Commit `json:"commit"` - CanonicalCommit bool `json:"canonical"` + // SignedHeader is header and commit, embedded so we only have + // one level in the json output + types.SignedHeader + CanonicalCommit bool `json:"canonical"` +} + +// NewResultCommit is a helper to initialize the ResultCommit with +// the embedded struct +func NewResultCommit(header *types.Header, commit *types.Commit, + canonical bool) *ResultCommit { + + return &ResultCommit{ + SignedHeader: types.SignedHeader{ + Header: header, + Commit: commit, + }, + CanonicalCommit: canonical, + } } type ResultStatus struct { diff --git a/types/block.go b/types/block.go index 24e0f7445..2291de316 100644 --- a/types/block.go +++ b/types/block.go @@ -368,6 +368,14 @@ func (commit *Commit) StringIndented(indent string) string { //----------------------------------------------------------------------------- +// SignedHeader is a header along with the commits that prove it +type SignedHeader struct { + Header *Header `json:"header"` + Commit *Commit `json:"commit"` +} + +//----------------------------------------------------------------------------- + // Data contains the set of transactions included in the block type Data struct { From ceedd4d96852bee9e8bfb166a22fa2edd9107c67 Mon Sep 17 00:00:00 2001 From: Petabyte Storage Date: Tue, 24 Oct 2017 15:14:13 -0700 Subject: [PATCH 19/19] remove unnecessary plus [ci skip] --- p2p/peer.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/p2p/peer.go b/p2p/peer.go index 1bdb8210a..3652c4654 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -268,11 
+268,11 @@ func (p *peer) CanSend(chID byte) bool { } // WriteTo writes the peer's public key to w. -func (p *peer) WriteTo(w io.Writer) (n int64, err error) { - var n_ int - wire.WriteString(p.key, w, &n_, &err) - n += int64(n_) - return +func (p *peer) WriteTo(w io.Writer) (int64, error) { + var n int + var err error + wire.WriteString(p.key, w, &n, &err) + return int64(n), err } // String representation.
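
The dual-threshold rule enforced by VerifyCommitAny above is what makes it safe to follow a changing validator set: the same commit must carry more than 2/3 of the voting power of the validator set we already trust and more than 2/3 of the set that actually signed the new header, so a small minority of old validators cannot hand authority over to an arbitrary new set. The sketch below illustrates only that counting rule with toy, self-contained types; the names (toyValSet, hasTwoThirds) are invented for illustration and are not the tendermint API.

```go
package main

import "fmt"

// toyValSet is an illustrative stand-in for a validator set, not a tendermint type.
type toyValSet struct {
	powers map[string]int64 // validator address -> voting power
	total  int64            // total voting power of the set
}

// hasTwoThirds reports whether the signers hold strictly more than 2/3 of the
// set's total power, counting each signer at most once and ignoring signers
// the set does not know. A commit is only accepted if it clears this bar
// against BOTH the old (trusted) validator set and the new one.
func hasTwoThirds(set toyValSet, signers []string) bool {
	signed := int64(0)
	seen := map[string]bool{}
	for _, addr := range signers {
		power, ok := set.powers[addr]
		if !ok || seen[addr] {
			continue // unknown or duplicate signer doesn't count
		}
		seen[addr] = true
		signed += power
	}
	return signed > set.total*2/3
}

func main() {
	oldSet := toyValSet{powers: map[string]int64{"a": 10, "b": 10, "c": 10}, total: 30}
	newSet := toyValSet{powers: map[string]int64{"b": 10, "c": 10, "d": 10}, total: 30}
	signers := []string{"b", "c", "d"}

	fmt.Println("old set ok:", hasTwoThirds(oldSet, signers)) // false: only 20 of 30 from the old set
	fmt.Println("new set ok:", hasTwoThirds(newSet, signers)) // true: 30 of 30 from the new set
}
```

In the real code the lookups go through ValidatorSet.GetByAddress and GetByIndex, and each precommit's signature is verified against the chain ID before any voting power is counted.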
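
Patch 18 embeds types.SignedHeader into ResultCommit precisely so the JSON response keeps a single level of nesting. A small, self-contained illustration of that Go embedding behaviour follows; the structs are toy stand-ins with string fields, not the actual tendermint types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// SignedHeader and ResultCommit here mirror only the shape of the change.
type SignedHeader struct {
	Header string `json:"header"`
	Commit string `json:"commit"`
}

type ResultCommit struct {
	SignedHeader      // embedded without a json tag: its fields are promoted
	Canonical    bool `json:"canonical"`
}

func main() {
	out, _ := json.Marshal(ResultCommit{
		SignedHeader: SignedHeader{Header: "h", Commit: "c"},
		Canonical:    true,
	})
	fmt.Println(string(out)) // {"header":"h","commit":"c","canonical":true}
}
```

Because the embedded field carries no json tag, encoding/json promotes its exported fields into the enclosing object, which is what keeps the /commit response flat.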