@ -0,0 +1,28 @@ | |||
# Build container for compiling tendermint with C bindings (leveldb).
FROM amazonlinux:2

RUN yum -y update && \
	yum -y install wget

# EPEL provides packages (e.g. leveldb-devel) not in the Amazon Linux repos.
# Fetch the release RPM over HTTPS (the previous URL used plain HTTP).
RUN wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
	rpm -ivh epel-release-latest-7.noarch.rpm

RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which

# Install the Go toolchain and set up the workspace layout.
ENV GOVERSION=1.12.9

RUN cd /tmp && \
	wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
	tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
	mkdir -p /go/src && \
	mkdir -p /go/bin

ENV PATH=$PATH:/usr/local/go/bin:/go/bin
ENV GOBIN=/go/bin
ENV GOPATH=/go/src

RUN mkdir -p /tendermint
WORKDIR /tendermint

# Build the C-enabled binary from the mounted source tree.
CMD ["/usr/bin/make", "build_c"]
@ -1,23 +0,0 @@ | |||
# Roadmap | |||
BREAKING CHANGES: | |||
- Better support for injecting randomness | |||
- Upgrade consensus for more real-time use of evidence | |||
FEATURES: | |||
- Use the chain as its own CA for nodes and validators | |||
- Tooling to run multiple blockchains/apps, possibly in a single process | |||
- State syncing (without transaction replay) | |||
- Add authentication and rate-limiting to the RPC
IMPROVEMENTS: | |||
- Improve subtleties around mempool caching and logic | |||
- Consensus optimizations: | |||
- cache block parts for faster agreement after round changes | |||
- propagate block parts rarest first | |||
- Better testing of the consensus state machine (ie. use a DSL) | |||
- Auto compiled serialization/deserialization code instead of go-wire reflection | |||
BUG FIXES: | |||
- Graceful handling/recovery for apps that have non-determinism or fail to halt | |||
- Graceful handling/recovery for violations of safety, or liveness |
@ -0,0 +1,387 @@ | |||
// nolint:unused | |||
package v2 | |||
import ( | |||
"fmt" | |||
"math" | |||
"math/rand" | |||
"time" | |||
"github.com/tendermint/tendermint/p2p" | |||
) | |||
// Event is the generic event type handled by the scheduler routines.
type Event interface{}

// blockState tracks the lifecycle of a single block height within the
// schedule: Unknown -> New -> Pending -> Received -> Processed.
type blockState int

const (
	blockStateUnknown blockState = iota
	blockStateNew
	blockStatePending
	blockStateReceived
	blockStateProcessed
)

// String returns a human-readable name for the block state.
func (e blockState) String() string {
	switch e {
	case blockStateNew:
		return "New"
	case blockStatePending:
		return "Pending"
	case blockStateReceived:
		return "Received"
	case blockStateProcessed:
		return "Processed"
	case blockStateUnknown:
		return "Unknown"
	}
	return fmt.Sprintf("unknown blockState: %d", e)
}
// peerState tracks the lifecycle of a peer: New -> Ready -> Removed.
type peerState int

// The constants are explicitly typed as peerState (previously they were
// untyped ints), matching the blockState constants and letting String()
// be called on them directly. The numeric values are unchanged.
const (
	peerStateNew peerState = iota
	peerStateReady
	peerStateRemoved
)

// String returns a human-readable name for the peer state.
func (e peerState) String() string {
	switch e {
	case peerStateNew:
		return "New"
	case peerStateReady:
		return "Ready"
	case peerStateRemoved:
		return "Removed"
	default:
		return fmt.Sprintf("unknown peerState: %d", e)
	}
}
// scPeer is the schedule's bookkeeping record for a single peer.
type scPeer struct {
	peerID p2p.ID
	// state follows the lifecycle peerStateNew -> peerStateReady -> peerStateRemoved.
	state peerState
	// height is the highest block the peer reported having; -1 until setPeerHeight.
	height int64
	// lastTouched is the time of the last recorded activity (see touchPeer).
	lastTouched time.Time
	// lastRate is the last measured receive rate in bytes/sec (see markReceived).
	lastRate int64
}
func newScPeer(peerID p2p.ID) *scPeer { | |||
return &scPeer{ | |||
peerID: peerID, | |||
state: peerStateNew, | |||
height: -1, | |||
lastTouched: time.Time{}, | |||
} | |||
} | |||
// The schedule is a composite data structure which allows a scheduler to keep
// track of which blocks have been scheduled into which state.
type schedule struct {
	// initHeight is the first height this schedule is responsible for;
	// getStateAtHeight treats everything below it as already processed.
	initHeight int64
	// a list of blocks in which blockState
	blockStates map[int64]blockState
	// a map of peerID to schedule specific peer struct `scPeer` used to keep
	// track of peer specific state
	peers map[p2p.ID]*scPeer
	// a map of heights to the peer we are waiting for a response from
	pendingBlocks map[int64]p2p.ID
	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time
	// the peerID of the peer which put the block in blockStateReceived
	receivedBlocks map[int64]p2p.ID
}
func newSchedule(initHeight int64) *schedule { | |||
sc := schedule{ | |||
initHeight: initHeight, | |||
blockStates: make(map[int64]blockState), | |||
peers: make(map[p2p.ID]*scPeer), | |||
pendingBlocks: make(map[int64]p2p.ID), | |||
pendingTime: make(map[int64]time.Time), | |||
receivedBlocks: make(map[int64]p2p.ID), | |||
} | |||
sc.setStateAtHeight(initHeight, blockStateNew) | |||
return &sc | |||
} | |||
func (sc *schedule) addPeer(peerID p2p.ID) error { | |||
if _, ok := sc.peers[peerID]; ok { | |||
return fmt.Errorf("Cannot add duplicate peer %s", peerID) | |||
} | |||
sc.peers[peerID] = newScPeer(peerID) | |||
return nil | |||
} | |||
func (sc *schedule) touchPeer(peerID p2p.ID, time time.Time) error { | |||
peer, ok := sc.peers[peerID] | |||
if !ok { | |||
return fmt.Errorf("Couldn't find peer %s", peerID) | |||
} | |||
if peer.state == peerStateRemoved { | |||
return fmt.Errorf("Tried to touch peer in peerStateRemoved") | |||
} | |||
peer.lastTouched = time | |||
return nil | |||
} | |||
func (sc *schedule) removePeer(peerID p2p.ID) error { | |||
peer, ok := sc.peers[peerID] | |||
if !ok { | |||
return fmt.Errorf("Couldn't find peer %s", peerID) | |||
} | |||
if peer.state == peerStateRemoved { | |||
return fmt.Errorf("Tried to remove peer %s in peerStateRemoved", peerID) | |||
} | |||
for height, pendingPeerID := range sc.pendingBlocks { | |||
if pendingPeerID == peerID { | |||
sc.setStateAtHeight(height, blockStateNew) | |||
delete(sc.pendingTime, height) | |||
delete(sc.pendingBlocks, height) | |||
} | |||
} | |||
for height, rcvPeerID := range sc.receivedBlocks { | |||
if rcvPeerID == peerID { | |||
sc.setStateAtHeight(height, blockStateNew) | |||
delete(sc.receivedBlocks, height) | |||
} | |||
} | |||
peer.state = peerStateRemoved | |||
return nil | |||
} | |||
func (sc *schedule) setPeerHeight(peerID p2p.ID, height int64) error { | |||
peer, ok := sc.peers[peerID] | |||
if !ok { | |||
return fmt.Errorf("Can't find peer %s", peerID) | |||
} | |||
if peer.state == peerStateRemoved { | |||
return fmt.Errorf("Cannot set peer height for a peer in peerStateRemoved") | |||
} | |||
if height < peer.height { | |||
return fmt.Errorf("Cannot move peer height lower. from %d to %d", peer.height, height) | |||
} | |||
peer.height = height | |||
peer.state = peerStateReady | |||
for i := sc.minHeight(); i <= height; i++ { | |||
if sc.getStateAtHeight(i) == blockStateUnknown { | |||
sc.setStateAtHeight(i, blockStateNew) | |||
} | |||
} | |||
return nil | |||
} | |||
func (sc *schedule) getStateAtHeight(height int64) blockState { | |||
if height < sc.initHeight { | |||
return blockStateProcessed | |||
} else if state, ok := sc.blockStates[height]; ok { | |||
return state | |||
} else { | |||
return blockStateUnknown | |||
} | |||
} | |||
func (sc *schedule) getPeersAtHeight(height int64) []*scPeer { | |||
peers := []*scPeer{} | |||
for _, peer := range sc.peers { | |||
if peer.height >= height { | |||
peers = append(peers, peer) | |||
} | |||
} | |||
return peers | |||
} | |||
func (sc *schedule) peersInactiveSince(duration time.Duration, now time.Time) []p2p.ID { | |||
peers := []p2p.ID{} | |||
for _, peer := range sc.peers { | |||
if now.Sub(peer.lastTouched) > duration { | |||
peers = append(peers, peer.peerID) | |||
} | |||
} | |||
return peers | |||
} | |||
func (sc *schedule) peersSlowerThan(minSpeed int64) []p2p.ID { | |||
peers := []p2p.ID{} | |||
for _, peer := range sc.peers { | |||
if peer.lastRate < minSpeed { | |||
peers = append(peers, peer.peerID) | |||
} | |||
} | |||
return peers | |||
} | |||
// setStateAtHeight records `state` for `height` unconditionally; callers
// are responsible for validating the transition first.
func (sc *schedule) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}
func (sc *schedule) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error { | |||
peer, ok := sc.peers[peerID] | |||
if !ok { | |||
return fmt.Errorf("Can't find peer %s", peerID) | |||
} | |||
if peer.state == peerStateRemoved { | |||
return fmt.Errorf("Cannot receive blocks from removed peer %s", peerID) | |||
} | |||
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { | |||
return fmt.Errorf("Received block %d from peer %s without being requested", height, peerID) | |||
} | |||
pendingTime, ok := sc.pendingTime[height] | |||
if !ok || now.Sub(pendingTime) <= 0 { | |||
return fmt.Errorf("Clock error. Block %d received at %s but requested at %s", | |||
height, pendingTime, now) | |||
} | |||
peer.lastRate = size / int64(now.Sub(pendingTime).Seconds()) | |||
sc.setStateAtHeight(height, blockStateReceived) | |||
delete(sc.pendingBlocks, height) | |||
delete(sc.pendingTime, height) | |||
sc.receivedBlocks[height] = peerID | |||
return nil | |||
} | |||
func (sc *schedule) markPending(peerID p2p.ID, height int64, time time.Time) error { | |||
peer, ok := sc.peers[peerID] | |||
if !ok { | |||
return fmt.Errorf("Can't find peer %s", peerID) | |||
} | |||
state := sc.getStateAtHeight(height) | |||
if state != blockStateNew { | |||
return fmt.Errorf("Block %d should be in blockStateNew but was %s", height, state) | |||
} | |||
if peer.state != peerStateReady { | |||
return fmt.Errorf("Cannot schedule %d from %s in %s", height, peerID, peer.state) | |||
} | |||
if height > peer.height { | |||
return fmt.Errorf("Cannot request height %d from peer %s who is at height %d", | |||
height, peerID, peer.height) | |||
} | |||
sc.setStateAtHeight(height, blockStatePending) | |||
sc.pendingBlocks[height] = peerID | |||
// XXX: to make this more accurate we can introduce a message from | |||
// the IO routine which indicates the time the request was put on the wire | |||
sc.pendingTime[height] = time | |||
return nil | |||
} | |||
func (sc *schedule) markProcessed(height int64) error { | |||
state := sc.getStateAtHeight(height) | |||
if state != blockStateReceived { | |||
return fmt.Errorf("Can't mark height %d received from block state %s", height, state) | |||
} | |||
delete(sc.receivedBlocks, height) | |||
sc.setStateAtHeight(height, blockStateProcessed) | |||
return nil | |||
} | |||
// allBlockProcessed returns true if all blocks are in blockStateProcessed and | |||
// determines if the schedule has been completed | |||
func (sc *schedule) allBlocksProcessed() bool { | |||
for _, state := range sc.blockStates { | |||
if state != blockStateProcessed { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
// highest block | state == blockStateNew | |||
func (sc *schedule) maxHeight() int64 { | |||
var max int64 = 0 | |||
for height, state := range sc.blockStates { | |||
if state == blockStateNew && height > max { | |||
max = height | |||
} | |||
} | |||
return max | |||
} | |||
// lowest block | state == blockStateNew | |||
func (sc *schedule) minHeight() int64 { | |||
var min int64 = math.MaxInt64 | |||
for height, state := range sc.blockStates { | |||
if state == blockStateNew && height < min { | |||
min = height | |||
} | |||
} | |||
return min | |||
} | |||
func (sc *schedule) pendingFrom(peerID p2p.ID) []int64 { | |||
heights := []int64{} | |||
for height, pendingPeerID := range sc.pendingBlocks { | |||
if pendingPeerID == peerID { | |||
heights = append(heights, height) | |||
} | |||
} | |||
return heights | |||
} | |||
func (sc *schedule) selectPeer(peers []*scPeer) *scPeer { | |||
// FIXME: properPeerSelector | |||
s := rand.NewSource(time.Now().Unix()) | |||
r := rand.New(s) | |||
return peers[r.Intn(len(peers))] | |||
} | |||
// XXX: this duplicates the logic of peersInactiveSince and peersSlowerThan | |||
func (sc *schedule) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.ID { | |||
prunable := []p2p.ID{} | |||
for peerID, peer := range sc.peers { | |||
if now.Sub(peer.lastTouched) > peerTimout || peer.lastRate < minRecvRate { | |||
prunable = append(prunable, peerID) | |||
} | |||
} | |||
return prunable | |||
} | |||
func (sc *schedule) numBlockInState(targetState blockState) uint32 { | |||
var num uint32 = 0 | |||
for _, state := range sc.blockStates { | |||
if state == targetState { | |||
num++ | |||
} | |||
} | |||
return num | |||
} |
@ -0,0 +1,272 @@ | |||
package v2 | |||
import ( | |||
"testing" | |||
"time" | |||
"github.com/stretchr/testify/assert" | |||
"github.com/tendermint/tendermint/p2p" | |||
) | |||
// TestScheduleInit checks that a fresh schedule marks the initial height
// as New, everything below it as Processed, and everything above it as
// Unknown.
func TestScheduleInit(t *testing.T) {
	var (
		initHeight int64 = 5
		sc = newSchedule(initHeight)
	)
	assert.Equal(t, blockStateNew, sc.getStateAtHeight(initHeight))
	assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1))
	assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1))
}
// TestAddPeer checks that distinct peers can be added and that adding
// the same peer a second time is rejected.
func TestAddPeer(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		peerIDTwo p2p.ID = "2"
		sc = newSchedule(initHeight)
	)
	assert.Nil(t, sc.addPeer(peerID))
	assert.Nil(t, sc.addPeer(peerIDTwo))
	// Duplicate registration must fail.
	assert.Error(t, sc.addPeer(peerID))
}
// TestTouchPeer checks that touching an unknown peer fails, that touching
// a known peer succeeds, and that peersInactiveSince only reports peers
// whose last touch is older than the threshold.
func TestTouchPeer(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		sc = newSchedule(initHeight)
		now = time.Now()
	)
	// NOTE(review): the message mentions errPeerNotFound, but touchPeer
	// returns a plain fmt.Errorf — confirm whether sentinel errors are planned.
	assert.Error(t, sc.touchPeer(peerID, now),
		"Touching an unknown peer should return errPeerNotFound")
	assert.Nil(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.Nil(t, sc.touchPeer(peerID, now),
		"Touching a peer should return no error")
	// 9s after the touch is within the 10s threshold; 11s after is not.
	threshold := 10 * time.Second
	assert.Empty(t, sc.peersInactiveSince(threshold, now.Add(9*time.Second)),
		"Expected no peers to have been touched over 9 seconds")
	assert.Containsf(t, sc.peersInactiveSince(threshold, now.Add(11*time.Second)), peerID,
		"Expected one %s to have been touched over 10 seconds ago", peerID)
}
// TestPeerHeight checks that setPeerHeight makes every height from
// initHeight up to the peer's height schedulable (blockStateNew) and
// that the peer is reported at each of those heights.
func TestPeerHeight(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		peerHeight int64 = 20
		sc = newSchedule(initHeight)
	)
	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight))
	for i := initHeight; i <= peerHeight; i++ {
		assert.Equal(t, sc.getStateAtHeight(i), blockStateNew,
			"Expected all blocks to be in blockStateNew")
		peerIDs := []p2p.ID{}
		for _, peer := range sc.getPeersAtHeight(i) {
			peerIDs = append(peerIDs, peer.peerID)
		}
		assert.Containsf(t, peerIDs, peerID,
			"Expected %s to have block %d", peerID, i)
	}
}
// TestTransitionPending exercises the New -> Pending transition: only a
// ready peer may take a block pending, a height can be pending with at
// most one peer at a time, and removing the requesting peer returns the
// block to blockStateNew so another peer can pick it up.
func TestTransitionPending(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		peerIDTwo p2p.ID = "2"
		peerHeight int64 = 20
		sc = newSchedule(initHeight)
		now = time.Now()
	)
	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.Nil(t, sc.addPeer(peerIDTwo),
		"Adding a peer should return no error")
	// NOTE(review): at this point the block at peerHeight is still
	// blockStateUnknown (no setPeerHeight yet), so markPending fails on the
	// block-state check as well as the peer being in peerStateNew — confirm
	// which failure this assertion is meant to pin down.
	assert.Error(t, sc.markPending(peerID, peerHeight, now),
		"Expected scheduling a block from a peer in peerStateNew to fail")
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoError(t, sc.setPeerHeight(peerIDTwo, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
		"Expected markingPending new block to succeed")
	assert.Error(t, sc.markPending(peerIDTwo, peerHeight, now),
		"Expected markingPending by a second peer to fail")
	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
		"Expected the block to to be in blockStatePending")
	// Removing the requesting peer must release the pending block.
	assert.NoError(t, sc.removePeer(peerID),
		"Expected removePeer to return no error")
	assert.Equal(t, blockStateNew, sc.getStateAtHeight(peerHeight),
		"Expected the block to to be in blockStateNew")
	assert.Error(t, sc.markPending(peerID, peerHeight, now),
		"Expected markingPending removed peer to fail")
	assert.NoError(t, sc.markPending(peerIDTwo, peerHeight, now),
		"Expected markingPending on a ready peer to succeed")
	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
		"Expected the block to to be in blockStatePending")
}
// TestTransitionReceived exercises the Pending -> Received transition:
// only the peer the block was requested from may deliver it, a block
// cannot be received twice, and removing the delivering peer returns the
// block to blockStateNew so a second peer can request and deliver it.
func TestTransitionReceived(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		peerIDTwo p2p.ID = "2"
		peerHeight int64 = 20
		blockSize int64 = 1024
		sc = newSchedule(initHeight)
		now = time.Now()
		// receivedAt is a full second after the request, so the rate
		// computation in markReceived has a non-zero interval to divide by.
		receivedAt = now.Add(1 * time.Second)
	)
	assert.NoError(t, sc.addPeer(peerID),
		"Expected adding peer %s to succeed", peerID)
	assert.NoError(t, sc.addPeer(peerIDTwo),
		"Expected adding peer %s to succeed", peerIDTwo)
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoErrorf(t, sc.setPeerHeight(peerIDTwo, peerHeight),
		"Expected setPeerHeight on %s to %d to succeed", peerIDTwo, peerHeight)
	assert.NoError(t, sc.markPending(peerID, initHeight, now),
		"Expected markingPending new block to succeed")
	assert.Error(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
		"Expected marking markReceived from a non requesting peer to fail")
	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected marking markReceived on a pending block to succeed")
	assert.Error(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected marking markReceived on received block to fail")
	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockHeightReceived", initHeight)
	// Removing the peer that delivered the block releases it again.
	assert.NoErrorf(t, sc.removePeer(peerID),
		"Expected removePeer removing %s to succeed", peerID)
	assert.Equalf(t, blockStateNew, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateNew", initHeight)
	assert.NoErrorf(t, sc.markPending(peerIDTwo, initHeight, now),
		"Expected markingPending %d from %s to succeed", initHeight, peerIDTwo)
	assert.NoErrorf(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
		"Expected marking markReceived %d from %s to succeed", initHeight, peerIDTwo)
	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateReceived", initHeight)
}
// TestTransitionProcessed exercises the Received -> Processed transition:
// only a received block can be processed, and once processed the state
// survives removal of the peer that delivered it.
func TestTransitionProcessed(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		peerHeight int64 = 20
		blockSize int64 = 1024
		sc = newSchedule(initHeight)
		now = time.Now()
		// One second later so markReceived's rate computation has a
		// non-zero interval.
		receivedAt = now.Add(1 * time.Second)
	)
	assert.NoError(t, sc.addPeer(peerID),
		"Expected adding peer %s to succeed", peerID)
	assert.NoErrorf(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight on %s to %d to succeed", peerID, peerHeight)
	assert.NoError(t, sc.markPending(peerID, initHeight, now),
		"Expected markingPending new block to succeed")
	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected marking markReceived on a pending block to succeed")
	// initHeight+1 was never received, so it cannot be processed.
	assert.Error(t, sc.markProcessed(initHeight+1),
		"Expected marking %d as processed to fail", initHeight+1)
	assert.NoError(t, sc.markProcessed(initHeight),
		"Expected marking %d as processed to succeed", initHeight)
	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateProcessed", initHeight)
	// Processed state is final: removing the delivering peer does not
	// reset the block (unlike Pending/Received).
	assert.NoError(t, sc.removePeer(peerID),
		"Expected removing peer %s to succeed", peerID)
	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateProcessed", initHeight)
}
// TestMinMaxHeight checks that min/max over blockStateNew heights start
// at the initial height, that maxHeight grows when a taller peer is
// added, and that minHeight advances past a height once it goes pending.
func TestMinMaxHeight(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		peerHeight int64 = 20
		sc = newSchedule(initHeight)
		now = time.Now()
	)
	assert.Equal(t, initHeight, sc.minHeight(),
		"Expected min height to be the initialized height")
	assert.Equal(t, initHeight, sc.maxHeight(),
		"Expected max height to be the initialized height")
	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.Equal(t, peerHeight, sc.maxHeight(),
		"Expected max height to increase to peerHeight")
	// Marking initHeight pending takes it out of blockStateNew, so the
	// minimum New height moves to initHeight+1.
	assert.Nil(t, sc.markPending(peerID, initHeight, now.Add(1*time.Second)),
		"Expected marking initHeight as pending to return no error")
	assert.Equal(t, initHeight+1, sc.minHeight(),
		"Expected marking initHeight as pending to move minHeight forward")
}
// TestPeersSlowerThan checks that after receiving blockSize bytes over a
// one-second interval (rate = blockSize bytes/sec), the peer is reported
// slower than blockSize+1 but not slower than blockSize-1.
func TestPeersSlowerThan(t *testing.T) {
	var (
		initHeight int64 = 5
		peerID p2p.ID = "1"
		peerHeight int64 = 20
		blockSize int64 = 1024
		sc = newSchedule(initHeight)
		now = time.Now()
		// Exactly one second elapses, so lastRate == blockSize.
		receivedAt = now.Add(1 * time.Second)
	)
	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
		"Expected markingPending on to return no error")
	assert.NoError(t, sc.markReceived(peerID, peerHeight, blockSize, receivedAt),
		"Expected markingPending on to return no error")
	assert.Empty(t, sc.peersSlowerThan(blockSize-1),
		"expected no peers to be slower than blockSize-1 bytes/sec")
	assert.Containsf(t, sc.peersSlowerThan(blockSize+1), peerID,
		"expected %s to be slower than blockSize+1 bytes/sec", peerID)
}
@ -0,0 +1,34 @@ | |||
package main | |||
import ( | |||
"fmt" | |||
"strings" | |||
"github.com/snikch/goodman/hooks" | |||
"github.com/snikch/goodman/transaction" | |||
) | |||
func main() { | |||
// This must be compiled beforehand and given to dredd as parameter, in the meantime the server should be running | |||
h := hooks.NewHooks() | |||
server := hooks.NewServer(hooks.NewHooksRunner(h)) | |||
h.BeforeAll(func(t []*transaction.Transaction) { | |||
fmt.Println(t[0].Name) | |||
}) | |||
h.BeforeEach(func(t *transaction.Transaction) { | |||
if strings.HasPrefix(t.Name, "Tx") || | |||
// We need a proper example of evidence to broadcast | |||
strings.HasPrefix(t.Name, "Info > /broadcast_evidence") || | |||
// We need a proper example of path and data | |||
strings.HasPrefix(t.Name, "ABCI > /abci_query") || | |||
// We need to find a way to make a transaction before starting the tests, | |||
// that hash should replace the dummy one in hte swagger file | |||
strings.HasPrefix(t.Name, "Info > /tx") { | |||
t.Skip = true | |||
fmt.Printf("%s Has been skipped\n", t.Name) | |||
} | |||
}) | |||
server.Serve() | |||
defer server.Listener.Close() | |||
fmt.Print("FINE") | |||
} |
@ -0,0 +1,33 @@ | |||
# Developer Sessions | |||
The Tendermint Core developer call is comprised of both [Interchain | |||
Foundation](http://interchain.io/) and [All in Bits](https://tendermint.com/) | |||
team members discussing the development of [Tendermint | |||
BFT](https://github.com/tendermint/tendermint) and related research. The goal | |||
of the Tendermint Core developer calls is to provide transparency into the | |||
decision making process, technical information, update cycles etc. | |||
## List | |||
| Date | Topic | Link(s) | | |||
| --------------- | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------- | | |||
| August 2019 | Part Three: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=whyL6UrKe7I&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | | |||
| August 2019 | Fork Accountability | [YouTube](https://www.youtube.com/watch?v=Jph-4PGtdPo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | | |||
| July 2019 | Part Two: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=gTjG7jNNdKQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | | |||
| July 2019 | Part One: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=C6fH_sgPJzA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | | |||
| June 2019 | Testnet Deployments | [YouTube](https://www.youtube.com/watch?v=gYA6no7tRlM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | | |||
| June 2019 | Blockchain Reactor Refactor | [YouTube](https://www.youtube.com/watch?v=JLBGH8yxABk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | | |||
| June 2019 | Tendermint Rust Libraries | [YouTube](https://www.youtube.com/watch?v=-WXKdyoGHwA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | | |||
| May 2019 | Merkle Tree Deep Dive | [YouTube](https://www.youtube.com/watch?v=L3bt2Uw8ICg&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | | |||
| May 2019 | Remote Signer Refactor | [YouTube](https://www.youtube.com/watch?v=eUyXXEEuBzQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=12) | | |||
| May 2019 | Introduction to Ansible | [YouTube](https://www.youtube.com/watch?v=72clQLjzPg4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=14&t=0s) |
| April 2019 | Tendermint State Sync Design Discussion | [YouTube](https://www.youtube.com/watch?v=4k23j2QHwrM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | | |||
| April 2019 | ADR-036 - Blockchain Reactor Refactor | [YouTube](https://www.youtube.com/watch?v=TW2xC1LwEkE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | | |||
| April 2019 | Verifying Distributed Algorithms | [YouTube](https://www.youtube.com/watch?v=tMd4lgPVBxE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | | |||
| April 2019 | Byzantine Model Checker Presentation | [YouTube](https://www.youtube.com/watch?v=rdXl4VCQyow&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | | |||
| January 2019 | Proposer Selection in Idris | [YouTube](https://www.youtube.com/watch?v=hWZdc9c1aH8&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | | |||
| January 2019 | Current Mempool Design | [YouTube](https://www.youtube.com/watch?v=--iGIYYiLu4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | | |||
| December 2018 | ABCI Proxy App | [YouTube](https://www.youtube.com/watch?v=s6sQ2HOVHdo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | | |||
| October 2018 | DB Performance | [YouTube](https://www.youtube.com/watch?v=jVSNHi4l0fQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | | |||
| October 2018 | Alternative Mempool Algorithms | [YouTube](https://www.youtube.com/watch?v=XxH5ZtM4vMM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=2) | | |||
| October 2018 | Tendermint Termination | [YouTube](https://www.youtube.com/watch?v=YBZjecfjeIk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) | |
@ -0,0 +1,239 @@ | |||
# ADR 042: State Sync Design | |||
## Changelog | |||
2019-06-27: Init by EB | |||
2019-07-04: Follow up by brapse | |||
## Context | |||
StateSync is a feature which would allow a new node to receive a | |||
snapshot of the application state without downloading blocks or going | |||
through consensus. Once downloaded, the node could switch to FastSync | |||
and eventually participate in consensus. The goal of StateSync is to | |||
facilitate setting up a new node as quickly as possible. | |||
## Considerations | |||
Because Tendermint doesn't know anything about the application state, | |||
StateSync will broker messages between nodes and through | |||
the ABCI to an opaque application. The implementation will have multiple
touch points on both the tendermint code base and ABCI application. | |||
* A StateSync reactor to facilitate peer communication - Tendermint | |||
* A Set of ABCI messages to transmit application state to the reactor - Tendermint | |||
* A Set of MultiStore APIs for exposing snapshot data to the ABCI - ABCI application | |||
* A Storage format with validation and performance considerations - ABCI application | |||
### Implementation Properties | |||
Beyond the approach, any implementation of StateSync can be evaluated | |||
across different criteria: | |||
* Speed: Expected throughput of producing and consuming snapshots | |||
* Safety: Cost of pushing invalid snapshots to a node | |||
* Liveness: Cost of preventing a node from receiving/constructing a snapshot | |||
* Effort: How much effort does an implementation require | |||
### Implementation Question | |||
* What is the format of a snapshot | |||
* Complete snapshot | |||
* Ordered IAVL key ranges | |||
* Individually compressed chunks which can be validated
* How is data validated | |||
* Trust a peer with its data blindly
* Trust a majority of peers | |||
* Use light client validation to validate each chunk against consensus | |||
produced merkle tree root | |||
* What are the performance characteristics | |||
* Random vs sequential reads | |||
* How parallelizable is the scheduling algorithm
### Proposals | |||
Broadly speaking there are two approaches to this problem which have had | |||
varying degrees of discussion and progress. These approach can be | |||
summarized as: | |||
**Lazy:** Where snapshots are produced dynamically at request time. This | |||
solution would use the existing data structure. | |||
**Eager:** Where snapshots are produced periodically and served from disk at | |||
request time. This solution would create an auxiliary data structure | |||
optimized for batch read/writes. | |||
Additionally the proposals tend to vary on how they provide safety
properties. | |||
**LightClient** Where a client can acquire the merkle root from the block
headers synchronized from a trusted validator set. Subsets of the application state, | |||
called chunks can therefore be validated on receipt to ensure each chunk | |||
is part of the merkle root. | |||
**Majority of Peers** Where manifests of chunks along with checksums are | |||
downloaded and compared against versions provided by a majority of | |||
peers. | |||
#### Lazy StateSync | |||
An [initial specification](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) was published by Alexis Sellier. | |||
In this design, the state has a given `size` of primitive elements (like | |||
keys or nodes), each element is assigned a number from 0 to `size-1`, | |||
and chunks consist of a range of such elements. Ackratos raised
[some concerns](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit) | |||
about this design, somewhat specific to the IAVL tree, and mainly concerning | |||
performance of random reads and of iterating through the tree to determine element numbers | |||
(ie. elements aren't indexed by the element number). | |||
An alternative design was suggested by Jae Kwon in | |||
[#3639](https://github.com/tendermint/tendermint/issues/3639) where chunking | |||
happens lazily and in a dynamic way: nodes request key ranges from their peers, | |||
and peers respond with some subset of the | |||
requested range and with notes on how to request the rest in parallel from other | |||
peers. Unlike chunk numbers, keys can be verified directly. And if some keys in the | |||
range are omitted, proofs for the range will fail to verify.
This way a node can start by requesting the entire tree from one peer, | |||
and that peer can respond with say the first few keys, and the ranges to request | |||
from other peers. | |||
Additionally, per chunk validation tends to come more naturally to the | |||
Lazy approach since it tends to use the existing structure of the tree | |||
(ie. keys or nodes) rather than state-sync specific chunks. Such a | |||
design for tendermint was originally tracked in | |||
[#828](https://github.com/tendermint/tendermint/issues/828). | |||
#### Eager StateSync | |||
Warp Sync, as implemented in Parity
(["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html)), rapidly
downloads both blocks and state snapshots from peers. Data is carved into ~4MB
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a | |||
manifest file which co-ordinates the state-sync. Obtaining a correct manifest | |||
file seems to require an honest majority of peers. This means you may not find | |||
out the state is incorrect until you download the whole thing and compare it | |||
with a verified block header. | |||
A similar solution was implemented by Binance in | |||
[#3594](https://github.com/tendermint/tendermint/pull/3594) | |||
based on their initial implementation in | |||
[PR #3243](https://github.com/tendermint/tendermint/pull/3243) | |||
and [some learnings](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit). | |||
Note this still requires the honest majority peer assumption. | |||
As an eager protocol, warp-sync can efficiently compress larger, more | |||
predictable chunks once per snapshot and service many new peers. By
comparison lazy chunkers would have to compress each chunk at request | |||
time. | |||
### Analysis of Lazy vs Eager | |||
Lazy vs Eager have more in common than they differ. They all require | |||
reactors on the tendermint side, a set of ABCI messages and a method for | |||
serializing/deserializing snapshots facilitated by a SnapshotFormat. | |||
The biggest difference between Lazy and Eager proposals is in the | |||
read/write patterns necessitated by serving a snapshot chunk. | |||
Specifically, Lazy State Sync performs random reads to the underlying data | |||
structure while Eager can optimize for sequential reads. | |||
This distinction between approaches was demonstrated by Binance's
[ackratos](https://github.com/ackratos) in their implementation of [Lazy | |||
State sync](https://github.com/tendermint/tendermint/pull/3243), the
[analysis](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/) | |||
of the performance, and follow up implementation of [Warp | |||
Sync](http://github.com/tendermint/tendermint/pull/3594). | |||
#### Comparing Security Models
There are several different security models which have been | |||
discussed/proposed in the past but generally fall into two categories. | |||
Light client validation: In which the node receiving data is expected to | |||
first perform a light client sync and have all the necessary block
headers. Within the trusted block header (trusted in terms of from a | |||
validator set subject to [weak | |||
subjectivity](https://github.com/tendermint/tendermint/pull/3795)) and | |||
can compare any subset of keys called a chunk against the merkle root. | |||
The advantage of light client validation is that the block headers are | |||
signed by validators which have something to lose for malicious | |||
behaviour. If a validator were to provide an invalid proof, they can be | |||
slashed. | |||
Majority of peer validation: A manifest file containing a list of chunks | |||
along with checksums of each chunk is downloaded from a | |||
trusted source. That source can be a community resource similar to | |||
[sum.golang.org](https://sum.golang.org) or downloaded from the majority | |||
of peers. One disadvantage of the majority of peer security model is the
vulnerability to eclipse attacks in which a malicious user looks to
saturate a target node's peer list and produce a manufactured picture of | |||
majority. | |||
A third option would be to include snapshot related data in the | |||
block header. This could include the manifest with related checksums and be | |||
secured through consensus. One challenge of this approach is to | |||
ensure that creating snapshots does not put undue burden on block
proposers by synchronizing snapshot creation and block creation. One
approach to minimizing the burden is for snapshots for height | |||
`H` to be included in block `H+n` where `n` is some number of blocks away,
giving the block proposer enough time to complete the snapshot
asynchronously.
## Proposal: Eager StateSync With Per Chunk Light Client Validation | |||
The conclusion after some consideration of the advantages/disadvantages of
eager/lazy and different security models is to produce a state sync | |||
which eagerly produces snapshots and uses light client validation. This | |||
approach has the performance advantages of pre-computing efficient | |||
snapshots which can be streamed to new nodes on demand using sequential IO.
Secondly, by using light client validation we can validate each chunk on
receipt and avoid the potential eclipse attack of majority of peer based | |||
security. | |||
### Implementation | |||
Tendermint is responsible for downloading and verifying chunks of | |||
AppState from peers. ABCI Application is responsible for taking | |||
AppStateChunk objects from TM and constructing a valid state tree whose | |||
root corresponds with the AppHash of the syncing block. In particular we
will need to implement:
* Build a new StateSync reactor which brokers message transmission between the peers
and the ABCI application | |||
* A set of ABCI Messages | |||
* Design SnapshotFormat as an interface which can: | |||
* validate chunks | |||
* read/write chunks from file | |||
* read/write chunks to/from application state store | |||
* convert manifests into chunkRequest ABCI messages | |||
* Implement SnapshotFormat for cosmos-hub with concrete implementation for: | |||
* read/write chunks in a way which can be: | |||
* parallelized across peers | |||
* validated on receipt | |||
* read/write to/from IAVL+ tree | |||
![StateSync Architecture Diagram](img/state-sync.png) | |||
## Implementation Path | |||
* Create StateSync reactor based on [#3753](https://github.com/tendermint/tendermint/pull/3753) | |||
* Design SnapshotFormat with an eye towards cosmos-hub implementation | |||
* ABCI message to send/receive SnapshotFormat | |||
* IAVL+ changes to support SnapshotFormat | |||
* Deliver Warp sync (no chunk validation) | |||
* light client implementation for weak subjectivity | |||
* Deliver StateSync with chunk validation | |||
## Status | |||
Proposed | |||
## Consequences
### Neutral | |||
### Positive | |||
* Safe & performant state sync design substantiated with real world implementation experience | |||
* General interfaces allowing application specific innovation | |||
* Parallelizable implementation trajectory with reasonable engineering effort
### Negative | |||
* Static Scheduling lacks opportunity for real time chunk availability optimizations | |||
## References | |||
[sync: Sync current state without full replay for Applications](https://github.com/tendermint/tendermint/issues/828) - original issue | |||
[tendermint state sync proposal](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) - Cloudhead proposal | |||
[tendermint state sync proposal 2](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit) - ackratos proposal | |||
[proposal 2 implementation](https://github.com/tendermint/tendermint/pull/3243) - ackratos implementation | |||
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal | |||
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos | |||
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed | |||
@ -0,0 +1,141 @@ | |||
# ADR 044: Lite Client with Weak Subjectivity | |||
## Changelog | |||
* 13-07-2019: Initial draft | |||
* 14-08-2019: Address cwgoes comments | |||
## Context | |||
The concept of light clients was introduced in the Bitcoin white paper. It | |||
describes a watcher of distributed consensus process that only validates the | |||
consensus algorithm and not the state machine transactions within. | |||
Tendermint light clients allow bandwidth & compute-constrained devices, such as smartphones, low-power embedded chips, or other blockchains to | |||
efficiently verify the consensus of a Tendermint blockchain. This forms the | |||
basis of safe and efficient state synchronization for new network nodes and | |||
inter-blockchain communication (where a light client of one Tendermint instance | |||
runs in another chain's state machine). | |||
In a network that is expected to reliably punish validators for misbehavior | |||
by slashing bonded stake and where the validator set changes | |||
infrequently, clients can take advantage of this assumption to safely | |||
synchronize a lite client without downloading the intervening headers. | |||
Light clients (and full nodes) operating in the Proof Of Stake context need a | |||
trusted block height from a trusted source that is no older than 1 unbonding | |||
window plus a configurable evidence submission synchrony bound. This is called “weak subjectivity”. | |||
Weak subjectivity is required in Proof of Stake blockchains because it is | |||
costless for an attacker to buy up voting keys that are no longer bonded and | |||
fork the network at some point in its prior history. See Vitalik’s post at | |||
[Proof of Stake: How I Learned to Love Weak | |||
Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/). | |||
Currently, Tendermint provides a lite client implementation in the | |||
[lite](https://github.com/tendermint/tendermint/tree/master/lite) package. This | |||
lite client implements a bisection algorithm that tries to use a binary search | |||
to find the minimum number of block headers where the validator set voting | |||
power changes are less than 1/3rd. This interface does not support weak
subjectivity at this time. The Cosmos SDK also does not support counterfactual | |||
slashing, nor does the lite client have any capacity to report evidence making | |||
these systems *theoretically unsafe*. | |||
NOTE: Tendermint provides a somewhat different (stronger) light client model | |||
than Bitcoin under eclipse, since the eclipsing node(s) can only fool the light | |||
client if they have two-thirds of the private keys from the last root-of-trust. | |||
## Decision | |||
### The Weak Subjectivity Interface | |||
Add the weak subjectivity interface for when a new light client connects to the | |||
network or when a light client that has been offline for longer than the | |||
unbonding period connects to the network. Specifically, the node needs to | |||
initialize the following structure before syncing from user input: | |||
``` | |||
type TrustOptions struct { | |||
// Required: only trust commits up to this old. | |||
// Should be equal to the unbonding period minus some delta for evidence reporting. | |||
TrustPeriod time.Duration `json:"trust-period"` | |||
// Option 1: TrustHeight and TrustHash can both be provided | |||
// to force the trusting of a particular height and hash. | |||
// If the latest trusted height/hash is more recent, then this option is | |||
// ignored. | |||
TrustHeight int64 `json:"trust-height"` | |||
TrustHash []byte `json:"trust-hash"` | |||
// Option 2: Callback can be set to implement a confirmation | |||
// step if the trust store is uninitialized, or expired. | |||
Callback func(height int64, hash []byte) error | |||
} | |||
``` | |||
The expectation is the user will get this information from a trusted source | |||
like a validator, a friend, or a secure website. A more user friendly | |||
solution with trust tradeoffs is that we establish an https based protocol with | |||
a default end point that populates this information. Also an on-chain registry | |||
of roots-of-trust (e.g. on the Cosmos Hub) seems likely in the future. | |||
### Linear Verification | |||
The linear verification algorithm requires downloading all headers | |||
between the `TrustHeight` and the `LatestHeight`. The lite client downloads the | |||
full header for the provided `TrustHeight` and then proceeds to download `N+1` | |||
headers and applies the [Tendermint validation | |||
rules](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#validation) | |||
to each block. | |||
### Bisecting Verification | |||
Bisecting Verification is a more bandwidth and compute intensive mechanism that | |||
in the most optimistic case requires a light client to only download two block | |||
headers to come into synchronization. | |||
The bisection algorithm proceeds in the following fashion. The client downloads | |||
and verifies the full block header for `TrustHeight` and then fetches | |||
`LatestHeight` block header. The client then verifies the `LatestHeight`
header. Finally the client attempts to verify the `LatestHeight` header with | |||
voting powers taken from `NextValidatorSet` in the `TrustHeight` header. This | |||
verification will succeed if the validators from `TrustHeight` still have > 2/3 | |||
+1 of voting power in the `LatestHeight`. If this succeeds, the client is fully | |||
synchronized. If this fails, then following Bisection Algorithm should be | |||
executed. | |||
The Client tries to download the block at the mid-point block between | |||
`LatestHeight` and `TrustHeight` and attempts that same algorithm as above | |||
using `MidPointHeight` instead of `LatestHeight` and a different threshold - | |||
1/3 +1 of voting power for *non-adjacent headers*. In the case of failure,
recursively perform the `MidPoint` verification until success then start over | |||
with an updated `NextValidatorSet` and `TrustHeight`. | |||
If the client encounters a forged header, it should submit the header along | |||
with some other intermediate headers as the evidence of misbehavior to other | |||
full nodes. After that, it can retry the bisection using another full node. An | |||
optimal client will cache trusted headers from the previous run to minimize | |||
network usage. | |||
--- | |||
Check out the formal specification | |||
[here](https://github.com/tendermint/tendermint/blob/master/docs/spec/consensus/light-client.md). | |||
## Status | |||
Accepted. | |||
## Consequences | |||
### Positive | |||
* light client which is safe to use (it can go offline, but not for too long) | |||
### Negative | |||
* complexity of bisection | |||
### Neutral | |||
* social consensus can be prone to errors (for cases where a new light client | |||
joins a network or it has been offline for too long) |
@ -0,0 +1,600 @@ | |||
# Creating an application in Java | |||
## Guide Assumptions | |||
This guide is designed for beginners who want to get started with a Tendermint | |||
Core application from scratch. It does not assume that you have any prior | |||
experience with Tendermint Core. | |||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state | |||
transition machine (your application) - written in any programming language - and securely | |||
replicates it on many machines. | |||
By following along with this guide, you'll create a Tendermint Core project | |||
called kvstore, a (very) simple distributed BFT key-value store. The application (which should
implement the blockchain interface (ABCI)) will be written in Java.
This guide assumes that you are not new to JVM world. If you are new please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html). | |||
## Built-in app vs external app | |||
If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance. | |||
[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way. | |||
Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details. | |||
If you choose another language, like we did in this guide, you have to write a separate app, | |||
which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC. | |||
This guide will show you how to build an external application using an RPC server.
Having a separate application might give you better security guarantees as two | |||
processes would be communicating via established binary protocol. Tendermint | |||
Core will not have access to application's state. | |||
## 1.1 Installing Java and Gradle | |||
Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html). | |||
Verify that you have installed Java successfully: | |||
```sh | |||
$ java -version | |||
java version "12.0.2" 2019-07-16 | |||
Java(TM) SE Runtime Environment (build 12.0.2+10) | |||
Java HotSpot(TM) 64-Bit Server VM (build 12.0.2+10, mixed mode, sharing) | |||
``` | |||
You can choose any version of Java higher or equal to 8. | |||
This guide is written using Java SE Development Kit 12. | |||
Make sure you have `$JAVA_HOME` environment variable set: | |||
```sh | |||
$ echo $JAVA_HOME | |||
/Library/Java/JavaVirtualMachines/jdk-12.0.2.jdk/Contents/Home | |||
``` | |||
For Gradle installation, please refer to [their official guide](https://gradle.org/install/). | |||
## 1.2 Creating a new Java project | |||
We'll start by creating a new Gradle project. | |||
```sh | |||
$ export KVSTORE_HOME=~/kvstore | |||
$ mkdir $KVSTORE_HOME | |||
$ cd $KVSTORE_HOME | |||
``` | |||
Inside the example directory run: | |||
```sh | |||
gradle init --dsl groovy --package io.example --project-name example --type java-application --test-framework junit | |||
``` | |||
This will create a new project for you. The tree of files should look like: | |||
```sh | |||
$ tree | |||
. | |||
|-- build.gradle | |||
|-- gradle | |||
| `-- wrapper | |||
| |-- gradle-wrapper.jar | |||
| `-- gradle-wrapper.properties | |||
|-- gradlew | |||
|-- gradlew.bat | |||
|-- settings.gradle | |||
`-- src | |||
|-- main | |||
| |-- java | |||
| | `-- io | |||
| | `-- example | |||
| | `-- App.java | |||
| `-- resources | |||
`-- test | |||
|-- java | |||
| `-- io | |||
| `-- example | |||
| `-- AppTest.java | |||
`-- resources | |||
``` | |||
When run, this should print "Hello world." to the standard output. | |||
```sh | |||
$ ./gradlew run | |||
> Task :run | |||
Hello world. | |||
``` | |||
## 1.3 Writing a Tendermint Core application | |||
Tendermint Core communicates with the application through the Application | |||
BlockChain Interface (ABCI). All message types are defined in the [protobuf | |||
file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). | |||
This allows Tendermint Core to run applications written in any programming | |||
language. | |||
### 1.3.1 Compile .proto files | |||
Add the following piece to the top of the `build.gradle`: | |||
```groovy | |||
buildscript { | |||
repositories { | |||
mavenCentral() | |||
} | |||
dependencies { | |||
classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8' | |||
} | |||
} | |||
``` | |||
Enable the protobuf plugin in the `plugins` section of the `build.gradle`: | |||
```groovy | |||
plugins { | |||
id 'com.google.protobuf' version '0.8.8' | |||
} | |||
``` | |||
Add the following code to `build.gradle`: | |||
```groovy | |||
protobuf { | |||
protoc { | |||
artifact = "com.google.protobuf:protoc:3.7.1" | |||
} | |||
plugins { | |||
grpc { | |||
artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1' | |||
} | |||
} | |||
generateProtoTasks { | |||
all()*.plugins { | |||
grpc {} | |||
} | |||
} | |||
} | |||
``` | |||
Now we should be ready to compile the `*.proto` files. | |||
Copy the necessary `.proto` files to your project: | |||
```sh | |||
mkdir -p \ | |||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \ | |||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \ | |||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common \ | |||
$KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto | |||
cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \ | |||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto | |||
cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \ | |||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto | |||
cp $GOPATH/src/github.com/tendermint/tendermint/libs/common/types.proto \ | |||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common/types.proto | |||
cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ | |||
$KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto | |||
``` | |||
Add these dependencies to `build.gradle`: | |||
```groovy | |||
dependencies { | |||
implementation 'io.grpc:grpc-protobuf:1.22.1' | |||
implementation 'io.grpc:grpc-netty-shaded:1.22.1' | |||
implementation 'io.grpc:grpc-stub:1.22.1' | |||
} | |||
``` | |||
To generate all protobuf-type classes run: | |||
```sh | |||
./gradlew generateProto | |||
``` | |||
To verify that everything went smoothly, you can inspect the `build/generated/` directory: | |||
```sh | |||
$ tree build/generated/ | |||
build/generated/ | |||
|-- source | |||
| `-- proto | |||
| `-- main | |||
| |-- grpc | |||
| | `-- types | |||
| | `-- ABCIApplicationGrpc.java | |||
| `-- java | |||
| |-- com | |||
| | `-- protobuf | |||
| | `-- GoGoProtos.java | |||
| |-- common | |||
| | `-- Types.java | |||
| |-- merkle | |||
| | `-- Merkle.java | |||
| `-- types | |||
| `-- Types.java | |||
``` | |||
### 1.3.2 Implementing ABCI | |||
The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file | |||
contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement. | |||
Create `$KVSTORE_HOME/src/main/java/io/example/KVStoreApp.java` file with the following content: | |||
```java | |||
package io.example; | |||
import io.grpc.stub.StreamObserver; | |||
import types.ABCIApplicationGrpc; | |||
import types.Types.*; | |||
class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { | |||
// methods implementation | |||
} | |||
``` | |||
Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding | |||
required business logic. | |||
### 1.3.3 CheckTx | |||
When a new transaction is added to the Tendermint Core, it will ask the | |||
application to check it (validate the format, signatures, etc.). | |||
```java | |||
@Override | |||
public void checkTx(RequestCheckTx req, StreamObserver<ResponseCheckTx> responseObserver) { | |||
var tx = req.getTx(); | |||
int code = validate(tx); | |||
var resp = ResponseCheckTx.newBuilder() | |||
.setCode(code) | |||
.setGasWanted(1) | |||
.build(); | |||
responseObserver.onNext(resp); | |||
responseObserver.onCompleted(); | |||
} | |||
private int validate(ByteString tx) { | |||
List<byte[]> parts = split(tx, '='); | |||
if (parts.size() != 2) { | |||
return 1; | |||
} | |||
byte[] key = parts.get(0); | |||
byte[] value = parts.get(1); | |||
// check if the same key=value already exists | |||
var stored = getPersistedValue(key); | |||
if (stored != null && Arrays.equals(stored, value)) { | |||
return 2; | |||
} | |||
return 0; | |||
} | |||
private List<byte[]> split(ByteString tx, char separator) { | |||
var arr = tx.toByteArray(); | |||
int i; | |||
for (i = 0; i < tx.size(); i++) { | |||
if (arr[i] == (byte)separator) { | |||
break; | |||
} | |||
} | |||
if (i == tx.size()) { | |||
return Collections.emptyList(); | |||
} | |||
return List.of( | |||
tx.substring(0, i).toByteArray(), | |||
tx.substring(i + 1).toByteArray() | |||
); | |||
} | |||
``` | |||
Don't worry if this does not compile yet. | |||
If the transaction does not have a form of `{bytes}={bytes}`, we return `1` | |||
code. When the same key=value already exists (same key and value), we return `2`
code. For others, we return a zero code indicating that they are valid. | |||
Note that anything with non-zero code will be considered invalid (`-1`, `100`, | |||
etc.) by Tendermint Core. | |||
Valid transactions will eventually be committed given they are not too big and | |||
have enough gas. To learn more about gas, check out ["the | |||
specification"](https://tendermint.com/docs/spec/abci/apps.html#gas). | |||
For the underlying key-value store we'll use | |||
[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. | |||
`build.gradle`: | |||
```groovy | |||
dependencies { | |||
implementation 'org.jetbrains.xodus:xodus-environment:1.3.91' | |||
} | |||
``` | |||
```java | |||
... | |||
import jetbrains.exodus.ArrayByteIterable; | |||
import jetbrains.exodus.ByteIterable; | |||
import jetbrains.exodus.env.Environment; | |||
import jetbrains.exodus.env.Store; | |||
import jetbrains.exodus.env.StoreConfig; | |||
import jetbrains.exodus.env.Transaction; | |||
class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { | |||
private Environment env; | |||
private Transaction txn = null; | |||
private Store store = null; | |||
KVStoreApp(Environment env) { | |||
this.env = env; | |||
} | |||
... | |||
private byte[] getPersistedValue(byte[] k) { | |||
return env.computeInReadonlyTransaction(txn -> { | |||
var store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); | |||
ByteIterable byteIterable = store.get(txn, new ArrayByteIterable(k)); | |||
if (byteIterable == null) { | |||
return null; | |||
} | |||
return byteIterable.getBytesUnsafe(); | |||
}); | |||
} | |||
} | |||
``` | |||
### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit | |||
When Tendermint Core has decided on the block, it's transferred to the | |||
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and | |||
`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the | |||
responses are expected to come in order. | |||
```java | |||
@Override | |||
public void beginBlock(RequestBeginBlock req, StreamObserver<ResponseBeginBlock> responseObserver) { | |||
txn = env.beginTransaction(); | |||
store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); | |||
var resp = ResponseBeginBlock.newBuilder().build(); | |||
responseObserver.onNext(resp); | |||
responseObserver.onCompleted(); | |||
} | |||
``` | |||
Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store. | |||
```java | |||
@Override | |||
public void deliverTx(RequestDeliverTx req, StreamObserver<ResponseDeliverTx> responseObserver) { | |||
var tx = req.getTx(); | |||
int code = validate(tx); | |||
if (code == 0) { | |||
List<byte[]> parts = split(tx, '='); | |||
var key = new ArrayByteIterable(parts.get(0)); | |||
var value = new ArrayByteIterable(parts.get(1)); | |||
store.put(txn, key, value); | |||
} | |||
var resp = ResponseDeliverTx.newBuilder() | |||
.setCode(code) | |||
.build(); | |||
responseObserver.onNext(resp); | |||
responseObserver.onCompleted(); | |||
} | |||
``` | |||
If the transaction is badly formatted or the same key=value already exists, we
again return the non-zero code. Otherwise, we add it to the store. | |||
In the current design, a block can include incorrect transactions (those who | |||
passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer | |||
directly). This is done for performance reasons. | |||
Note we can't commit transactions inside the `DeliverTx` because in such case | |||
`Query`, which may be called in parallel, will return inconsistent data (i.e. | |||
it will report that some value already exist even when the actual block was not | |||
yet committed). | |||
`Commit` instructs the application to persist the new state. | |||
```java | |||
@Override | |||
public void commit(RequestCommit req, StreamObserver<ResponseCommit> responseObserver) { | |||
txn.commit(); | |||
var resp = ResponseCommit.newBuilder() | |||
.setData(ByteString.copyFrom(new byte[8])) | |||
.build(); | |||
responseObserver.onNext(resp); | |||
responseObserver.onCompleted(); | |||
} | |||
``` | |||
### 1.3.5 Query | |||
Now, when the client wants to know whether a particular key/value pair exists, it
will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call | |||
the application's `Query` method. | |||
Applications are free to provide their own APIs. But by using Tendermint Core | |||
as a proxy, clients (including [light client | |||
package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage | |||
the unified API across different applications. Plus they won't have to call the | |||
otherwise separate Tendermint Core API for additional proofs. | |||
Note we don't include a proof here. | |||
```java | |||
@Override | |||
public void query(RequestQuery req, StreamObserver<ResponseQuery> responseObserver) { | |||
var k = req.getData().toByteArray(); | |||
var v = getPersistedValue(k); | |||
var builder = ResponseQuery.newBuilder(); | |||
if (v == null) { | |||
builder.setLog("does not exist"); | |||
} else { | |||
builder.setLog("exists"); | |||
builder.setKey(ByteString.copyFrom(k)); | |||
builder.setValue(ByteString.copyFrom(v)); | |||
} | |||
responseObserver.onNext(builder.build()); | |||
responseObserver.onCompleted(); | |||
} | |||
``` | |||
The complete specification can be found | |||
[here](https://tendermint.com/docs/spec/abci/). | |||
## 1.4 Starting an application and a Tendermint Core instance
Put the following code into the `$KVSTORE_HOME/src/main/java/io/example/App.java` file: | |||
```java | |||
package io.example; | |||
import jetbrains.exodus.env.Environment; | |||
import jetbrains.exodus.env.Environments; | |||
import java.io.IOException; | |||
public class App { | |||
public static void main(String[] args) throws IOException, InterruptedException { | |||
try (Environment env = Environments.newInstance("tmp/storage")) { | |||
var app = new KVStoreApp(env); | |||
var server = new GrpcServer(app, 26658); | |||
server.start(); | |||
server.blockUntilShutdown(); | |||
} | |||
} | |||
} | |||
``` | |||
It is the entry point of the application. | |||
Here we create a special object `Environment`, which knows where to store the application state. | |||
Then we create and start the gRPC server to handle Tendermint Core requests. | |||
Create the `$KVSTORE_HOME/src/main/java/io/example/GrpcServer.java` file with the following content: | |||
```java | |||
package io.example; | |||
import io.grpc.BindableService; | |||
import io.grpc.Server; | |||
import io.grpc.ServerBuilder; | |||
import java.io.IOException; | |||
class GrpcServer { | |||
private Server server; | |||
GrpcServer(BindableService service, int port) { | |||
this.server = ServerBuilder.forPort(port) | |||
.addService(service) | |||
.build(); | |||
} | |||
void start() throws IOException { | |||
server.start(); | |||
System.out.println("gRPC server started, listening on $port"); | |||
Runtime.getRuntime().addShutdownHook(new Thread(() -> { | |||
System.out.println("shutting down gRPC server since JVM is shutting down"); | |||
GrpcServer.this.stop(); | |||
System.out.println("server shut down"); | |||
})); | |||
} | |||
private void stop() { | |||
server.shutdown(); | |||
} | |||
/** | |||
* Await termination on the main thread since the grpc library uses daemon threads. | |||
*/ | |||
void blockUntilShutdown() throws InterruptedException { | |||
server.awaitTermination(); | |||
} | |||
} | |||
``` | |||
## 1.5 Getting Up and Running | |||
To create a default configuration, nodeKey and private validator files, let's | |||
execute `tendermint init`. But before we do that, we will need to install | |||
Tendermint Core. | |||
```sh | |||
$ rm -rf /tmp/example | |||
$ cd $GOPATH/src/github.com/tendermint/tendermint | |||
$ make install | |||
$ TMHOME="/tmp/example" tendermint init | |||
I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example/data/priv_validator_state.json | |||
I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json | |||
I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json | |||
``` | |||
Feel free to explore the generated files, which can be found at | |||
`/tmp/example/config` directory. Documentation on the config can be found | |||
[here](https://tendermint.com/docs/tendermint-core/configuration.html). | |||
We are ready to start our application: | |||
```sh | |||
./gradlew run | |||
gRPC server started, listening on 26658 | |||
``` | |||
Then we need to start Tendermint Core and point it to our application. Staying | |||
within the application directory execute: | |||
```sh | |||
$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658 | |||
I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 | |||
I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node | |||
I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" | |||
I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0 | |||
I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000 | |||
``` | |||
Now open another tab in your terminal and try sending a transaction: | |||
```sh | |||
$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' | |||
{ | |||
"jsonrpc": "2.0", | |||
"id": "", | |||
"result": { | |||
"check_tx": { | |||
"gasWanted": "1" | |||
}, | |||
"deliver_tx": {}, | |||
"hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", | |||
"height": "33" | |||
} | |||
``` | |||
Response should contain the height where this transaction was committed. | |||
Now let's check if the given key now exists and its value: | |||
```sh | |||
$ curl -s 'localhost:26657/abci_query?data="tendermint"' | |||
{ | |||
"jsonrpc": "2.0", | |||
"id": "", | |||
"result": { | |||
"response": { | |||
"log": "exists", | |||
"key": "dGVuZGVybWludA==", | |||
"value": "cm9ja3M=" | |||
} | |||
} | |||
} | |||
``` | |||
`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of `tendermint` and `rocks` accordingly. | |||
## Outro | |||
I hope everything went smoothly and your first, but hopefully not the last, | |||
Tendermint Core application is up and running. If not, please [open an issue on | |||
Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig | |||
deeper, read [the docs](https://tendermint.com/docs/). | |||
The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-java). |
@ -0,0 +1,319 @@ | |||
# Fork accountability -- Problem statement and attacks | |||
## Problem Statement | |||
Tendermint consensus guarantees the following specifications for all heights: | |||
* agreement -- no two correct full nodes decide differently. | |||
* validity -- the decided block satisfies the predefined predicate *valid()*. | |||
* termination -- all correct full nodes eventually decide, | |||
if the | |||
faulty validators have at most 1/3 of voting power in the current validator set. In the case where this assumption | |||
does not hold, each of the specifications may be violated. | |||
The agreement property says that for a given height, any two correct validators that decide on a block for that height decide on the same block. That the block was indeed generated by the blockchain can be verified starting from a trusted (genesis) block, and checking that all subsequent blocks are properly signed. | |||
However, faulty nodes may forge blocks and try to convince users (lite clients) that the blocks had been correctly generated. In addition, Tendermint agreement might be violated in the case where more than 1/3 of the voting power belongs to faulty validators: Two correct validators decide on different blocks. The latter case motivates the term "fork": as Tendermint consensus also agrees on the next validator set, correct validators may have decided on disjoint next validator sets, and the chain branches into two or more partitions (possibly having faulty validators in common) and each branch continues to generate blocks independently of the other. | |||
We say that a fork is a case in which there are two commits for different blocks at the same height of the blockchain. The problem is to ensure that in those cases we are able to detect faulty validators (and not mistakenly accuse correct validators), and incentivize therefore validators to behave according to the protocol specification. | |||
**Conceptual Limit.** In order to prove misbehavior of a node, we have to show that the behavior deviates from correct behavior with respect to a given algorithm. Thus, an algorithm that detects misbehavior of nodes executing some algorithm *A* must be defined with respect to algorithm *A*. In our case, *A* is Tendermint consensus (+ other protocols in the infrastructure; e.g., full nodes and the Lite Client). If the consensus algorithm is changed/updated/optimized in the future, we have to check whether changes to the accountability algorithm are also required. All the discussions in this document are thus inherently specific to Tendermint consensus and the Lite Client specification. | |||
**Q:** Should we distinguish between agreement for validators and agreement for full nodes? The case where all correct validators agree on a block, but a correct full node decides on a different block seems to be slightly less severe than the case where two correct validators decide on different blocks. Still, if a contaminated full node becomes a validator that may be problematic later on. Also it is not clear how gossiping is impaired if a contaminated full node is on a different branch. | |||
*Remark.* In the case more than 1/3 of the voting power belongs to faulty validators, also validity and termination can be broken. Termination can be broken if faulty processes just do not send the messages that are needed to make progress. Due to asynchrony, this is not punishable, because faulty validators can always claim they never received the messages that would have forced them to send messages. | |||
## The Misbehavior of Faulty Validators | |||
Forks are the result of faulty validators deviating from the protocol. In principle several such deviations can be detected without a fork actually occurring: | |||
1. double proposal: A faulty proposer proposes two different values (blocks) for the same height and the same round in Tendermint consensus. | |||
2. double signing: Tendermint consensus forces correct validators to prevote and precommit for at most one value per round. In case a faulty validator sends multiple prevote and/or precommit messages for different values for the same height/round, this is a misbehavior. | |||
3. lunatic validator: Tendermint consensus forces correct validators to prevote and precommit only for values *v* that satisfy *valid(v)*. If faulty validators prevote and precommit for *v* although *valid(v)=false* this is misbehavior. | |||
*Remark.* In isolation, Point 3 is an attack on validity (rather than agreement). However, the prevotes and precommits can then also be used to forge blocks. | |||
1. amnesia: Tendermint consensus has a locking mechanism. If a validator has some value v locked, then it can only prevote/precommit for v or nil. Sending prevote/precomit message for a different value v' (that is not nil) while holding lock on value v is misbehavior. | |||
2. spurious messages: In Tendermint consensus most of the message send instructions are guarded by threshold guards, e.g., one needs to receive *2f + 1* prevote messages to send precommit. Faulty validators may send precommit without having received the prevote messages. | |||
Independently of a fork happening, punishing this behavior might be important to prevent forks altogether. This should keep attackers from misbehaving: if at most 1/3 of the voting power is faulty, this misbehavior is detectable but will not lead to a safety violation. Thus, unless they have more than 1/3 (or in some cases more than 2/3) of the voting power attackers have the incentive to not misbehave. If attackers control too much voting power, we have to deal with forks, as discussed in this document. | |||
## Two types of forks | |||
* Fork-Full. Two correct validators decide on different blocks for the same height. Since also the next validator sets are decided upon, the correct validators may be partitioned to participate in two distinct branches of the forked chain. | |||
As in this case we have two different blocks (both having the same right/no right to exist), a central system invariant (one block per height decided by correct validators) is violated. As full nodes are contaminated in this case, the contamination can spread also to lite clients. However, even without breaking this system invariant, lite clients can be subject to a fork: | |||
* Fork-Lite. All correct validators decide on the same block for height *h*, but faulty processes (validators or not), forge a different block for that height, in order to fool users (who use the lite client). | |||
# Attack scenarios | |||
## On-chain attacks | |||
### Equivocation (one round) | |||
There are several scenarios in which forks might happen. The first is double signing within a round. | |||
* F1. Equivocation: faulty validators sign multiple vote messages (prevote and/or precommit) for different values *during the same round r* at a given height h. | |||
### Flip-flopping | |||
Tendermint consensus implements a locking mechanism: If a correct validator *p* receives proposal for value v and *2f + 1* prevotes for a value *id(v)* in round *r*, it locks *v* and remembers *r*. In this case, *p* also sends a precommit message for *id(v)*, which later may serve as proof that *p* locked *v*. | |||
In subsequent rounds, *p* only sends prevote messages for a value it had previously locked. However, it is possible to change the locked value if in a future round *r' > r*, if the process receives proposal and *2f + 1* prevotes for a different value *v'*. In this case, *p* could send a prevote/precommit for *id(v')*. This algorithmic feature can be exploited in two ways: | |||
* F2. Faulty Flip-flopping (Amnesia): faulty validators precommit some value *id(v)* in round *r* (value *v* is locked in round *r*) and then prevote for different value *id(v')* in higher round *r' > r* without previously correctly unlocking value *v*. In this case faulty processes "forget" that they have locked value *v* and prevote some other value in the following rounds. | |||
Some correct validators might have decided on *v* in *r*, and other correct validators decide on *v'* in *r'*. Here we can have branching on the main chain (Fork-Full). | |||
* F3. Correct Flip-flopping (Back to the past): There are some precommit messages signed by (correct) validators for value *id(v)* in round *r*. Still, *v* is not decided upon, and all processes move on to the next round. Then correct validators (correctly) lock and decide a different value *v'* in some round *r' > r*. And the correct validators continue; there is no branching on the main chain. | |||
However, faulty validators may use the correct precommit messages from round *r* together with a posteriori generated faulty precommit messages for round *r* to forge a block for a value that was not decided on the main chain (Fork-Lite). | |||
## Off-chain attacks | |||
F1-F3 may contaminate the state of full nodes (and even validators). Contaminated (but otherwise correct) full nodes may thus communicate faulty blocks to lite clients. | |||
Similarly, without actually interfering with the main chain, we can have the following: | |||
* F4. Phantom validators: faulty validators vote (sign prevote and precommit messages) in heights in which they are not part of the validator sets (at the main chain). | |||
* F5. Lunatic validator: faulty validator that sign vote messages to support (arbitrary) application state that is different from the application state that resulted from valid state transitions. | |||
## Types of victims | |||
We consider three types of potential attack victims: | |||
- FN: full node | |||
- LCS: lite client with sequential header verification | |||
- LCB: lite client with bisection based header verification | |||
F1 and F2 can be used by faulty validators to actually create multiple branches on the blockchain. That means that correctly operating full nodes decide on different blocks for the same height. Until a fork is detected locally by a full node (by receiving evidence from others or by some other local check that fails), the full node can spread corrupted blocks to lite clients. | |||
*Remark.* If full nodes take a branch different from the one taken by the validators, it may be that the liveness of the gossip protocol may be affected. We should eventually look at this more closely. However, as it does not influence safety it is not a primary concern. | |||
F3 is similar to F1, except that no two correct validators decide on different blocks. It may still be the case that full nodes become affected. | |||
In addition, without creating a fork on the main chain, lite clients can be contaminated by more than a third of validators that are faulty and sign a forged header. | |||
F4 cannot fool correct full nodes as they know the current validator set. Similarly, LCS know who the validators are. Hence, F4 is an attack against LCB that do not necessarily know the complete prefix of headers (Fork-Lite), as they trust a header that is signed by at least one correct validator (trusting period method). | |||
The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks so they can corrupt the state of full nodes. Then if a lite client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the lite client. | |||
F4 and F5 are *off-chain*, that is, these attacks cannot be used to corrupt the state of full nodes (which have sufficient knowledge on the state of the chain to not be fooled). | |||
| Attack | FN | LCS | LCB | | |||
|:------:|:------:|:------:|:------:| | |||
| F1 | direct | FN | FN | | |||
| F2 | direct | FN | FN | | |||
| F3 | direct | FN | FN | | |||
| F4 | | | direct | | |||
| F5 | | | direct | | |||
**Q:** Lite clients are more vulnerable than full nodes, because the former do only verify headers but do not execute transactions. What kind of certainty is gained by a full node that executes a transaction? | |||
As a full node verifies all transactions, it can only be | |||
contaminated by an attack if the blockchain itself violates its invariant (one block per height), that is, in case of a fork that leads to branching. | |||
## Detailed Attack Scenarios | |||
### Equivocation based attacks | |||
In case of equivocation based attacks, faulty validators sign multiple votes (prevote and/or precommit) in the same | |||
round of some height. This attack can be executed on both full nodes and lite clients. It requires more than 1/3 of voting power to be executed. | |||
#### Scenario 1: Equivocation on the main chain | |||
Validators: | |||
* CA - a set of correct validators with less than 1/3 of the voting power | |||
* CB - a set of correct validators with less than 1/3 of the voting power | |||
* CA and CB are disjoint | |||
* F - a set of faulty validators with more than 1/3 voting power | |||
Observe that this setting violates the Tendermint failure model. | |||
Execution: | |||
* A faulty proposer proposes block A to CA | |||
* A faulty proposer proposes block B to CB | |||
* Validators from the set CA and CB prevote for A and B, respectively. | |||
* Faulty validators from the set F prevote both for A and B. | |||
* The faulty prevote messages | |||
- for A arrive at CA long before the B messages | |||
- for B arrive at CB long before the A messages | |||
* Therefore correct validators from set CA and CB will observe | |||
more than 2/3 of prevotes for A and B and precommit for A and B, respectively. | |||
* Faulty validators from the set F precommit both values A and B. | |||
* Thus, we have more than 2/3 commits for both A and B. | |||
Consequences: | |||
* Creating evidence of misbehavior is simple in this case as we have multiple messages signed by the same faulty processes for different values in the same round. | |||
* We have to ensure that these different messages reach a correct process (full node, monitor?), which can submit evidence. | |||
* This is an attack on the full node level (Fork-Full). | |||
* It extends also to the lite clients, | |||
* For both we need a detection and recovery mechanism. | |||
#### Scenario 2: Equivocation to a lite client (LCS) | |||
Validators: | |||
* a set F of faulty validators with more than 2/3 of the voting power. | |||
Execution: | |||
* for the main chain F behaves nicely | |||
* F coordinates to sign a block B that is different from the one on the main chain. | |||
* the lite client obtains B and trusts it as it is signed by more than 2/3 of the voting power. | |||
Consequences: | |||
Once equivocation is used to attack lite client it opens space | |||
for different kind of attacks as application state can be diverged in any direction. For example, it can modify validator set such that it contains only validators that do not have any stake bonded. Note that after a lite client is fooled by a fork, that means that an attacker can change application state and validator set arbitrarily. | |||
In order to detect such (equivocation-based attack), the lite client would need to cross check its state with some correct validator (or to obtain a hash of the state from the main chain using out of band channels). | |||
*Remark.* The lite client would be able to create evidence of misbehavior, but this would require pulling potentially a lot of data from correct full nodes. Maybe we need to figure out a different architecture where a lite client that is attacked will push all its data for the current unbonding period to a correct node that will inspect this data and submit corresponding evidence. There are also architectures that assume a special role (sometimes called fisherman) whose goal is to collect as much useful data as possible from the network, to do analysis and create evidence transactions. That functionality is outside the scope of this document. | |||
*Remark.* The difference between LCS and LCB might only be in the amount of voting power needed to convince lite client about arbitrary state. In case of LCB where security threshold is at minimum, an attacker can arbitrarily modify application state with more than 1/3 of voting power, while in case of LCS it requires more than 2/3 of the voting power. | |||
### Flip-flopping: Amnesia based attacks | |||
In case of amnesia, faulty validators lock some value *v* in some round *r*, and then vote for different value *v'* in higher rounds without correctly unlocking value *v*. This attack can be used both on full nodes and lite clients. | |||
#### Scenario 3: At most 2/3 of faults | |||
Validators: | |||
* a set F of faulty validators with more than 1/3 but at most 2/3 of the voting power | |||
* a set C of correct validators | |||
Execution: | |||
* Faulty validators commit (without exposing it on the main chain) a block A in round *r* by collecting more than 2/3 of the | |||
voting power (containing correct and faulty validators). | |||
* All validators (correct and faulty) reach a round *r' > r*. | |||
* Some correct validators in C do not lock any value before round *r'*. | |||
* The faulty validators in F deviate from Tendermint consensus by ignoring that they locked A in *r*, and propose a different block B in *r'*. | |||
* As the validators in C that have not locked any value find B acceptable, they accept the proposal for B and commit a block B. | |||
*Remark.* In this case, the more than 1/3 of faulty validators do not need to commit an equivocation (F1) as they only vote once per round in the execution. | |||
Detecting faulty validators in the case of such an attack can be done by the fork accountability mechanism described in: https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit?usp=sharing. | |||
If a lite client is attacked using this attack with more than 1/3 of voting power (and less than 2/3), the attacker cannot change the application state arbitrarily. Rather, the attacker is limited to a state a correct validator finds acceptable: In the execution above, correct validators still find the value acceptable, however, the block the lite client trusts deviates from the one on the main chain. | |||
#### Scenario 4: More than 2/3 of faults | |||
In case there is an attack with more than 2/3 of the voting power, an attacker can arbitrarily change application state. | |||
Validators: | |||
* a set F1 of faulty validators with more than 1/3 of the voting power | |||
* a set F2 of faulty validators with at most 1/3 of the voting power | |||
Execution | |||
* Similar to Scenario 3 (however, messages by correct validators are not needed) | |||
* The faulty validators in F1 lock value A in round *r* | |||
* They sign a different value in follow-up rounds | |||
* F2 does not lock A in round *r* | |||
Consequences: | |||
* The validators in F1 will be detectable by the fork accountability mechanisms. | |||
* The validators in F2 cannot be detected using this mechanism. | |||
Only in case they signed something which conflicts with the application this can be used against them. Otherwise they do not do anything incorrect. | |||
* This case is not covered by the report https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit?usp=sharing as it only assumes at most 2/3 of faulty validators. | |||
**Q:** do we need to define a special kind of attack for the case where a validator sign arbitrarily state? It seems that detecting such attack requires different mechanism that would require as an evidence a sequence of blocks that lead to that state. This might be very tricky to implement. | |||
### Back to the past | |||
In this kind of attacks faulty validators take advantage of the fact that they did not sign messages in some of the past rounds. Due to the asynchronous network in which Tendermint operates, we cannot easily differentiate between such an attack and delayed message. This kind of attack can be used at both full nodes and lite clients. | |||
#### Scenario 5: | |||
Validators: | |||
* C1 - a set of correct validators with 1/3 of the voting power | |||
* C2 - a set of correct validators with 1/3 of the voting power | |||
* C1 and C2 are disjoint | |||
* F - a set of faulty validators with 1/3 voting power | |||
* one additional faulty process *q* | |||
* F and *q* violate the Tendermint failure model. | |||
Execution: | |||
* in a round *r* of height *h* we have C1 precommitting a value A, | |||
* C2 precommits nil, | |||
* F does not send any message | |||
* *q* precommits nil. | |||
* In some round *r' > r*, F and *q* and C2 commit some other value B different from A. | |||
* F and *q* "go back to the past" and sign precommit messages for value A in round *r*. | |||
* Together with precommit messages of C1 this is sufficient for a commit for value A. | |||
Consequences: | |||
* Only a single faulty validator that previously precommitted nil committed equivocation, while the other 1/3 of faulty validators actually executed an attack that has exactly the same sequence of messages as part of the amnesia attack. Detecting this kind of attack boils down to mechanisms for equivocation and amnesia. | |||
**Q:** should we keep this as a separate kind of attack? It seems that equivocation, amnesia and phantom validators are the only kinds of attacks we need to support and this gives us security also in other cases. This would not be surprising as equivocation and amnesia are attacks that followed from the protocol and the phantom attack is not really an attack on Tendermint but more on the Proof of Stake module. | |||
### Phantom validators | |||
In case of phantom validators, processes that are not part of the current validator set but are still bonded (as the attack happens during their unbonding period) can be part of the attack by signing vote messages. This attack can be executed against both full nodes and lite clients. | |||
#### Scenario 6: | |||
Validators: | |||
* F -- a set of faulty validators that are not part of the validator set on the main chain at height *h + k* | |||
Execution: | |||
* There is a fork, and there exists two different headers for height *h + k*, with different validator sets: | |||
- VS2 on the main chain | |||
- forged header VS2', signed by F (and others) | |||
* a lite client has a trust in a header for height *h* (and the corresponding validator set VS1). | |||
* As part of bisection header verification, it verifies header at height *h + k* with new validator set VS2'. | |||
Consequences: | |||
* To detect this, a node needs to see both, the forged header and the canonical header from the chain. | |||
* If this is the case, detecting these kinds of attacks is easy as it just requires verifying if processes are signing messages in heights in which they are not part of the validator set. | |||
**Remark.** We can have phantom-validator-based attacks as a follow up of an equivocation or amnesia based attack where the forked state contains validators that are not part of the validator set at the main chain. In this case, they keep signing messages contributing to a forked chain (the wrong branch) although they are not part of the validator set on the main chain. This attack can also be used to attack a full node during a period of time in which it is eclipsed. | |||
### Lunatic validator | |||
Lunatic validator agrees to sign commit messages for arbitrary application state. It is used to attack lite clients. | |||
Note that detecting this behavior requires application knowledge. Detecting this behavior can probably be done by | |||
referring to the block before the one in which the misbehavior happened. | |||
**Q:** can we say that in this case a validator ignore to check if proposed value is valid before voting for it? |
@ -1,113 +1,329 @@ | |||
# Light Client | |||
A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs | |||
about the blockchain application. In this document we describe mechanisms that ensure that the Tendermint light client | |||
has the same level of security as Full Node processes (without being itself a Full Node). | |||
To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash. | |||
Validating a blockchain header in Tendermint consists in verifying that the header is committed (signed) by >2/3 of the | |||
voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the | |||
core functionalities of the light client is updating the current validator set, which is then used to verify the | |||
blockchain header, and further the corresponding Merkle proofs. | |||
For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over | |||
Tendermint RPC: | |||
```golang | |||
Header(height int64) (SignedHeader, error) // returns signed header for the given height | |||
Validators(height int64) (ResultValidators, error) // returns validator set for the given height | |||
LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number | |||
type SignedHeader struct { | |||
Header Header | |||
Commit Commit | |||
ValSetNumber int64 | |||
} | |||
# Lite client | |||
type ResultValidators struct { | |||
BlockHeight int64 | |||
Validators []Validator | |||
// time the current validator set is initialised, i.e, time of the last validator change before header BlockHeight | |||
ValSetTime int64 | |||
} | |||
A lite client is a process that connects to Tendermint full nodes and then tries to verify application data using the Merkle proofs. | |||
## Context of this document | |||
In order to make sure that full nodes have the incentive to follow the protocol, we have to address the following three Issues | |||
1) The lite client needs a method to verify headers it obtains from full nodes according to trust assumptions -- this document. | |||
2) The lite client must be able to connect to one correct full node to detect and report on failures in the trust assumptions (i.e., conflicting headers) -- a future document. | |||
3) In the event the trust assumption fails (i.e., a lite client is fooled by a conflicting header), the Tendermint fork accountability protocol must account for the evidence -- see #3840 | |||
## Problem statement | |||
We assume that the lite client knows a (base) header *inithead* it trusts (by social consensus or because the lite client has decided to trust the header before). The goal is to check whether another header *newhead* can be trusted based on the data in *inithead*. | |||
The correctness of the protocol is based on the assumption that *inithead* was generated by an instance of Tendermint consensus. The term "trusting" above indicates that the correctness of the protocol depends on this assumption. It is the responsibility of the user who runs the lite client to make sure that the risk of trusting a corrupted/forged *inithead* is negligible. | |||
## Definitions | |||
### Data structures | |||
In the following, only the details of the data structures needed for this specification are given. | |||
* header fields | |||
- *height* | |||
- *bfttime*: the chain time when the header (block) was generated | |||
- *V*: validator set containing validators for this block. | |||
- *NextV*: validator set for next block. | |||
- *commit*: evidence that block with height *height* - 1 was committed by a set of validators (canonical commit). We will use ```signers(commit)``` to refer to the set of validators that committed the block. | |||
* signed header fields: contains a header and a *commit* for the current header; a "seen commit". In the Tendermint consensus the "canonical commit" is stored in header *height* + 1. | |||
* For each header *h* it has locally stored, the lite client stores whether | |||
it trusts *h*. We write *trust(h) = true*, if this is the case. | |||
* Validator fields. We will write a validator as a tuple *(v,p)* such that | |||
+ *v* is the identifier (we assume identifiers are unique in each validator set) | |||
+ *p* is its voting power | |||
### Functions | |||
For the purpose of this lite client specification, we assume that the Tendermint Full Node exposes the following function over Tendermint RPC: | |||
```go | |||
func Commit(height int64) (SignedHeader, error) | |||
// returns signed header: header (with the fields from | |||
// above) with Commit that include signatures of | |||
// validators that signed the header | |||
type SignedHeader struct { | |||
Header Header | |||
Commit Commit | |||
} | |||
``` | |||
We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is | |||
being assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers | |||
the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time | |||
as validator set init time. | |||
Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely, | |||
given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next | |||
validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator | |||
set), and then starting from the block `H+2`, it will be signed by the next validator set. | |||
Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function | |||
names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more | |||
clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equals to | |||
`valSetNumber+1`. | |||
Locally, light client manages the following state: | |||
```golang | |||
valSet []Validator // current validator set (last known and verified validator set) | |||
valSetNumber int64 // sequence number of the current validator set | |||
valSetHash []byte // hash of the current validator set | |||
valSetTime int64 // time when the current validator set is initialised
```

### Definitions
* *tp*: trusting period | |||
* for realtime *t*, the predicate *correct(v,t)* is true if the validator *v* | |||
follows the protocol until time *t* (we will see about recovery later). | |||
### Tendermint Failure Model | |||
If a block *h* is generated at time *bfttime* (and this time is stored in the block), then a set of validators that hold more than 2/3 of the voting power in h.Header.NextV is correct until time h.Header.bfttime + tp. | |||
Formally, | |||
\[ | |||
\sum_{(v,p) \in h.Header.NextV \wedge correct(v,h.Header.bfttime + tp)} p > | |||
2/3 \sum_{(v,p) \in h.Header.NextV} p | |||
\] | |||
*Assumption*: "correct" is defined w.r.t. realtime (some Newtonian global notion of time, i.e., wall time), while *bfttime* corresponds to the reading of the local clock of a validator (how this time is computed may change when the Tendermint consensus is modified). In this note, we assume that all clocks are synchronized to realtime. We can make this more precise eventually (incorporating clock drift, accuracy, precision, etc.). Right now, we consider this assumption sufficient, as clock synchronization (under NTP) is in the order of milliseconds and *tp* is in the order of weeks. | |||
*Remark*: This failure model might change to a hybrid version that takes heights into account in the future. | |||
The specification in this document considers an implementation of the lite client under this assumption. Issues like *counter-factual signing* and *fork accountability* and *evidence submission* are mechanisms that justify this assumption by incentivizing validators to follow the protocol. | |||
If they don't, and we have more than 1/3 faults, safety may be violated. Our approach then is to *detect* these cases (after the fact), and take suitable repair actions (automatic and social). This is discussed in an upcoming document on "Fork accountability". (These safety violations include the lite client wrongly trusting a header, a fork in the blockchain, etc.)
## Lite Client Trusting Spec | |||
The lite client communicates with a full node and learns new headers. The goal is to locally decide whether to trust a header. Our implementation needs to ensure the following two properties: | |||
- Lite Client Completeness: If header *h* was correctly generated by an instance of Tendermint consensus (and its age is less than the trusting period), then the lite client should eventually set *trust(h)* to true. | |||
- Lite Client Accuracy: If header *h* was *not generated* by an instance of Tendermint consensus, then the lite client should never set *trust(h)* to true. | |||
*Remark*: If in the course of the computation, the lite client obtains certainty that some headers were forged by adversaries (that is were not generated by an instance of Tendermint consensus), it may submit (a subset of) the headers it has seen as evidence of misbehavior. | |||
*Remark*: In Completeness we use "eventually", while in practice *trust(h)* should be set to true before *h.Header.bfttime + tp*. If not, the block cannot be trusted because it is too old. | |||
*Remark*: If a header *h* is marked with *trust(h)*, but it is too old (its bfttime is more than *tp* ago), then the lite client should set *trust(h)* to false again. | |||
*Assumption*: Initially, the lite client has a header *inithead* that it trusts correctly, that is, *inithead* was correctly generated by the Tendermint consensus. | |||
To reason about the correctness, we may prove the following invariant. | |||
*Verification Condition: Lite Client Invariant.* | |||
For each lite client *l* and each header *h*: | |||
if *l* has set *trust(h) = true*, | |||
then validators that are correct until time *h.Header.bfttime + tp* have more than two thirds of the voting power in *h.Header.NextV*. | |||
Formally, | |||
\[ | |||
\sum_{(v,p) \in h.Header.NextV \wedge correct(v,h.Header.bfttime + tp)} p > | |||
2/3 \sum_{(v,p) \in h.Header.NextV} p | |||
\] | |||
*Remark.* To prove the invariant, we will have to prove that the lite client only trusts headers that were correctly generated by Tendermint consensus, then the formula above follows from the Tendermint failure model. | |||
## High Level Solution | |||
Upon initialization, the lite client is given a header *inithead* it trusts (by | |||
social consensus). It is assumed that *inithead* satisfies the lite client invariant. (If *inithead* has been correctly generated by Tendermint consensus, the invariant follows from the Tendermint Failure Model.) | |||
When a lite client sees a signed new header *snh*, it has to decide whether to trust the new
header. Trust can be obtained by (possibly) the combination of three methods. | |||
1. **Uninterrupted sequence of proof.** If a block is appended to the chain, where the last block | |||
is trusted (and properly committed by the old validator set in the next block), | |||
and the new block contains a new validator set, the new block is trusted if the lite client knows all headers in the prefix. | |||
Intuitively, a trusted validator set is assumed to only choose a new validator set that will obey the Tendermint Failure Model.
2. **Trusting period.** Based on a trusted block *h*, and the lite client | |||
invariant, which ensures the fault assumption during the trusting period, we can check whether at least one validator, that has been continuously correct from *h.Header.bfttime* until now, has signed *snh*. | |||
If this is the case, similarly to above, the chosen validator set in *snh* does not violate the Tendermint Failure Model. | |||
3. **Bisection.** If a check according to the trusting period fails, the lite client can try to obtain a header *hp* whose height lies between *h* and *snh* in order to check whether *h* can be used to get trust for *hp*, and *hp* can be used to get trust for *snh*. If this is the case we can trust *snh*; if not, we may continue recursively. | |||
## How to use it | |||
We consider the following use case: | |||
the lite client wants to verify a header for some given height *k*. Thus: | |||
- it requests the signed header for height *k* from a full node | |||
- it tries to verify this header with the methods described here. | |||
This can be used in several settings: | |||
- someone tells the lite client that application data that is relevant for it can be read in the block of height *k*. | |||
- the lite client wants the latest state. It asks a full node for the current height, and uses the response for *k*.
## Details | |||
*Assumptions* | |||
1. *tp < unbonding period*. | |||
2. *snh.Header.bfttime < now* | |||
3. *snh.Header.bfttime < h.Header.bfttime+tp* | |||
4. *trust(h)=true* | |||
**Observation 1.** If *h.Header.bfttime + tp > now*, we trust the old | |||
validator set *h.Header.NextV*. | |||
When we say we trust *h.Header.NextV* we do *not* trust that each individual validator in *h.Header.NextV* is correct, but we only trust the fact that at most 1/3 of them are faulty (more precisely, the faulty ones have at most 1/3 of the total voting power). | |||
### Functions | |||
The function *Bisection* checks whether to trust header *h2* based on the trusted header *h1*. It does so by calling | |||
the function *CheckSupport* in the process of | |||
bisection/recursion. *CheckSupport* implements the trusted period method and, for two adjacent headers (in terms of heights), it checks uninterrupted sequence of proof.
*Assumption*: In the following, we assume that *h2.Header.height > h1.Header.height*. We will quickly discuss the other case in the next section. | |||
We consider the following set-up: | |||
- the lite client communicates with one full node | |||
- the lite client locally stores all the signed headers it obtained (trusted or not). In the pseudo code below we write *Store(header)* for this. | |||
- If *Bisection* returns *false*, then the lite client has seen a forged header. | |||
* However, it does not know which header(s) is/are the problematic one(s). | |||
* In this case, the lite client can submit (some of) the headers it has seen as evidence. As the lite client communicates with one full node only when executing Bisection, there are two cases | |||
- the full node is faulty | |||
- the full node is correct and there was a fork in Tendermint consensus. Header *h1* is from a different branch than the one taken by the full node. This case is not focus of this document, but will be treated in the document on fork accountability. | |||
- the lite client must retry to retrieve correct headers from another full node | |||
* it picks a new full node | |||
* it restarts *Bisection* | |||
* there might be optimizations; a lite client may not need to call *Commit(k)*, for a height *k* for which it already has a signed header it trusts. | |||
* how to make sure that a lite client can communicate with a correct full node will be the focus of a separate document (recall Issue 3 from "Context of this document"). | |||
**Auxiliary Functions.** We will use the function ```votingpower_in(V1,V2)``` to compute the voting power the validators in set V1 have according to their voting power in set V2; | |||
we will write ```totalVotingPower(V)``` for ```votingpower_in(V,V)```, which returns the total voting power in V. | |||
We further use the function ```signers(Commit)``` that returns the set of validators that signed the Commit. | |||
**CheckSupport.** The following function checks whether we can trust the header h2 based on header h1 following the trusting period method. | |||
```go | |||
func CheckSupport(h1,h2,trustlevel) bool { | |||
if h1.Header.bfttime + tp < now { // Observation 1 | |||
return false // old header was once trusted but it is expired | |||
} | |||
vp_all := totalVotingPower(h1.Header.NextV) | |||
// total sum of voting power of validators in h2 | |||
if h2.Header.height == h1.Header.height + 1 { | |||
// specific check for adjacent headers; everything must be | |||
// properly signed. | |||
// also check that h2.Header.V == h1.Header.NextV | |||
// Plus the following check that 2/3 of the voting power | |||
// in h1 signed h2 | |||
return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > | |||
2/3 * vp_all) | |||
// signing validators are more than two third in h1. | |||
} | |||
return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > | |||
max(1/3,trustlevel) * vp_all) | |||
// get validators in h1 that signed h2 | |||
// sum of voting powers in h1 of | |||
// validators that signed h2 | |||
// is more than a third in h1 | |||
} | |||
``` | |||
The light client is initialised with the trusted validator set, for example based on the known validator set hash, | |||
validator set sequence number and the validator set init time. | |||
The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid, | |||
and 2) update the validator set (when the given header is valid and it is more recent than the seen headers). | |||
*Remark*: Basic header verification must be done for *h2*. Similar checks are done in: | |||
https://github.com/tendermint/tendermint/blob/master/types/validator_set.go#L591-L633 | |||
*Remark*: There are some sanity checks which are not in the code: | |||
*h2.Header.height > h1.Header.height* and *h2.Header.bfttime > h1.Header.bfttime* and *h2.Header.bfttime < now*. | |||
*Remark*: ```return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > max(1/3,trustlevel) * vp_all)``` may return false even if *h2* was properly generated by Tendermint consensus in the case of big changes in the validator sets. However, the check ```return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > | |||
2/3 * vp_all)``` must return true if *h1* and *h2* were generated by Tendermint consensus. | |||
*Remark*: The 1/3 check differs from a previously proposed method that was based on intersecting validator sets and checking that the new validator set contains "enough" correct validators. We found that the old check is not suited for realistic changes in the validator sets. The new method is not only based on cardinalities, but also exploits that we can trust what is signed by a correct validator (i.e., signed by more than 1/3 of the voting power). | |||
```golang | |||
VerifyAndUpdate(signedHeader SignedHeader): | |||
assertThat signedHeader.valSetNumber >= valSetNumber | |||
if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then | |||
setValidatorSet(signedHeader)
```

*Correctness arguments*
Towards Lite Client Accuracy: | |||
- Assume by contradiction that *h2* was not generated correctly and the lite client sets trust to true because *CheckSupport* returns true. | |||
- h1 is trusted and sufficiently new | |||
- by Tendermint Fault Model, less than 1/3 of voting power held by faulty validators => at least one correct validator *v* has signed *h2*. | |||
- as *v* is correct up to now, it followed the Tendermint consensus protocol at least up to signing *h2* => *h2* was correctly generated, we arrive at the required contradiction. | |||
Towards Lite Client Completeness: | |||
- The check is successful if sufficiently many validators of *h1* are still validators in *h2* and signed *h2*. | |||
- If *h2.Header.height = h1.Header.height + 1*, and both headers were generated correctly, the test passes | |||
*Verification Condition:* We may need a Tendermint invariant stating that if *h2.Header.height = h1.Header.height + 1* then *signers(h2.Commit) \subseteq h1.Header.NextV*. | |||
*Remark*: The variable *trustlevel* can be used if the user believes that relying on one correct validator is not sufficient. However, in case of (frequent) changes in the validator set, the higher the *trustlevel* is chosen, the more unlikely it becomes that CheckSupport returns true for non-adjacent headers. | |||
**Bisection.** The following function uses CheckSupport in a recursion to find intermediate headers that allow to establish a sequence of trust. | |||
```go | |||
func Bisection(h1,h2,trustlevel) bool{ | |||
if CheckSupport(h1,h2,trustlevel) { | |||
return true | |||
else | |||
updateValidatorSet(signedHeader.ValSetNumber) | |||
return VerifyAndUpdate(signedHeader) | |||
isValid(signedHeader SignedHeader): | |||
valSetOfTheHeader = Validators(signedHeader.Header.Height) | |||
assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash | |||
assertThat signedHeader is passing basic validation | |||
if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true | |||
else | |||
} | |||
if h2.Header.height == h1.Header.height + 1 { | |||
// we have adjacent headers that are not matching (failed | |||
// the CheckSupport) | |||
// we could submit evidence here | |||
return false | |||
} | |||
pivot := (h1.Header.height + h2.Header.height) / 2 | |||
hp := Commit(pivot) | |||
// ask a full node for header of height pivot | |||
Store(hp) | |||
// store header hp locally | |||
if Bisection(h1,hp,trustlevel) { | |||
// only check right branch if hp is trusted | |||
// (otherwise a lot of unnecessary computation may be done) | |||
return Bisection(hp,h2,trustlevel) | |||
} | |||
else { | |||
return false | |||
} | |||
} | |||
``` | |||
```golang
setValidatorSet(signedHeader SignedHeader):
nextValSet = Validators(signedHeader.Header.Height) | |||
assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash | |||
valSet = nextValSet.Validators | |||
valSetHash = signedHeader.Header.ValidatorsHash | |||
valSetNumber = signedHeader.ValSetNumber | |||
valSetTime = nextValSet.ValSetTime | |||
votingPower(commit Commit): | |||
votingPower = 0 | |||
for each precommit in commit.Precommits do: | |||
if precommit.ValidatorAddress is in valSet and signature of the precommit verifies then | |||
votingPower += valSet[precommit.ValidatorAddress].VotingPower | |||
return votingPower | |||
votingPower(validatorSet []Validator): | |||
for each validator in validatorSet do: | |||
votingPower += validator.VotingPower | |||
return votingPower | |||
updateValidatorSet(valSetNumberOfTheHeader): | |||
while valSetNumber != valSetNumberOfTheHeader do | |||
signedHeader = LastHeader(valSetNumber) | |||
if isValid(signedHeader) then | |||
setValidatorSet(signedHeader) | |||
else return error | |||
return | |||
``` | |||
Note that in the logic above we assume that the light client will always go upward with respect to header verifications, | |||
i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older | |||
headers (go backward) the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent | |||
checks fail, a light client needs to implement some recovery strategy, for example connecting to another FullNode.
*Correctness arguments (sketch)* | |||
Lite Client Accuracy: | |||
- Assume by contradiction that *h2* was not generated correctly and the lite client sets trust to true because Bisection returns true. | |||
- Bisection returns true only if all calls to CheckSupport in the recursion return true. | |||
- Thus we have a sequence of headers that all satisfied the CheckSupport | |||
- again a contradiction | |||
Lite Client Completeness: | |||
This is only ensured if upon *Commit(pivot)* the lite client is always provided with a correctly generated header. | |||
*Stalling* | |||
With Bisection, a faulty full node could stall a lite client by creating a long sequence of headers that are queried one-by-one by the lite client and look OK, before the lite client eventually detects a problem. There are several ways to address this: | |||
* Each call to ```Commit``` could be issued to a different full node | |||
* Instead of querying header by header, the lite client tells a full node which header it trusts, and the height of the header it needs. The full node responds with the header along with a proof consisting of intermediate headers that the light client can use to verify. Roughly, Bisection would then be executed at the full node. | |||
* We may set a timeout for how long bisection may take.
### The case *h2.Header.height < h1.Header.height* | |||
In the use case where someone tells the lite client that application data that is relevant for it can be read in the block of height *k* and the lite client trusts a more recent header, we can use the hashes to verify headers "down the chain." That is, we iterate down the heights and check the hashes in each step. | |||
*Remark.* For the case where the lite client trusts two headers *i* and *j* with *i < k < j*, we should discuss/experiment whether the forward or the backward method is more effective.
```go | |||
func Backwards(h1,h2) bool { | |||
assert (h2.Header.height < h1.Header.height) | |||
old := h1 | |||
for i := h1.Header.height - 1; i > h2.Header.height; i-- { | |||
new := Commit(i) | |||
Store(new) | |||
if (hash(new) != old.Header.hash) { | |||
return false | |||
} | |||
old := new | |||
} | |||
return (hash(h2) == old.Header.hash) | |||
} | |||
``` |
@ -0,0 +1,25 @@ | |||
<!-- HTML for static distribution bundle build -->
<!-- Minimal static page that renders the Tendermint RPC OpenAPI spec
     (./swagger.yaml) using Swagger UI loaded from the unpkg CDN.
     NOTE(review): the protocol-relative "//unpkg.com" URLs require the page
     itself to be served over http(s); they will not resolve when opened
     from a file:// URL — confirm how this page is deployed. -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Tendermint RPC</title>
<!-- Swagger UI stylesheet, favicon, and JS bundle from the CDN (major
     version 3 pinned via the "@3" tag). -->
<link rel="stylesheet" type="text/css" href="//unpkg.com/swagger-ui-dist@3/swagger-ui.css" >
<link rel="icon" type="image/png" href="//unpkg.com/swagger-ui-dist@3/favicon-16x16.png"/>
<script src="//unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js"></script>
</head>
<body>
<!-- Container element Swagger UI renders into (matches dom_id below). -->
<div id="swagger-ui"></div>
<script>
<!-- Initialize Swagger UI once the page has loaded, pointing it at the
     swagger.yaml spec that lives next to this file. -->
window.onload = function() {
window.ui = SwaggerUIBundle({
url: "./swagger.yaml",
dom_id: '#swagger-ui',
deepLinking: true,
layout: "BaseLayout"
});
}
</script>
</body>
</html>
@ -0,0 +1,33 @@ | |||
# Configuration for Dredd, the HTTP API testing tool, used to validate the
# RPC server against the OpenAPI blueprint (see `blueprint` and `endpoint`
# at the bottom of this file).
color: true
dry-run: null
# Compiled Go hooks binary that Dredd runs alongside the tests.
hookfiles: build/contract_tests
language: go
require: null
# Command that brings up the system under test, and how long (seconds) to
# wait for it before starting the test run.
server: make localnet-start
server-wait: 30
init: false
custom: {}
names: false
only: []
reporter: []
output: []
header: []
sorted: false
user: null
inline-errors: false
details: false
# Only GET transactions are executed; other methods in the spec are skipped.
method: [GET]
loglevel: warning
path: []
# Timeouts/retries (milliseconds) for the hooks-worker handler that talks to
# the Go hooks binary on the host/port below.
hooks-worker-timeout: 5000
hooks-worker-connect-timeout: 1500
hooks-worker-connect-retry: 500
hooks-worker-after-connect-wait: 100
hooks-worker-term-timeout: 5000
hooks-worker-term-retry: 500
hooks-worker-handler-host: 127.0.0.1
hooks-worker-handler-port: 61321
config: ./dredd.yml
# This path accepts no variables
blueprint: ./docs/spec/rpc/swagger.yaml
# Local RPC endpoint the transactions are replayed against.
endpoint: 'http://127.0.0.1:26657/'