Release/v0.26.0pull/2753/head v0.26.0
@ -1,11 +0,0 @@ | |||
package consensus | |||
import "fmt" | |||
// Version components for the consensus package. The values are
// kind of arbitrary; they are stitched together into Version below.
var (
	Spec     = "1" // async
	Major    = "0" //
	Minor    = "2" // replay refactor
	Revision = "2" // validation -> commit
)

// Version is the full consensus version string, e.g. "v1/0.2.2".
var Version = fmt.Sprintf("v%s/%s.%s.%s", Spec, Major, Minor, Revision)
@ -0,0 +1,6 @@ | |||
#! /bin/bash
# Regenerate merkle.pb.go from merkle.proto using gogoproto, then
# inject a nolint directive so the gas linter skips the generated file.

# Abort on the first failing command and on unset variables, so a failed
# protoc or awk run cannot clobber the existing merkle.pb.go below.
set -euo pipefail

protoc --gogo_out=. -I "$GOPATH/src/" -I . -I "$GOPATH/src/github.com/gogo/protobuf/protobuf" merkle.proto

echo "--> adding nolint declarations to protobuf generated files"
awk '/package merkle/ { print "//nolint: gas"; print; next }1' merkle.pb.go > merkle.pb.go.new
mv merkle.pb.go.new merkle.pb.go
@ -0,0 +1,792 @@ | |||
// Code generated by protoc-gen-gogo. DO NOT EDIT. | |||
// source: crypto/merkle/merkle.proto | |||
package merkle | |||
import proto "github.com/gogo/protobuf/proto" | |||
import fmt "fmt" | |||
import math "math" | |||
import _ "github.com/gogo/protobuf/gogoproto" | |||
import bytes "bytes" | |||
import io "io" | |||
// Reference imports to suppress errors if they are not otherwise used. | |||
var _ = proto.Marshal | |||
var _ = fmt.Errorf | |||
var _ = math.Inf | |||
// This is a compile-time assertion to ensure that this generated file | |||
// is compatible with the proto package it is being compiled against. | |||
// A compilation error at this line likely means your copy of the | |||
// proto package needs to be updated. | |||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package | |||
// ProofOp defines an operation used for calculating Merkle root
// The data could be arbitrary format, providing nessecary data
// for example neighbouring node hash
//
// NOTE(review): everything from here down is protoc-gen-gogo output
// (see the "DO NOT EDIT" header). Change merkle.proto and regenerate
// via protoc.sh instead of editing by hand.
type ProofOp struct {
	Type                 string   `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	Key                  []byte   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Data                 []byte   `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ProofOp) Reset()         { *m = ProofOp{} }
func (m *ProofOp) String() string { return proto.CompactTextString(m) }
func (*ProofOp) ProtoMessage()    {}
func (*ProofOp) Descriptor() ([]byte, []int) {
	return fileDescriptor_merkle_5d3f6051907285da, []int{0}
}
func (m *ProofOp) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ProofOp) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProofOp.Merge(dst, src)
}
func (m *ProofOp) XXX_Size() int {
	return m.Size()
}
func (m *ProofOp) XXX_DiscardUnknown() {
	xxx_messageInfo_ProofOp.DiscardUnknown(m)
}

var xxx_messageInfo_ProofOp proto.InternalMessageInfo

// Nil-safe getters generated for each field.
func (m *ProofOp) GetType() string {
	if m != nil {
		return m.Type
	}
	return ""
}

func (m *ProofOp) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *ProofOp) GetData() []byte {
	if m != nil {
		return m.Data
	}
	return nil
}
// Proof is Merkle proof defined by the list of ProofOps
//
// NOTE(review): protoc-gen-gogo generated code — regenerate from
// merkle.proto rather than editing by hand.
type Proof struct {
	Ops                  []ProofOp `protobuf:"bytes,1,rep,name=ops" json:"ops"`
	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
	XXX_unrecognized     []byte    `json:"-"`
	XXX_sizecache        int32     `json:"-"`
}

func (m *Proof) Reset()         { *m = Proof{} }
func (m *Proof) String() string { return proto.CompactTextString(m) }
func (*Proof) ProtoMessage()    {}
func (*Proof) Descriptor() ([]byte, []int) {
	return fileDescriptor_merkle_5d3f6051907285da, []int{1}
}
func (m *Proof) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Proof.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *Proof) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Proof.Merge(dst, src)
}
func (m *Proof) XXX_Size() int {
	return m.Size()
}
func (m *Proof) XXX_DiscardUnknown() {
	xxx_messageInfo_Proof.DiscardUnknown(m)
}

var xxx_messageInfo_Proof proto.InternalMessageInfo

func (m *Proof) GetOps() []ProofOp {
	if m != nil {
		return m.Ops
	}
	return nil
}

func init() {
	proto.RegisterType((*ProofOp)(nil), "merkle.ProofOp")
	proto.RegisterType((*Proof)(nil), "merkle.Proof")
}
// Equal reports deep equality with another *ProofOp or ProofOp value,
// including unrecognized fields. Generated by gogoproto equal_all;
// do not edit by hand.
func (this *ProofOp) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*ProofOp)
	if !ok {
		that2, ok := that.(ProofOp)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Type != that1.Type {
		return false
	}
	if !bytes.Equal(this.Key, that1.Key) {
		return false
	}
	if !bytes.Equal(this.Data, that1.Data) {
		return false
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}

// Equal reports deep equality with another *Proof or Proof value,
// comparing Ops element-wise. Generated code.
func (this *Proof) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Proof)
	if !ok {
		that2, ok := that.(Proof)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if len(this.Ops) != len(that1.Ops) {
		return false
	}
	for i := range this.Ops {
		if !this.Ops[i].Equal(&that1.Ops[i]) {
			return false
		}
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}
// Marshal encodes m into freshly allocated protobuf wire bytes.
// Generated by gogoproto marshaler_all; do not edit by hand.
func (m *ProofOp) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m into dAtA (which must be large enough) and
// returns the number of bytes written.
func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Type) > 0 {
		dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
		i++
		i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type)))
		i += copy(dAtA[i:], m.Type)
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12 // field 2, wire type 2
		i++
		i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Data) > 0 {
		dAtA[i] = 0x1a // field 3, wire type 2
		i++
		i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data)))
		i += copy(dAtA[i:], m.Data)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}

func (m *Proof) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Proof) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Ops) > 0 {
		for _, msg := range m.Ops {
			dAtA[i] = 0xa // field 1, wire type 2, repeated
			i++
			i = encodeVarintMerkle(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}

// encodeVarintMerkle writes v as a protobuf varint at dAtA[offset:]
// and returns the offset just past the last byte written.
func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
// NewPopulatedProofOp returns a ProofOp filled with random data for
// round-trip tests. Generated by gogoproto populate_all; do not edit.
func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp {
	this := &ProofOp{}
	this.Type = string(randStringMerkle(r))
	v1 := r.Intn(100)
	this.Key = make([]byte, v1)
	for i := 0; i < v1; i++ {
		this.Key[i] = byte(r.Intn(256))
	}
	v2 := r.Intn(100)
	this.Data = make([]byte, v2)
	for i := 0; i < v2; i++ {
		this.Data[i] = byte(r.Intn(256))
	}
	if !easy && r.Intn(10) != 0 {
		this.XXX_unrecognized = randUnrecognizedMerkle(r, 4)
	}
	return this
}

// NewPopulatedProof returns a Proof with 0-4 random ops.
func NewPopulatedProof(r randyMerkle, easy bool) *Proof {
	this := &Proof{}
	if r.Intn(10) != 0 {
		v3 := r.Intn(5)
		this.Ops = make([]ProofOp, v3)
		for i := 0; i < v3; i++ {
			v4 := NewPopulatedProofOp(r, easy)
			this.Ops[i] = *v4
		}
	}
	if !easy && r.Intn(10) != 0 {
		this.XXX_unrecognized = randUnrecognizedMerkle(r, 2)
	}
	return this
}

// randyMerkle is the minimal random source the populate helpers need;
// *math/rand.Rand satisfies it.
type randyMerkle interface {
	Float32() float32
	Float64() float64
	Int63() int64
	Int31() int32
	Uint32() uint32
	Intn(n int) int
}

// randUTF8RuneMerkle returns a random alphanumeric ASCII rune.
func randUTF8RuneMerkle(r randyMerkle) rune {
	ru := r.Intn(62)
	if ru < 10 {
		return rune(ru + 48) // '0'-'9'
	} else if ru < 36 {
		return rune(ru + 55) // 'A'-'Z'
	}
	return rune(ru + 61) // 'a'-'z'
}

func randStringMerkle(r randyMerkle) string {
	v5 := r.Intn(100)
	tmps := make([]rune, v5)
	for i := 0; i < v5; i++ {
		tmps[i] = randUTF8RuneMerkle(r)
	}
	return string(tmps)
}

// randUnrecognizedMerkle produces random wire-format bytes for fields
// above maxFieldNumber, simulating unknown fields.
func randUnrecognizedMerkle(r randyMerkle, maxFieldNumber int) (dAtA []byte) {
	l := r.Intn(5)
	for i := 0; i < l; i++ {
		wire := r.Intn(4)
		if wire == 3 {
			wire = 5 // wire type 3 (start group) is avoided; use 5 (32-bit)
		}
		fieldNumber := maxFieldNumber + r.Intn(100)
		dAtA = randFieldMerkle(dAtA, r, fieldNumber, wire)
	}
	return dAtA
}

// randFieldMerkle appends one random field with the given tag/wire type.
func randFieldMerkle(dAtA []byte, r randyMerkle, fieldNumber int, wire int) []byte {
	key := uint32(fieldNumber)<<3 | uint32(wire)
	switch wire {
	case 0: // varint
		dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
		v6 := r.Int63()
		if r.Intn(2) == 0 {
			v6 *= -1
		}
		dAtA = encodeVarintPopulateMerkle(dAtA, uint64(v6))
	case 1: // 64-bit
		dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	case 2: // length-delimited
		dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
		ll := r.Intn(100)
		dAtA = encodeVarintPopulateMerkle(dAtA, uint64(ll))
		for j := 0; j < ll; j++ {
			dAtA = append(dAtA, byte(r.Intn(256)))
		}
	default: // 32-bit
		dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	}
	return dAtA
}

// encodeVarintPopulateMerkle appends v as a protobuf varint.
func encodeVarintPopulateMerkle(dAtA []byte, v uint64) []byte {
	for v >= 1<<7 {
		dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
		v >>= 7
	}
	dAtA = append(dAtA, uint8(v))
	return dAtA
}
// Size returns the exact encoded size of m in bytes; MarshalTo relies
// on it for buffer sizing. Generated by gogoproto sizer_all.
func (m *ProofOp) Size() (n int) {
	var l int
	_ = l
	l = len(m.Type)
	if l > 0 {
		n += 1 + l + sovMerkle(uint64(l))
	}
	l = len(m.Key)
	if l > 0 {
		n += 1 + l + sovMerkle(uint64(l))
	}
	l = len(m.Data)
	if l > 0 {
		n += 1 + l + sovMerkle(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func (m *Proof) Size() (n int) {
	var l int
	_ = l
	if len(m.Ops) > 0 {
		for _, e := range m.Ops {
			l = e.Size()
			n += 1 + l + sovMerkle(uint64(l))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// sovMerkle returns the varint-encoded size of x in bytes.
func sovMerkle(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sozMerkle returns the zigzag-varint-encoded size of x in bytes.
func sozMerkle(x uint64) (n int) {
	return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// Unmarshal decodes protobuf wire bytes into m, preserving unknown
// fields in XXX_unrecognized. Generated by gogoproto unmarshaler_all;
// the repetitive varint loops are the generator's hand-unrolled form —
// do not edit by hand.
func (m *ProofOp) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMerkle
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ProofOp: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMerkle
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthMerkle
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Type = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMerkle
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthMerkle
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMerkle
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthMerkle
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
			if m.Data == nil {
				m.Data = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it but keep the raw bytes around.
			iNdEx = preIndex
			skippy, err := skipMerkle(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthMerkle
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire bytes into m. Generated code;
// see ProofOp.Unmarshal for the loop structure.
func (m *Proof) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMerkle
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Proof: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMerkle
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMerkle
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Each repeated element is decoded into a freshly appended slot.
			m.Ops = append(m.Ops, ProofOp{})
			if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipMerkle(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthMerkle
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipMerkle returns the number of bytes occupied by the wire-format
// field starting at dAtA[0], so unknown fields can be skipped/preserved.
// Generated code; do not edit by hand.
func skipMerkle(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowMerkle
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0: // varint: consume bytes until the continuation bit clears
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowMerkle
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1: // fixed 64-bit
			iNdEx += 8
			return iNdEx, nil
		case 2: // length-delimited
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowMerkle
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthMerkle
			}
			return iNdEx, nil
		case 3: // start group: skip nested fields until the matching end group
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowMerkle
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipMerkle(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4: // end group
			return iNdEx, nil
		case 5: // fixed 32-bit
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}

// Sentinel errors shared by the generated unmarshal/skip helpers.
var (
	ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowMerkle   = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_merkle_5d3f6051907285da) }

// fileDescriptor_merkle_5d3f6051907285da is the gzipped
// FileDescriptorProto for crypto/merkle/merkle.proto. Generated data.
var fileDescriptor_merkle_5d3f6051907285da = []byte{
	// 200 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c,
	0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25,
	0xf9, 0x42, 0x6c, 0x10, 0x9e, 0x94, 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e,
	0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, 0x3e, 0x58, 0x3a, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0,
	0x2c, 0x88, 0x36, 0x25, 0x67, 0x2e, 0xf6, 0x80, 0xa2, 0xfc, 0xfc, 0x34, 0xff, 0x02, 0x21, 0x21,
	0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x48,
	0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc4, 0x04, 0xa9,
	0x4a, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x06, 0x0b, 0x81, 0xd9, 0x4a, 0x06, 0x5c, 0xac, 0x60, 0x43,
	0x84, 0xd4, 0xb9, 0x98, 0xf3, 0x0b, 0x8a, 0x25, 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xf8, 0xf5,
	0xa0, 0x0e, 0x84, 0x5a, 0xe0, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x85, 0x93, 0xc8,
	0x8f, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c,
	0xe3, 0x83, 0x47, 0x72, 0x8c, 0x49, 0x6c, 0x60, 0x37, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
	0xb9, 0x2b, 0x0f, 0xd1, 0xe8, 0x00, 0x00, 0x00,
}
@ -0,0 +1,30 @@ | |||
syntax = "proto3";
package merkle;

// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";

// Generate marshal/unmarshal/size/populate/equal helpers for every
// message in this file (produces merkle.pb.go via protoc.sh).
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.equal_all) = true;

//----------------------------------------
// Message types

// ProofOp defines an operation used for calculating Merkle root
// The data could be arbitrary format, providing necessary data
// for example neighbouring node hash
message ProofOp {
  string type = 1;
  bytes key = 2;
  bytes data = 3;
}

// Proof is Merkle proof defined by the list of ProofOps
message Proof {
  // nullable=false makes the Go field []ProofOp (values, not pointers).
  repeated ProofOp ops = 1 [(gogoproto.nullable)=false];
}
@ -0,0 +1,135 @@ | |||
package merkle | |||
import ( | |||
"bytes" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
//----------------------------------------
// ProofOp gets converted to an instance of ProofOperator:

// ProofOperator is a layer for calculating intermediate Merkle roots
// when a series of Merkle trees are chained together.
// Run() takes leaf values from a tree and returns the Merkle
// root for the corresponding tree. It takes and returns a list of bytes
// to allow multiple leaves to be part of a single proof, for instance in a range proof.
// ProofOp() encodes the ProofOperator in a generic way so it can later be
// decoded with OpDecoder.
type ProofOperator interface {
	// Run folds the input (leaf values or a previous layer's root)
	// into this layer's Merkle root.
	Run([][]byte) ([][]byte, error)
	// GetKey returns the keypath key this operator consumes, or an
	// empty slice if it consumes none.
	GetKey() []byte
	// ProofOp encodes the operator into the generic ProofOp form.
	ProofOp() ProofOp
}
//----------------------------------------
// Operations on a list of ProofOperators

// ProofOperators is a slice of ProofOperator(s).
// Each operator will be applied to the input value sequentially
// and the last Merkle root will be verified with already known data
type ProofOperators []ProofOperator

// VerifyValue verifies a single leaf value against root/keypath by
// delegating to Verify with a one-element argument list.
func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) {
	return poz.Verify(root, keypath, [][]byte{value})
}
func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) { | |||
keys, err := KeyPathToKeys(keypath) | |||
if err != nil { | |||
return | |||
} | |||
for i, op := range poz { | |||
key := op.GetKey() | |||
if len(key) != 0 { | |||
lastKey := keys[len(keys)-1] | |||
if !bytes.Equal(lastKey, key) { | |||
return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) | |||
} | |||
keys = keys[:len(keys)-1] | |||
} | |||
args, err = op.Run(args) | |||
if err != nil { | |||
return | |||
} | |||
} | |||
if !bytes.Equal(root, args[0]) { | |||
return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0]) | |||
} | |||
if len(keys) != 0 { | |||
return cmn.NewError("Keypath not consumed all") | |||
} | |||
return nil | |||
} | |||
//----------------------------------------
// ProofRuntime - main entrypoint

// OpDecoder turns a generic ProofOp back into a concrete ProofOperator.
type OpDecoder func(ProofOp) (ProofOperator, error)

// ProofRuntime dispatches ProofOp decoding by ProofOp.Type.
type ProofRuntime struct {
	// decoders maps a ProofOp.Type string to its registered decoder.
	decoders map[string]OpDecoder
}
func NewProofRuntime() *ProofRuntime { | |||
return &ProofRuntime{ | |||
decoders: make(map[string]OpDecoder), | |||
} | |||
} | |||
func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { | |||
_, ok := prt.decoders[typ] | |||
if ok { | |||
panic("already registered for type " + typ) | |||
} | |||
prt.decoders[typ] = dec | |||
} | |||
func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { | |||
decoder := prt.decoders[pop.Type] | |||
if decoder == nil { | |||
return nil, cmn.NewError("unrecognized proof type %v", pop.Type) | |||
} | |||
return decoder(pop) | |||
} | |||
func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) { | |||
var poz ProofOperators | |||
for _, pop := range proof.Ops { | |||
operator, err := prt.Decode(pop) | |||
if err != nil { | |||
return nil, cmn.ErrorWrap(err, "decoding a proof operator") | |||
} | |||
poz = append(poz, operator) | |||
} | |||
return poz, nil | |||
} | |||
// VerifyValue verifies that proof proves value at keypath under root.
func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) {
	return prt.Verify(proof, root, keypath, [][]byte{value})
}

// VerifyAbsence verifies that proof proves no value exists at keypath
// under root (the operator chain runs with nil args).
// TODO In the long run we'll need a method of classifcation of ops,
// whether existence or absence or perhaps a third?
func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) {
	return prt.Verify(proof, root, keypath, nil)
}
func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) { | |||
poz, err := prt.DecodeProof(proof) | |||
if err != nil { | |||
return cmn.ErrorWrap(err, "decoding proof") | |||
} | |||
return poz.Verify(root, keypath, args) | |||
} | |||
// DefaultProofRuntime only knows about Simple value | |||
// proofs. | |||
// To use e.g. IAVL proofs, register op-decoders as | |||
// defined in the IAVL package. | |||
func DefaultProofRuntime() (prt *ProofRuntime) { | |||
prt = NewProofRuntime() | |||
prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder) | |||
return | |||
} |
@ -0,0 +1,111 @@ | |||
package merkle | |||
import ( | |||
"encoding/hex" | |||
"fmt" | |||
"net/url" | |||
"strings" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
/* | |||
For generalized Merkle proofs, each layer of the proof may require an | |||
optional key. The key may be encoded either by URL-encoding or | |||
(upper-case) hex-encoding. | |||
TODO: In the future, more encodings may be supported, like base32 (e.g. | |||
/32:) | |||
For example, for a Cosmos-SDK application where the first two proof layers | |||
are SimpleValueOps, and the third proof layer is an IAVLValueOp, the keys | |||
might look like: | |||
0: []byte("App") | |||
1: []byte("IBC") | |||
2: []byte{0x01, 0x02, 0x03} | |||
Assuming that we know that the first two layers are always ASCII texts, we | |||
probably want to use URLEncoding for those, whereas the third layer will | |||
require HEX encoding for efficient representation. | |||
kp := new(KeyPath) | |||
kp.AppendKey([]byte("App"), KeyEncodingURL) | |||
kp.AppendKey([]byte("IBC"), KeyEncodingURL) | |||
kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingHex)
kp.String() // Should return "/App/IBC/x:010203" | |||
NOTE: Key paths must begin with a `/`. | |||
NOTE: All encodings *MUST* work compatibly, such that you can choose to use | |||
whatever encoding, and the decoded keys will always be the same. In other | |||
words, it's just as good to encode all three keys using URL encoding or HEX | |||
encoding... it just wouldn't be optimal in terms of readability or space | |||
efficiency. | |||
NOTE: Punycode will never be supported here, because not all values can be | |||
decoded. For example, no string decodes to the string "xn--blah" in | |||
Punycode. | |||
*/ | |||
// keyEncoding enumerates the supported ways a key may be encoded
// inside a key path string.
type keyEncoding int

const (
	// KeyEncodingURL renders the key with URL path escaping ("/App").
	KeyEncodingURL keyEncoding = iota
	// KeyEncodingHex renders the key as upper-case hex with an "x:"
	// prefix ("/x:010203").
	KeyEncodingHex
	KeyEncodingMax // Number of known encodings. Used for testing
)

// Key is a single keypath segment: the raw bytes plus the encoding
// used when rendering it as a string.
type Key struct {
	name []byte
	enc  keyEncoding
}

// KeyPath is an ordered list of keys, rendered as "/key1/key2/...".
type KeyPath []Key
func (pth KeyPath) AppendKey(key []byte, enc keyEncoding) KeyPath { | |||
return append(pth, Key{key, enc}) | |||
} | |||
func (pth KeyPath) String() string { | |||
res := "" | |||
for _, key := range pth { | |||
switch key.enc { | |||
case KeyEncodingURL: | |||
res += "/" + url.PathEscape(string(key.name)) | |||
case KeyEncodingHex: | |||
res += "/x:" + fmt.Sprintf("%X", key.name) | |||
default: | |||
panic("unexpected key encoding type") | |||
} | |||
} | |||
return res | |||
} | |||
// Decode a path to a list of keys. Path must begin with `/`. | |||
// Each key must use a known encoding. | |||
func KeyPathToKeys(path string) (keys [][]byte, err error) { | |||
if path == "" || path[0] != '/' { | |||
return nil, cmn.NewError("key path string must start with a forward slash '/'") | |||
} | |||
parts := strings.Split(path[1:], "/") | |||
keys = make([][]byte, len(parts)) | |||
for i, part := range parts { | |||
if strings.HasPrefix(part, "x:") { | |||
hexPart := part[2:] | |||
key, err := hex.DecodeString(hexPart) | |||
if err != nil { | |||
return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part) | |||
} | |||
keys[i] = key | |||
} else { | |||
key, err := url.PathUnescape(part) | |||
if err != nil { | |||
return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part) | |||
} | |||
keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes... | |||
} | |||
} | |||
return keys, nil | |||
} |
@ -0,0 +1,41 @@ | |||
package merkle | |||
import ( | |||
"math/rand" | |||
"testing" | |||
"github.com/stretchr/testify/require" | |||
) | |||
// TestKeyPath round-trips random key paths: it builds a KeyPath from
// randomly generated keys (ASCII letters for URL encoding, arbitrary
// bytes for hex encoding), renders it with String(), and checks that
// KeyPathToKeys recovers the original keys exactly.
func TestKeyPath(t *testing.T) {
	var path KeyPath
	keys := make([][]byte, 10)
	// NOTE: despite the name, this contains letters only (no digits).
	alphanum := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

	for d := 0; d < 1e4; d++ {
		path = nil

		for i := range keys {
			// Random encoding and random key length in [0, 20).
			enc := keyEncoding(rand.Intn(int(KeyEncodingMax)))
			keys[i] = make([]byte, rand.Uint32()%20)
			switch enc {
			case KeyEncodingURL:
				// URL-encoded keys are restricted to URL-safe letters here.
				for j := range keys[i] {
					keys[i][j] = alphanum[rand.Intn(len(alphanum))]
				}
			case KeyEncodingHex:
				// Hex-encoded keys may be arbitrary bytes.
				rand.Read(keys[i])
			default:
				panic("Unexpected encoding")
			}
			path = path.AppendKey(keys[i], enc)
		}

		res, err := KeyPathToKeys(path.String())
		require.Nil(t, err)

		for i, key := range keys {
			require.Equal(t, key, res[i])
		}
	}
}
@ -0,0 +1,91 @@ | |||
package merkle | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"github.com/tendermint/tendermint/crypto/tmhash" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
// ProofOpSimpleValue is the ProofOp.Type string identifying
// SimpleValueOp proofs on the wire.
const ProofOpSimpleValue = "simple:v"

// SimpleValueOp takes a key and a single value as argument and
// produces the root hash. The corresponding tree structure is
// the SimpleMap tree. SimpleMap takes a Hasher, and currently
// Tendermint uses aminoHasher. SimpleValueOp should support
// the hash function as used in aminoHasher. TODO support
// additional hash functions here as options/args to this
// operator.
//
// If the produced root hash matches the expected hash, the
// proof is good.
type SimpleValueOp struct {
	// Encoded in ProofOp.Key.
	key []byte

	// To encode in ProofOp.Data
	Proof *SimpleProof `json:"simple_proof"`
}

// Compile-time assertion that SimpleValueOp satisfies ProofOperator.
var _ ProofOperator = SimpleValueOp{}
// NewSimpleValueOp constructs a SimpleValueOp for the given key and
// its SimpleProof.
func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp {
	return SimpleValueOp{
		key:   key,
		Proof: proof,
	}
}
func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { | |||
if pop.Type != ProofOpSimpleValue { | |||
return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) | |||
} | |||
var op SimpleValueOp // a bit strange as we'll discard this, but it works. | |||
err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op) | |||
if err != nil { | |||
return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") | |||
} | |||
return NewSimpleValueOp(pop.Key, op.Proof), nil | |||
} | |||
func (op SimpleValueOp) ProofOp() ProofOp { | |||
bz := cdc.MustMarshalBinaryLengthPrefixed(op) | |||
return ProofOp{ | |||
Type: ProofOpSimpleValue, | |||
Key: op.key, | |||
Data: bz, | |||
} | |||
} | |||
// String implements fmt.Stringer; the key is rendered with %v
// (default byte-slice formatting), mainly for debugging.
func (op SimpleValueOp) String() string {
	return fmt.Sprintf("SimpleValueOp{%v}", op.GetKey())
}
func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { | |||
if len(args) != 1 { | |||
return nil, cmn.NewError("expected 1 arg, got %v", len(args)) | |||
} | |||
value := args[0] | |||
hasher := tmhash.New() | |||
hasher.Write(value) // does not error | |||
vhash := hasher.Sum(nil) | |||
// Wrap <op.Key, vhash> to hash the KVPair. | |||
hasher = tmhash.New() | |||
encodeByteSlice(hasher, []byte(op.key)) // does not error | |||
encodeByteSlice(hasher, []byte(vhash)) // does not error | |||
kvhash := hasher.Sum(nil) | |||
if !bytes.Equal(kvhash, op.Proof.LeafHash) { | |||
return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) | |||
} | |||
return [][]byte{ | |||
op.Proof.ComputeRootHash(), | |||
}, nil | |||
} | |||
// GetKey returns the key this operator proves, as carried in ProofOp.Key.
func (op SimpleValueOp) GetKey() []byte {
	return op.key
}
@ -0,0 +1,136 @@ | |||
package merkle | |||
import ( | |||
"testing" | |||
"github.com/stretchr/testify/assert" | |||
"github.com/tendermint/go-amino" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
// ProofOpDomino is the ProofOp.Type string used for the test-only DominoOp.
const ProofOpDomino = "test:domino"

// Expects given input, produces given output.
// Like the game dominos.
type DominoOp struct {
	key    string // unexported, may be empty
	Input  string
	Output string
}
// NewDominoOp constructs a DominoOp with the given key, expected input,
// and produced output.
func NewDominoOp(key, input, output string) DominoOp {
	return DominoOp{
		key:    key,
		Input:  input,
		Output: output,
	}
}
func DominoOpDecoder(pop ProofOp) (ProofOperator, error) { | |||
if pop.Type != ProofOpDomino { | |||
panic("unexpected proof op type") | |||
} | |||
var op DominoOp // a bit strange as we'll discard this, but it works. | |||
err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op) | |||
if err != nil { | |||
return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") | |||
} | |||
return NewDominoOp(string(pop.Key), op.Input, op.Output), nil | |||
} | |||
// ProofOp encodes dop into its wire representation: the amino-encoded
// operator goes into Data, the key into Key.
func (dop DominoOp) ProofOp() ProofOp {
	bz := amino.MustMarshalBinaryLengthPrefixed(dop)
	return ProofOp{
		Type: ProofOpDomino,
		Key:  []byte(dop.key),
		Data: bz,
	}
}
func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) { | |||
if len(input) != 1 { | |||
return nil, cmn.NewError("Expected input of length 1") | |||
} | |||
if string(input[0]) != dop.Input { | |||
return nil, cmn.NewError("Expected input %v, got %v", | |||
dop.Input, string(input[0])) | |||
} | |||
return [][]byte{[]byte(dop.Output)}, nil | |||
} | |||
// GetKey returns the operator's key as bytes ([]byte("") when unset).
func (dop DominoOp) GetKey() []byte {
	return []byte(dop.key)
}
//---------------------------------------- | |||
// TestProofOperators exercises ProofOperators.Verify/VerifyValue over a
// chain of DominoOps: each op's output is the next op's expected input,
// and the keypath "/KEY4/KEY2/KEY1" names the keyed ops root-to-leaf
// (op3 has an empty key, so it is not represented in the path).
func TestProofOperators(t *testing.T) {
	var err error

	// ProofRuntime setup
	// TODO test this somehow.
	// prt := NewProofRuntime()
	// prt.RegisterOpDecoder(ProofOpDomino, DominoOpDecoder)

	// ProofOperators setup
	op1 := NewDominoOp("KEY1", "INPUT1", "INPUT2")
	op2 := NewDominoOp("KEY2", "INPUT2", "INPUT3")
	op3 := NewDominoOp("", "INPUT3", "INPUT4")
	op4 := NewDominoOp("KEY4", "INPUT4", "OUTPUT4")

	// Good
	popz := ProofOperators([]ProofOperator{op1, op2, op3, op4})
	err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.Nil(t, err)
	err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1"))
	assert.Nil(t, err)

	// BAD INPUT
	err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")})
	assert.NotNil(t, err)
	err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG"))
	assert.NotNil(t, err)

	// BAD KEY 1 (wrong key name in path)
	err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD KEY 2 (missing leading slash)
	err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD KEY 3 (trailing slash adds an empty segment)
	err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD KEY 4 (double slash adds an empty segment)
	err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD OUTPUT 1
	err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD OUTPUT 2
	err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD POPZ 1 (op3 removed, chain is broken)
	popz = []ProofOperator{op1, op2, op4}
	err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD POPZ 2 (reversed order, chain is broken)
	popz = []ProofOperator{op4, op3, op2, op1}
	err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)

	// BAD POPZ 3 (empty operator list)
	popz = []ProofOperator{}
	err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
	assert.NotNil(t, err)
}
// bz is test shorthand converting a string to its raw bytes.
func bz(s string) []byte {
	b := []byte(s)
	return b
}
@ -0,0 +1,12 @@ | |||
package merkle | |||
import ( | |||
"github.com/tendermint/go-amino" | |||
) | |||
// cdc is the amino codec used by this package for (de)serializing proof
// types.
var cdc *amino.Codec

func init() {
	cdc = amino.NewCodec()
	// Seal immediately: nothing is registered before sealing, so the
	// codec handles only concrete (non-interface) types.
	cdc.Seal()
}
@ -0,0 +1,23 @@ | |||
package crypto_test | |||
import ( | |||
"testing" | |||
"github.com/stretchr/testify/require" | |||
"github.com/tendermint/tendermint/crypto" | |||
) | |||
// the purpose of this test is primarily to ensure that the randomness | |||
// generation won't error. | |||
func TestRandomConsistency(t *testing.T) { | |||
x1 := crypto.CRandBytes(256) | |||
x2 := crypto.CRandBytes(256) | |||
x3 := crypto.CRandBytes(256) | |||
x4 := crypto.CRandBytes(256) | |||
x5 := crypto.CRandBytes(256) | |||
require.NotEqual(t, x1, x2) | |||
require.NotEqual(t, x3, x4) | |||
require.NotEqual(t, x4, x5) | |||
require.NotEqual(t, x1, x5) | |||
} |
@ -1,41 +1,29 @@ | |||
# Tendermint | |||
Welcome to the Tendermint Core documentation! Below you'll find an | |||
overview of the documentation. | |||
Welcome to the Tendermint Core documentation! | |||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state | |||
transition machine - written in any programming language - and securely | |||
replicates it on many machines. In other words, a blockchain. | |||
Tendermint Core is a blockchain application platform; it provides the equivalent | |||
of a web-server, database, and supporting libraries for blockchain applications | |||
written in any programming language. Like a web-server serving web applications, | |||
Tendermint serves blockchain applications. | |||
Tendermint requires an application running over the Application Blockchain | |||
Interface (ABCI) - and comes packaged with an example application to do so. | |||
More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT) | |||
State Machine Replication (SMR) for arbitrary deterministic, finite state machines. | |||
For more background, see [What is | |||
Tendermint?](introduction/what-is-tendermint.md). | |||
## Getting Started | |||
To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). | |||
Here you'll find quick start guides and links to more advanced "get up and running" | |||
documentation. | |||
To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci/). | |||
## Core | |||
For more details on using Tendermint, see the respective documentation for | |||
[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). | |||
Details about the core functionality and configuration of Tendermint. | |||
## Contribute | |||
## Tools | |||
Benchmarking and monitoring tools. | |||
## Networks | |||
Setting up testnets manually or automated, local or in the cloud. | |||
## Apps | |||
Building applications with the ABCI.
## Specification | |||
Dive deep into the spec. There's one for each Tendermint and the ABCI | |||
To contribute to the documentation, see [this file](./DOCS_README.md) for details of the build process and | |||
considerations when making changes. | |||
## Edit the Documentation | |||
## Version | |||
See [this file](./DOCS_README.md) for details of the build process and | |||
considerations when making changes. | |||
This documentation is built from the following commit: |
@ -0,0 +1,234 @@ | |||
# ADR 024: SignBytes and validator types in privval | |||
## Context | |||
Currently, the messages exchanged between tendermint and a (potentially remote) signer/validator, | |||
namely votes, proposals, and heartbeats, are encoded as a JSON string | |||
(e.g., via `Vote.SignBytes(...)`) and then | |||
signed. JSON encoding is sub-optimal for both hardware wallets
and for usage in Ethereum smart contracts. Both concerns are laid down in detail in [issue#1622].
Also, there is currently no distinction between sign-requests and -replies, and there is no way
for a remote signer to include an error code or message in case something went wrong.
The messages exchanged between tendermint and a remote signer currently live in | |||
[privval/socket.go] and encapsulate the corresponding types in [types]. | |||
[privval/socket.go]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/privval/socket.go#L496-L502 | |||
[issue#1622]: https://github.com/tendermint/tendermint/issues/1622 | |||
[types]: https://github.com/tendermint/tendermint/tree/master/types | |||
## Decision | |||
- restructure vote, proposal, and heartbeat such that their encoding is easily parseable by | |||
hardware devices and smart contracts using a binary encoding format ([amino] in this case) | |||
- split up the messages exchanged between tendermint and remote signers into requests and | |||
responses (see details below) | |||
- include an error type in responses | |||
### Overview | |||
``` | |||
+--------------+ +----------------+ | |||
| | SignXRequest | | | |||
|Remote signer |<---------------------+ tendermint | | |||
| (e.g. KMS) | | | | |||
| +--------------------->| | | |||
+--------------+ SignedXReply +----------------+ | |||
SignXRequest { | |||
x: X | |||
} | |||
SignedXReply { | |||
x: X | |||
sig: Signature // []byte | |||
err: Error{ | |||
code: int | |||
desc: string | |||
} | |||
} | |||
``` | |||
TODO: Alternatively, the type `X` might directly include the signature. A lot of places expect a vote with a | |||
signature and do not necessarily deal with "Replies". | |||
Still exploring what would work best here. | |||
This would look like (exemplified using X = Vote): | |||
``` | |||
Vote { | |||
// all fields besides signature | |||
} | |||
SignedVote { | |||
Vote Vote | |||
Signature []byte | |||
} | |||
SignVoteRequest { | |||
Vote Vote | |||
} | |||
SignedVoteReply { | |||
Vote SignedVote | |||
Err Error | |||
} | |||
``` | |||
**Note:** There was a related discussion around including a fingerprint of, or, the whole public-key | |||
into each sign-request to tell the signer which corresponding private-key to | |||
use to sign the message. This is particularly relevant in the context of the KMS | |||
but is currently not considered in this ADR. | |||
[amino]: https://github.com/tendermint/go-amino/ | |||
### Vote | |||
As explained in [issue#1622] `Vote` will be changed to contain the following fields | |||
(notation in protobuf-like syntax for easy readability): | |||
```proto | |||
// vanilla protobuf / amino encoded | |||
message Vote { | |||
Version fixed32 | |||
Height sfixed64 | |||
Round sfixed32 | |||
VoteType fixed32 | |||
Timestamp Timestamp // << using protobuf definition | |||
BlockID BlockID // << as already defined | |||
ChainID string // at the end because length could vary a lot | |||
} | |||
// this is an amino registered type; like currently privval.SignVoteMsg: | |||
// registered with "tendermint/socketpv/SignVoteRequest" | |||
message SignVoteRequest { | |||
Vote vote | |||
} | |||
// amino registered type | |||
// registered with "tendermint/socketpv/SignedVoteReply" | |||
message SignedVoteReply { | |||
Vote Vote | |||
Signature Signature | |||
Err Error | |||
} | |||
// we will use this type everywhere below | |||
message Error { | |||
Type uint // error code | |||
Description string // optional description | |||
} | |||
``` | |||
The `ChainID` gets moved into the vote message directly. Previously, it was injected | |||
using the [Signable] interface method `SignBytes(chainID string) []byte`. Also, the | |||
signature won't be included directly, only in the corresponding `SignedVoteReply` message. | |||
[Signable]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/types/signable.go#L9-L11 | |||
### Proposal | |||
```proto | |||
// vanilla protobuf / amino encoded | |||
message Proposal { | |||
Height sfixed64 | |||
Round sfixed32 | |||
Timestamp Timestamp // << using protobuf definition | |||
BlockPartsHeader PartSetHeader // as already defined | |||
POLRound sfixed32 | |||
POLBlockID BlockID // << as already defined | |||
} | |||
// amino registered with "tendermint/socketpv/SignProposalRequest" | |||
message SignProposalRequest { | |||
Proposal proposal | |||
} | |||
// amino registered with "tendermint/socketpv/SignProposalReply" | |||
message SignProposalReply { | |||
Prop Proposal | |||
Sig Signature | |||
Err Error // as defined above | |||
} | |||
``` | |||
### Heartbeat | |||
**TODO**: clarify if heartbeat also needs a fixed offset and update the fields accordingly: | |||
```proto | |||
message Heartbeat { | |||
ValidatorAddress Address | |||
ValidatorIndex int | |||
Height int64 | |||
Round int | |||
Sequence int | |||
} | |||
// amino registered with "tendermint/socketpv/SignHeartbeatRequest" | |||
message SignHeartbeatRequest { | |||
Hb Heartbeat | |||
} | |||
// amino registered with "tendermint/socketpv/SignHeartbeatReply" | |||
message SignHeartbeatReply { | |||
Hb Heartbeat | |||
Sig Signature | |||
Err Error // as defined above | |||
} | |||
``` | |||
## PubKey | |||
TBA - this needs further thoughts: e.g. what todo like in the case of the KMS which holds | |||
several keys? How does it know with which key to reply? | |||
## SignBytes | |||
`SignBytes` will not require a `ChainID` parameter: | |||
```golang | |||
type Signable interface { | |||
SignBytes() []byte | |||
} | |||
``` | |||
And the implementation for vote, heartbeat, proposal will look like: | |||
```golang | |||
// type T is one of vote, sign, proposal | |||
func (tp *T) SignBytes() []byte { | |||
bz, err := cdc.MarshalBinary(tp) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return bz | |||
} | |||
``` | |||
## Status | |||
DRAFT | |||
## Consequences | |||
### Positive | |||
The most relevant positive effect is that the signing bytes can easily be parsed by a | |||
hardware module and a smart contract. Besides that: | |||
- clearer separation between requests and responses | |||
- added error messages enable better error handling | |||
### Negative | |||
- relatively huge change / refactoring touching quite some code | |||
- lots of places assume a `Vote` with a signature included -> they will need to be modified
- need to modify some interfaces | |||
### Neutral | |||
not even the swiss are neutral |
@ -0,0 +1,75 @@ | |||
# ADR 025 Commit | |||
## Context | |||
Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data. | |||
In particular it contains an array of every precommit from the validators, which includes many copies of the same data, such as `Height`, `Round`, `Type`, and `BlockID`. Also the `ValidatorIndex` could be derived from the vote's position in the array, and the `ValidatorAddress` could potentially be derived from runtime context. The only truly necessary data is the `Signature` and `Timestamp` associated with each `Vote`.
``` | |||
type Commit struct { | |||
BlockID BlockID `json:"block_id"` | |||
Precommits []*Vote `json:"precommits"` | |||
} | |||
type Vote struct { | |||
ValidatorAddress Address `json:"validator_address"` | |||
ValidatorIndex int `json:"validator_index"` | |||
Height int64 `json:"height"` | |||
Round int `json:"round"` | |||
Timestamp time.Time `json:"timestamp"` | |||
Type byte `json:"type"` | |||
BlockID BlockID `json:"block_id"` | |||
Signature []byte `json:"signature"` | |||
} | |||
``` | |||
References: | |||
[#1648](https://github.com/tendermint/tendermint/issues/1648) | |||
[#2179](https://github.com/tendermint/tendermint/issues/2179) | |||
[#2226](https://github.com/tendermint/tendermint/issues/2226) | |||
## Proposed Solution | |||
We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself. | |||
``` | |||
type Commit struct { | |||
Height int64 | |||
Round int | |||
BlockID BlockID `json:"block_id"` | |||
Precommits []*CommitSig `json:"precommits"` | |||
} | |||
type CommitSig struct { | |||
ValidatorAddress Address | |||
Signature []byte | |||
Timestamp time.Time | |||
} | |||
``` | |||
Continuing to store the `ValidatorAddress` in the `CommitSig` takes up extra space, but simplifies the process and allows for easier debugging. | |||
## Status | |||
Proposed | |||
## Consequences | |||
### Positive | |||
The size of a `Commit` transmitted over the network goes from: | |||
|BlockID| + n * (|Address| + |ValidatorIndex| + |Height| + |Round| + |Timestamp| + |Type| + |BlockID| + |Signature|) | |||
to: | |||
|BlockID|+|Height|+|Round| + n*(|Address| + |Signature| + |Timestamp|) | |||
This saves: | |||
n * (|BlockID| + |ValidatorIndex| + |Type|) + (n-1) * (Height + Round) | |||
In the current context, this would concretely be: | |||
(assuming all ints are int64, and hashes are 32 bytes) | |||
n *(72 + 8 + 1 + 8 + 8) - 16 = n * 97 - 16 | |||
With 100 validators this is a savings of almost 10KB on every block. | |||
### Negative | |||
This would add some complexity to the processing and verification of blocks and commits, as votes would have to be reconstructed to be verified and gossiped. The reconstruction could be relatively straightforward, only requiring the copying of data from the `Commit` itself into the newly created `Vote`. | |||
### Neutral | |||
This design leaves the `ValidatorAddress` in the `CommitSig` and in the `Vote`. These could be removed at some point for additional savings, but that would introduce more complexity, and make printing of `Commit` and `VoteSet` objects less informative, which could harm debugging efficiency and UI/UX. |
@ -0,0 +1,47 @@ | |||
# ADR 026: General Merkle Proof | |||
## Context | |||
We are using raw `[]byte` for merkle proofs in `abci.ResponseQuery`. This makes it hard to handle multilayer merkle proofs and general cases. Here, a new interface `ProofOperator` is defined. Users can define their own Merkle proof format and layer them easily.
Goals: | |||
- Layer Merkle proofs without decoding/reencoding | |||
- Provide general way to chain proofs | |||
- Make the proof format extensible, allowing thirdparty proof types | |||
## Decision | |||
### ProofOperator | |||
`type ProofOperator` is an interface for Merkle proofs. The definition is: | |||
```go | |||
type ProofOperator interface { | |||
Run([][]byte) ([][]byte, error) | |||
GetKey() []byte | |||
ProofOp() ProofOp | |||
} | |||
``` | |||
Since a proof can treat various data type, `Run()` takes `[][]byte` as the argument, not `[]byte`. For example, a range proof's `Run()` can take multiple key-values as its argument. It will then return the root of the tree for the further process, calculated with the input value. | |||
`ProofOperator` does not have to be a Merkle proof - it can be a function that transforms the argument for intermediate process e.g. prepending the length to the `[]byte`. | |||
### ProofOp | |||
`type ProofOp` is a protobuf message which is a triple of `Type string`, `Key []byte`, and `Data []byte`. `ProofOperator` and `ProofOp` are interconvertible, using `ProofOperator.ProofOp()` and `OpDecoder()`, where `OpDecoder` is a function that each proof type can register for their own encoding scheme. For example, we can add a byte for the encoding scheme before the serialized proof, supporting JSON decoding.
## Status | |||
## Consequences | |||
### Positive | |||
- Layering becomes easier (no encoding/decoding at each step) | |||
- Thirdparty proof format is available | |||
### Negative | |||
- Larger size for abci.ResponseQuery | |||
- Unintuitive proof chaining(it is not clear what `Run()` is doing) | |||
- Additional codes for registering `OpDecoder`s |
@ -0,0 +1,128 @@ | |||
# ADR 029: Check block txs before prevote | |||
## Changelog | |||
04-10-2018: Update with link to issue | |||
[#2384](https://github.com/tendermint/tendermint/issues/2384) and reason for rejection | |||
19-09-2018: Initial Draft | |||
## Context | |||
We currently check a tx's validity through 2 ways. | |||
1. Through checkTx in mempool connection. | |||
2. Through deliverTx in consensus connection. | |||
The 1st is called when external tx comes in, so the node should be a proposer this time. The 2nd is called when external block comes in and reach the commit phase, the node doesn't need to be the proposer of the block, however it should check the txs in that block. | |||
In the 2nd situation, if there are many invalid txs in the block, it would be too late for all nodes to discover that most txs in the block are invalid, and we'd better not record invalid txs in the blockchain too. | |||
## Proposed solution | |||
Therefore, we should find a way to check the txs' validity before sending out a prevote. Currently we have cs.isProposalComplete() to judge whether a block is complete. We can have
``` | |||
func (blockExec *BlockExecutor) CheckBlock(block *types.Block) error { | |||
// check txs of block. | |||
for _, tx := range block.Txs { | |||
reqRes := blockExec.proxyApp.CheckTxAsync(tx) | |||
reqRes.Wait() | |||
if reqRes.Response == nil || reqRes.Response.GetCheckTx() == nil || reqRes.Response.GetCheckTx().Code != abci.CodeTypeOK { | |||
return errors.Errorf("tx %v check failed. response: %v", tx, reqRes.Response) | |||
} | |||
} | |||
return nil | |||
} | |||
``` | |||
such a method in BlockExecutor to check all txs' validity in that block. | |||
However, this method should not be implemented like that, because checkTx will share the same state used in mempool in the app. So we should define a new interface method checkBlock in Application to indicate it to use the same state as deliverTx. | |||
``` | |||
type Application interface { | |||
// Info/Query Connection | |||
Info(RequestInfo) ResponseInfo // Return application info | |||
SetOption(RequestSetOption) ResponseSetOption // Set application option | |||
Query(RequestQuery) ResponseQuery // Query for state | |||
// Mempool Connection | |||
CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool | |||
// Consensus Connection | |||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore | |||
CheckBlock(RequestCheckBlock) ResponseCheckBlock | |||
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block | |||
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing | |||
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set | |||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash | |||
} | |||
``` | |||
All app should implement that method. For example, counter: | |||
``` | |||
func (app *CounterApplication) CheckBlock(block types.Request_CheckBlock) types.ResponseCheckBlock { | |||
if app.serial { | |||
app.originalTxCount = app.txCount //backup the txCount state | |||
for _, tx := range block.CheckBlock.Block.Txs { | |||
if len(tx) > 8 { | |||
return types.ResponseCheckBlock{ | |||
Code: code.CodeTypeEncodingError, | |||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))} | |||
} | |||
tx8 := make([]byte, 8) | |||
copy(tx8[len(tx8)-len(tx):], tx) | |||
txValue := binary.BigEndian.Uint64(tx8) | |||
if txValue < uint64(app.txCount) { | |||
return types.ResponseCheckBlock{ | |||
Code: code.CodeTypeBadNonce, | |||
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)} | |||
} | |||
app.txCount++ | |||
} | |||
} | |||
return types.ResponseCheckBlock{Code: code.CodeTypeOK} | |||
} | |||
``` | |||
In BeginBlock, the app should restore the state to the original state before checking the block:
``` | |||
func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { | |||
if app.serial { | |||
app.txCount = app.originalTxCount //restore the txCount state | |||
} | |||
app.txCount++ | |||
return types.ResponseDeliverTx{Code: code.CodeTypeOK} | |||
} | |||
``` | |||
The txCount is like the nonce in ethermint, it should be restored when entering the deliverTx phase. While some operation like checking the tx signature needs not to be done again. So the deliverTx can focus on how a tx can be applied, ignoring the checking of the tx, because all the checking has already been done in the checkBlock phase before. | |||
An optional optimization is to alter deliverTx to deliverBlock. Since the block has already been checked by checkBlock, all the txs in it are valid. So the app can cache the block, and in the deliverBlock phase, it just needs to apply the block in the cache. This optimization can save network bandwidth in deliverTx.
## Status | |||
Rejected | |||
## Decision | |||
Performance impact is considered too great. See [#2384](https://github.com/tendermint/tendermint/issues/2384) | |||
## Consequences | |||
### Positive | |||
- more robust to defend the adversary to propose a block full of invalid txs. | |||
### Negative | |||
- add a new interface method. app logic needs to adjust to appeal to it. | |||
- sending all the tx data over the ABCI twice | |||
- potentially redundant validations (eg. signature checks in both CheckBlock and | |||
DeliverTx) | |||
### Neutral |
@ -0,0 +1,152 @@ | |||
# ADR 030: Consensus Refactor | |||
## Context | |||
One of the biggest challenges this project faces is to prove that the
implementations of the specifications are correct, much like we strive to
formally verify our algorithms and protocols we should work towards high
confidence about the correctness of our program code. One of those is the core | |||
of Tendermint - Consensus - which currently resides in the `consensus` package. | |||
Over time there has been high friction making changes to the package due to the | |||
algorithm being scattered in a side-effectful container (the current | |||
`ConsensusState`). In order to test the algorithm a large object-graph needs to | |||
be set up and even then the non-deterministic parts of the container will
prevent high certainty. Where ideally we have a 1-to-1 representation of the | |||
[spec](https://github.com/tendermint/spec), ready and easy to test for domain | |||
experts. | |||
Addresses: | |||
- [#1495](https://github.com/tendermint/tendermint/issues/1495) | |||
- [#1692](https://github.com/tendermint/tendermint/issues/1692) | |||
## Decision | |||
To remedy these issues we plan a gradual, non-invasive refactoring of the | |||
`consensus` package. Starting off by isolating the consensus algorithm into
a pure function and a finite state machine to address the most pressing issue
of lack of confidence. Doing so while leaving the rest of the package intact
and having follow-up optional changes to improve the separation of concerns.
### Implementation changes | |||
The core of Consensus can be modelled as a function with clear defined inputs: | |||
* `State` - data container for current round, height, etc. | |||
* `Event`- significant events in the network | |||
producing clear outputs; | |||
* `State` - updated input | |||
* `Message` - signal what actions to perform | |||
```go | |||
type Event int | |||
const ( | |||
EventUnknown Event = iota | |||
EventProposal | |||
Majority23PrevotesBlock | |||
Majority23PrecommitBlock | |||
Majority23PrevotesAny | |||
Majority23PrecommitAny | |||
TimeoutNewRound | |||
TimeoutPropose | |||
TimeoutPrevotes | |||
TimeoutPrecommit | |||
) | |||
type Message int | |||
const ( | |||
    MessageUnknown Message = iota
MessageProposal | |||
MessageVotes | |||
MessageDecision | |||
) | |||
type State struct { | |||
height uint64 | |||
round uint64 | |||
step uint64 | |||
lockedValue interface{} // TODO: Define proper type. | |||
lockedRound interface{} // TODO: Define proper type. | |||
validValue interface{} // TODO: Define proper type. | |||
validRound interface{} // TODO: Define proper type. | |||
// From the original notes: valid(v) | |||
valid interface{} // TODO: Define proper type. | |||
// From the original notes: proposer(h, r) | |||
proposer interface{} // TODO: Define proper type. | |||
} | |||
func Consensus(Event, State) (State, Message) { | |||
// Consolidate implementation. | |||
} | |||
``` | |||
Tracking of relevant information to feed `Event` into the function and act on | |||
the output is left to the `ConsensusExecutor` (formerly `ConsensusState`). | |||
Benefits for testing surfacing nicely as testing for a sequence of events | |||
against algorithm could be as simple as the following example: | |||
``` go | |||
func TestConsensusXXX(t *testing.T) { | |||
type expected struct { | |||
message Message | |||
state State | |||
} | |||
// Setup order of events, initial state and expectation. | |||
var ( | |||
events = []struct { | |||
event Event | |||
want expected | |||
}{ | |||
// ... | |||
} | |||
state = State{ | |||
// ... | |||
} | |||
) | |||
for _, e := range events { | |||
        state, msg = Consensus(e.event, state)
// Test message expectation. | |||
if msg != e.want.message { | |||
t.Fatalf("have %v, want %v", msg, e.want.message) | |||
} | |||
// Test state expectation. | |||
if !reflect.DeepEqual(state, e.want.state) { | |||
t.Fatalf("have %v, want %v", state, e.want.state) | |||
} | |||
} | |||
} | |||
``` | |||
### Implementation roadmap | |||
* implement proposed implementation | |||
* replace currently scattered calls in `ConsensusState` with calls to the new | |||
`Consensus` function | |||
* rename `ConsensusState` to `ConsensusExecutor` to avoid confusion | |||
* propose design for improved separation and clear information flow between | |||
`ConsensusExecutor` and `ConsensusReactor` | |||
## Status | |||
Draft. | |||
## Consequences | |||
### Positive | |||
- isolated implementation of the algorithm | |||
- improved testability - simpler to prove correctness | |||
- clearer separation of concerns - easier to reason | |||
### Negative | |||
### Neutral |
@ -0,0 +1,122 @@ | |||
# ADR 033: pubsub 2.0 | |||
Author: Anton Kaliaev (@melekes) | |||
## Changelog | |||
02-10-2018: Initial draft | |||
## Context | |||
Since the initial version of the pubsub, there's been a number of issues | |||
raised: #951, #1879, #1880. Some of them are high-level issues questioning the | |||
core design choices made. Others are minor and mostly about the interface of | |||
`Subscribe()` / `Publish()` functions. | |||
### Sync vs Async | |||
Now, when publishing a message to subscribers, we can do it in a goroutine: | |||
_using channels for data transmission_ | |||
```go | |||
for each subscriber { | |||
out := subscriber.outc | |||
go func() { | |||
out <- msg | |||
} | |||
} | |||
``` | |||
_by invoking callback functions_ | |||
```go | |||
for each subscriber { | |||
go subscriber.callbackFn() | |||
} | |||
``` | |||
This gives us greater performance and allows us to avoid "slow client problem" | |||
(when other subscribers have to wait for a slow subscriber). A pool of | |||
goroutines can be used to avoid uncontrolled memory growth. | |||
In certain cases, this is what you want. But in our case, because we need | |||
strict ordering of events (if event A was published before B, the guaranteed | |||
delivery order will be A -> B), we can't use goroutines. | |||
There is also a question whether we should have a non-blocking send: | |||
```go | |||
for each subscriber { | |||
out := subscriber.outc | |||
select { | |||
case out <- msg: | |||
default: | |||
log("subscriber %v buffer is full, skipping...") | |||
} | |||
} | |||
``` | |||
This fixes the "slow client problem", but there is no way for a slow client to | |||
know if it had missed a message. On the other hand, if we're going to stick | |||
with blocking send, **devs must always ensure subscriber's handling code does not | |||
block**. As you can see, there is an implicit choice between ordering guarantees | |||
and using goroutines. | |||
The interim option is to run goroutines pool for a single message, wait for all | |||
goroutines to finish. This will solve "slow client problem", but we'd still | |||
have to wait `max(goroutine_X_time)` before we can publish the next message. | |||
My opinion: not worth doing. | |||
### Channels vs Callbacks | |||
Yet another question is whether we should use channels for message transmission or | |||
call subscriber-defined callback functions. Callback functions give subscribers | |||
more flexibility - you can use mutexes in there, channels, spawn goroutines, | |||
anything you really want. But they also carry local scope, which can result in | |||
memory leaks and/or memory usage increase. | |||
Go channels are de-facto standard for carrying data between goroutines. | |||
**Question: Is it worth switching to callback functions?** | |||
### Why `Subscribe()` accepts an `out` channel? | |||
Because in our tests, we create buffered channels (cap: 1). Alternatively, we | |||
can make capacity an argument. | |||
## Decision | |||
Change Subscribe() function to return out channel: | |||
```go | |||
// outCap can be used to set capacity of out channel (unbuffered by default). | |||
Subscribe(ctx context.Context, clientID string, query Query, outCap... int) (out <-chan interface{}, err error) { | |||
``` | |||
It's more idiomatic since we're closing it during Unsubscribe/UnsubscribeAll calls. | |||
Also, we should make tags available to subscribers: | |||
```go | |||
type MsgAndTags struct { | |||
Msg interface{} | |||
Tags TagMap | |||
} | |||
// outCap can be used to set capacity of out channel (unbuffered by default). | |||
Subscribe(ctx context.Context, clientID string, query Query, outCap... int) (out <-chan MsgAndTags, err error) { | |||
``` | |||
## Status | |||
In review | |||
## Consequences | |||
### Positive | |||
- more idiomatic interface | |||
- subscribers know what tags msg was published with | |||
### Negative | |||
### Neutral |
@ -0,0 +1,15 @@ | |||
# Overview | |||
## Quick Start | |||
Get Tendermint up-and-running quickly with the [quick-start guide](./quick-start.md)! | |||
## Install | |||
Detailed [installation instructions](./install.md). | |||
## What is Tendermint? | |||
Dive into [what Tendermint is and why](./what-is-tendermint.md)! | |||
@ -0,0 +1,332 @@ | |||
# What is Tendermint? | |||
Tendermint is software for securely and consistently replicating an | |||
application on many machines. By securely, we mean that Tendermint works | |||
even if up to 1/3 of machines fail in arbitrary ways. By consistently, | |||
we mean that every non-faulty machine sees the same transaction log and | |||
computes the same state. Secure and consistent replication is a | |||
fundamental problem in distributed systems; it plays a critical role in | |||
the fault tolerance of a broad range of applications, from currencies, | |||
to elections, to infrastructure orchestration, and beyond. | |||
The ability to tolerate machines failing in arbitrary ways, including | |||
becoming malicious, is known as Byzantine fault tolerance (BFT). The | |||
theory of BFT is decades old, but software implementations have only | |||
become popular recently, due largely to the success of "blockchain | |||
technology" like Bitcoin and Ethereum. Blockchain technology is just a | |||
reformalization of BFT in a more modern setting, with emphasis on | |||
peer-to-peer networking and cryptographic authentication. The name | |||
derives from the way transactions are batched in blocks, where each | |||
block contains a cryptographic hash of the previous one, forming a | |||
chain. In practice, the blockchain data structure actually optimizes BFT | |||
design. | |||
Tendermint consists of two chief technical components: a blockchain | |||
consensus engine and a generic application interface. The consensus | |||
engine, called Tendermint Core, ensures that the same transactions are | |||
recorded on every machine in the same order. The application interface, | |||
called the Application BlockChain Interface (ABCI), enables the | |||
transactions to be processed in any programming language. Unlike other | |||
blockchain and consensus solutions, which come pre-packaged with built | |||
in state machines (like a fancy key-value store, or a quirky scripting | |||
language), developers can use Tendermint for BFT state machine | |||
replication of applications written in whatever programming language and | |||
development environment is right for them. | |||
Tendermint is designed to be easy-to-use, simple-to-understand, highly | |||
performant, and useful for a wide variety of distributed applications. | |||
## Tendermint vs. X | |||
Tendermint is broadly similar to two classes of software. The first | |||
class consists of distributed key-value stores, like Zookeeper, etcd, | |||
and consul, which use non-BFT consensus. The second class is known as | |||
"blockchain technology", and consists of both cryptocurrencies like | |||
Bitcoin and Ethereum, and alternative distributed ledger designs like | |||
Hyperledger's Burrow. | |||
### Zookeeper, etcd, consul | |||
Zookeeper, etcd, and consul are all implementations of a key-value store | |||
atop a classical, non-BFT consensus algorithm. Zookeeper uses a version | |||
of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use | |||
the Raft consensus algorithm, which is much younger and simpler. A | |||
typical cluster contains 3-5 machines, and can tolerate crash failures | |||
in up to 1/2 of the machines, but even a single Byzantine fault can | |||
destroy the system. | |||
Each offering provides a slightly different implementation of a | |||
featureful key-value store, but all are generally focused around | |||
providing basic services to distributed systems, such as dynamic | |||
configuration, service discovery, locking, leader-election, and so on. | |||
Tendermint is in essence similar software, but with two key differences: | |||
- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a | |||
1/3 of failures, but those failures can include arbitrary behaviour - | |||
including hacking and malicious attacks. - It does not specify a | |||
particular application, like a fancy key-value store. Instead, it | |||
focuses on arbitrary state machine replication, so developers can build | |||
the application logic that's right for them, from key-value store to | |||
cryptocurrency to e-voting platform and beyond. | |||
The layout of this Tendermint website content is also ripped directly | |||
and without shame from [consul.io](https://www.consul.io/) and the other | |||
[Hashicorp sites](https://www.hashicorp.com/#tools). | |||
### Bitcoin, Ethereum, etc. | |||
Tendermint emerged in the tradition of cryptocurrencies like Bitcoin, | |||
Ethereum, etc. with the goal of providing a more efficient and secure | |||
consensus algorithm than Bitcoin's Proof of Work. In the early days, | |||
Tendermint had a simple currency built in, and to participate in | |||
consensus, users had to "bond" units of the currency into a security | |||
deposit which could be revoked if they misbehaved - this is what made | |||
Tendermint a Proof-of-Stake algorithm. | |||
Since then, Tendermint has evolved to be a general purpose blockchain | |||
consensus engine that can host arbitrary application states. That means | |||
it can be used as a plug-and-play replacement for the consensus engines | |||
of other blockchain software. So one can take the current Ethereum code | |||
base, whether in Rust, or Go, or Haskell, and run it as an ABCI | |||
application using Tendermint consensus. Indeed, [we did that with | |||
Ethereum](https://github.com/cosmos/ethermint). And we plan to do | |||
the same for Bitcoin, ZCash, and various other deterministic | |||
applications as well. | |||
Another example of a cryptocurrency application built on Tendermint is | |||
[the Cosmos network](http://cosmos.network). | |||
### Other Blockchain Projects | |||
[Fabric](https://github.com/hyperledger/fabric) takes a similar approach | |||
to Tendermint, but is more opinionated about how the state is managed, | |||
and requires that all application behaviour runs in potentially many | |||
docker containers, modules it calls "chaincode". It uses an | |||
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf). | |||
from a team at IBM that is [augmented to handle potentially | |||
non-deterministic | |||
chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf). It is | |||
possible to implement this docker-based behaviour as an ABCI app in | |||
Tendermint, though extending Tendermint to handle non-determinism | |||
remains for future work. | |||
[Burrow](https://github.com/hyperledger/burrow) is an implementation of | |||
the Ethereum Virtual Machine and Ethereum transaction mechanics, with | |||
additional features for a name-registry, permissions, and native | |||
contracts, and an alternative blockchain API. It uses Tendermint as its | |||
consensus engine, and provides a particular application state. | |||
## ABCI Overview | |||
The [Application BlockChain Interface | |||
(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci) | |||
allows for Byzantine Fault Tolerant replication of applications | |||
written in any programming language. | |||
### Motivation | |||
Thus far, all blockchains "stacks" (such as | |||
[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic | |||
design. That is, each blockchain stack is a single program that handles | |||
all the concerns of a decentralized ledger; this includes P2P | |||
connectivity, the "mempool" broadcasting of transactions, consensus on | |||
the most recent block, account balances, Turing-complete contracts, | |||
user-level permissions, etc. | |||
Using a monolithic architecture is typically bad practice in computer | |||
science. It makes it difficult to reuse components of the code, and | |||
attempts to do so result in complex maintenance procedures for forks of | |||
the codebase. This is especially true when the codebase is not modular | |||
in design and suffers from "spaghetti code". | |||
Another problem with monolithic design is that it limits you to the | |||
language of the blockchain stack (or vice versa). In the case of | |||
Ethereum which supports a Turing-complete bytecode virtual-machine, it | |||
limits you to languages that compile down to that bytecode; today, those | |||
are Serpent and Solidity. | |||
In contrast, our approach is to decouple the consensus engine and P2P | |||
layers from the details of the application state of the particular | |||
blockchain application. We do this by abstracting away the details of | |||
the application to an interface, which is implemented as a socket | |||
protocol. | |||
Thus we have an interface, the Application BlockChain Interface (ABCI), | |||
and its primary implementation, the Tendermint Socket Protocol (TSP, or | |||
Teaspoon). | |||
### Intro to ABCI | |||
[Tendermint Core](https://github.com/tendermint/tendermint) (the | |||
"consensus engine") communicates with the application via a socket | |||
protocol that satisfies the ABCI. | |||
To draw an analogy, let's talk about a well-known cryptocurrency, | |||
Bitcoin. Bitcoin is a cryptocurrency blockchain where each node | |||
maintains a fully audited Unspent Transaction Output (UTXO) database. If | |||
one wanted to create a Bitcoin-like system on top of ABCI, Tendermint | |||
Core would be responsible for | |||
- Sharing blocks and transactions between nodes | |||
- Establishing a canonical/immutable order of transactions | |||
(the blockchain) | |||
The application will be responsible for | |||
- Maintaining the UTXO database | |||
- Validating cryptographic signatures of transactions | |||
- Preventing transactions from spending non-existent transactions | |||
- Allowing clients to query the UTXO database. | |||
Tendermint is able to decompose the blockchain design by offering a very | |||
simple API (ie. the ABCI) between the application process and consensus | |||
process. | |||
The ABCI consists of 3 primary message types that get delivered from the | |||
core to the application. The application replies with corresponding | |||
response messages. | |||
The messages are specified here: [ABCI Message | |||
Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types). | |||
The **DeliverTx** message is the work horse of the application. Each | |||
transaction in the blockchain is delivered with this message. The | |||
application needs to validate each transaction received with the | |||
**DeliverTx** message against the current state, application protocol, | |||
and the cryptographic credentials of the transaction. A validated | |||
transaction then needs to update the application state — by binding a | |||
value into a key-value store, or by updating the UTXO database, for | |||
instance. | |||
The **CheckTx** message is similar to **DeliverTx**, but it's only for | |||
validating transactions. Tendermint Core's mempool first checks the | |||
validity of a transaction with **CheckTx**, and only relays valid | |||
transactions to its peers. For instance, an application may check an | |||
incrementing sequence number in the transaction and return an error upon | |||
**CheckTx** if the sequence number is old. Alternatively, they might use | |||
a capabilities based system that requires capabilities to be renewed | |||
with every transaction. | |||
The **Commit** message is used to compute a cryptographic commitment to | |||
the current application state, to be placed into the next block header. | |||
This has some handy properties. Inconsistencies in updating that state | |||
will now appear as blockchain forks which catches a whole class of | |||
programming errors. This also simplifies the development of secure | |||
lightweight clients, as Merkle-hash proofs can be verified by checking | |||
against the block hash, and that the block hash is signed by a quorum. | |||
There can be multiple ABCI socket connections to an application. | |||
Tendermint Core creates three ABCI connections to the application; one | |||
for the validation of transactions when broadcasting in the mempool, one | |||
for the consensus engine to run block proposals, and one more for | |||
querying the application state. | |||
It's probably evident that application designers need to very carefully | |||
design their message handlers to create a blockchain that does anything | |||
useful but this architecture provides a place to start. The diagram | |||
below illustrates the flow of messages via ABCI. | |||
![](../imgs/abci.png) | |||
## A Note on Determinism | |||
The logic for blockchain transaction processing must be deterministic. | |||
If the application logic weren't deterministic, consensus would not be | |||
reached among the Tendermint Core replica nodes. | |||
Solidity on Ethereum is a great language of choice for blockchain | |||
applications because, among other reasons, it is a completely | |||
deterministic programming language. However, it's also possible to | |||
create deterministic applications using existing popular languages like | |||
Java, C++, Python, or Go. Game programmers and blockchain developers are | |||
already familiar with creating deterministic programs by avoiding | |||
sources of non-determinism such as: | |||
- random number generators (without deterministic seeding) | |||
- race conditions on threads (or avoiding threads altogether) | |||
- the system clock | |||
- uninitialized memory (in unsafe programming languages like C | |||
or C++) | |||
- [floating point | |||
arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/) | |||
- language features that are random (e.g. map iteration in Go) | |||
While programmers can avoid non-determinism by being careful, it is also | |||
possible to create a special linter or static analyzer for each language | |||
to check for determinism. In the future we may work with partners to | |||
create such tools. | |||
## Consensus Overview | |||
Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus | |||
protocol. The protocol follows a simple state machine that looks like | |||
this: | |||
![](../imgs/consensus_logic.png) | |||
Participants in the protocol are called **validators**; they take turns | |||
proposing blocks of transactions and voting on them. Blocks are | |||
committed in a chain, with one block at each **height**. A block may | |||
fail to be committed, in which case the protocol moves to the next | |||
**round**, and a new validator gets to propose a block for that height. | |||
Two stages of voting are required to successfully commit a block; we | |||
call them **pre-vote** and **pre-commit**. A block is committed when | |||
more than 2/3 of validators pre-commit for the same block in the same | |||
round. | |||
There is a picture of a couple doing the polka because validators are | |||
doing something like a polka dance. When more than two-thirds of the | |||
validators pre-vote for the same block, we call that a **polka**. Every | |||
pre-commit must be justified by a polka in the same round. | |||
Validators may fail to commit a block for a number of reasons; the | |||
current proposer may be offline, or the network may be slow. Tendermint | |||
allows them to establish that a validator should be skipped. Validators | |||
wait a small amount of time to receive a complete proposal block from | |||
the proposer before voting to move to the next round. This reliance on a | |||
timeout is what makes Tendermint a weakly synchronous protocol, rather | |||
than an asynchronous one. However, the rest of the protocol is | |||
asynchronous, and validators only make progress after hearing from more | |||
than two-thirds of the validator set. A simplifying element of | |||
Tendermint is that it uses the same mechanism to commit a block as it | |||
does to skip to the next round. | |||
Assuming less than one-third of the validators are Byzantine, Tendermint | |||
guarantees that safety will never be violated - that is, validators will | |||
never commit conflicting blocks at the same height. To do this it | |||
introduces a few **locking** rules which modulate which paths can be | |||
followed in the flow diagram. Once a validator precommits a block, it is | |||
locked on that block. Then, | |||
1. it must prevote for the block it is locked on | |||
2. it can only unlock, and precommit for a new block, if there is a | |||
polka for that block in a later round | |||
## Stake | |||
In many systems, not all validators will have the same "weight" in the | |||
consensus protocol. Thus, we are not so much interested in one-third or | |||
two-thirds of the validators, but in those proportions of the total | |||
voting power, which may not be uniformly distributed across individual | |||
validators. | |||
Since Tendermint can replicate arbitrary applications, it is possible to | |||
define a currency, and denominate the voting power in that currency. | |||
When voting power is denominated in a native currency, the system is | |||
often referred to as Proof-of-Stake. Validators can be forced, by logic | |||
in the application, to "bond" their currency holdings in a security | |||
deposit that can be destroyed if they're found to misbehave in the | |||
consensus protocol. This adds an economic element to the security of the | |||
protocol, allowing one to quantify the cost of violating the assumption | |||
that less than one-third of voting power is Byzantine. | |||
The [Cosmos Network](https://cosmos.network) is designed to use this | |||
Proof-of-Stake mechanism across an array of cryptocurrencies implemented | |||
as ABCI applications. | |||
The following diagram is Tendermint in a (technical) nutshell. [See here | |||
for high resolution | |||
version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). | |||
![](../imgs/tm-transaction-flow.png) |
@ -0,0 +1,9 @@ | |||
# Overview | |||
Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your | |||
local machine. | |||
Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint | |||
testnets to the cloud. | |||
See the `tendermint testnet --help` command for more help initializing testnets. |
@ -0,0 +1,85 @@ | |||
# Docker Compose | |||
With Docker Compose, we can spin up local testnets in a single command: | |||
``` | |||
make localnet-start | |||
``` | |||
## Requirements | |||
- [Install tendermint](/docs/install.md) | |||
- [Install docker](https://docs.docker.com/engine/installation/) | |||
- [Install docker-compose](https://docs.docker.com/compose/install/) | |||
## Build | |||
Build the `tendermint` binary and the `tendermint/localnode` docker image. | |||
Note the binary will be mounted into the container so it can be updated without | |||
rebuilding the image. | |||
``` | |||
cd $GOPATH/src/github.com/tendermint/tendermint | |||
# Build the linux binary in ./build | |||
make build-linux | |||
# Build tendermint/localnode image | |||
make build-docker-localnode | |||
``` | |||
## Run a testnet | |||
To start a 4 node testnet run: | |||
``` | |||
make localnet-start | |||
``` | |||
The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the host. | |||
This file creates a 4-node network using the localnode image. | |||
The nodes of the network expose their P2P and RPC endpoints to the host machine on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively. | |||
To update the binary, just rebuild it and restart the nodes: | |||
``` | |||
make build-linux | |||
make localnet-stop | |||
make localnet-start | |||
``` | |||
## Configuration | |||
The `make localnet-start` creates files for a 4-node testnet in `./build` by calling the `tendermint testnet` command. | |||
The `./build` directory is mounted to the `/tendermint` mount point to attach the binary and config files to the container. | |||
For instance, to create a single node testnet: | |||
``` | |||
cd $GOPATH/src/github.com/tendermint/tendermint | |||
# Clear the build folder | |||
rm -rf ./build | |||
# Build binary | |||
make build-linux | |||
# Create configuration | |||
docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1 | |||
#Run the node | |||
docker run -v `pwd`/build:/tendermint tendermint/localnode | |||
``` | |||
## Logging | |||
Log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen. | |||
## Special binaries | |||
If you have multiple binaries with different names, you can specify which one to run with the BINARY environment variable. The path of the binary is relative to the attached volume. | |||