Release/v0.32.4 (release/v0.32.5 ← v0.32.4)
@@ -0,0 +1,28 @@
FROM amazonlinux:2

RUN yum -y update && \
    yum -y install wget

RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
    rpm -ivh epel-release-latest-7.noarch.rpm

RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which

ENV GOVERSION=1.12.9

RUN cd /tmp && \
    wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
    tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
    mkdir -p /go/src && \
    mkdir -p /go/bin

ENV PATH=$PATH:/usr/local/go/bin:/go/bin
ENV GOBIN=/go/bin
ENV GOPATH=/go/src

RUN mkdir -p /tendermint
WORKDIR /tendermint

CMD ["/usr/bin/make", "build_c"]
@@ -0,0 +1,124 @@
package v2

import (
    "github.com/go-kit/kit/metrics"
    "github.com/go-kit/kit/metrics/discard"
    "github.com/go-kit/kit/metrics/prometheus"
    stdprometheus "github.com/prometheus/client_golang/prometheus"
)

const (
    // MetricsSubsystem is a subsystem shared by all metrics exposed by this
    // package.
    MetricsSubsystem = "blockchain"
)

// Metrics contains metrics exposed by this package.
type Metrics struct {
    // events_in
    EventsIn metrics.Counter
    // events_handled
    EventsHandled metrics.Counter
    // events_out
    EventsOut metrics.Counter
    // errors_in
    ErrorsIn metrics.Counter
    // errors_handled
    ErrorsHandled metrics.Counter
    // errors_out
    ErrorsOut metrics.Counter
    // events_shed
    EventsShed metrics.Counter
    // events_sent
    EventsSent metrics.Counter
    // errors_sent
    ErrorsSent metrics.Counter
    // errors_shed
    ErrorsShed metrics.Counter
}

// PrometheusMetrics returns Metrics built using the Prometheus client library.
// XXX: Can we burn in the routine name here?
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
    labels := []string{}
    for i := 0; i < len(labelsAndValues); i += 2 {
        labels = append(labels, labelsAndValues[i])
    }
    return &Metrics{
        EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "events_in",
            Help:      "Events read from the channel.",
        }, labels).With(labelsAndValues...),
        EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "events_handled",
            Help:      "Events handled.",
        }, labels).With(labelsAndValues...),
        EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "events_out",
            Help:      "Events output from routine.",
        }, labels).With(labelsAndValues...),
        ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "errors_in",
            Help:      "Errors read from the channel.",
        }, labels).With(labelsAndValues...),
        ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "errors_handled",
            Help:      "Errors handled.",
        }, labels).With(labelsAndValues...),
        ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "errors_out",
            Help:      "Errors output from routine.",
        }, labels).With(labelsAndValues...),
        ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "errors_sent",
            Help:      "Errors sent to routine.",
        }, labels).With(labelsAndValues...),
        ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "errors_shed",
            Help:      "Errors dropped from sending.",
        }, labels).With(labelsAndValues...),
        EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "events_sent",
            Help:      "Events sent to routine.",
        }, labels).With(labelsAndValues...),
        EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "events_shed",
            Help:      "Events dropped from sending.",
        }, labels).With(labelsAndValues...),
    }
}

// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
    return &Metrics{
        EventsIn:      discard.NewCounter(),
        EventsHandled: discard.NewCounter(),
        EventsOut:     discard.NewCounter(),
        ErrorsIn:      discard.NewCounter(),
        ErrorsHandled: discard.NewCounter(),
        ErrorsOut:     discard.NewCounter(),
        EventsShed:    discard.NewCounter(),
        EventsSent:    discard.NewCounter(),
        ErrorsSent:    discard.NewCounter(),
        ErrorsShed:    discard.NewCounter(),
    }
}
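A minimal usage sketch, not part of this change set (the function and the label values are placeholders): `PrometheusMetrics` registers counters named `namespace_blockchain_*`, with label names taken from the odd positions of `labelsAndValues`, while `NopMetrics` discards everything.

```go
func metricsUsageSketch() {
    // Prometheus-backed metrics, labeled by routine name; counts surface as
    // tendermint_blockchain_events_in{routine="scheduler"}.
    m := PrometheusMetrics("tendermint", "routine", "scheduler")
    m.EventsIn.Add(1)

    // No-op metrics for tests or when telemetry is disabled.
    nop := NopMetrics()
    nop.EventsIn.Add(1) // silently discarded
}
```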
@@ -0,0 +1,117 @@
package v2

import (
    "fmt"
    "time"

    "github.com/tendermint/tendermint/libs/log"
)

type timeCheck struct {
    priorityHigh
    time time.Time
}

func schedulerHandle(event Event) (Event, error) {
    if _, ok := event.(timeCheck); ok {
        fmt.Println("scheduler handle timeCheck")
    }
    return noOp, nil
}

func processorHandle(event Event) (Event, error) {
    if _, ok := event.(timeCheck); ok {
        fmt.Println("processor handle timeCheck")
    }
    return noOp, nil
}

// Reactor ties together the scheduler and processor routines and
// demultiplexes events between them.
type Reactor struct {
    events    chan Event
    stopDemux chan struct{}
    scheduler *Routine
    processor *Routine
    ticker    *time.Ticker
    logger    log.Logger
}

func NewReactor(bufferSize int) *Reactor {
    return &Reactor{
        events:    make(chan Event, bufferSize),
        stopDemux: make(chan struct{}),
        scheduler: newRoutine("scheduler", schedulerHandle, bufferSize),
        processor: newRoutine("processor", processorHandle, bufferSize),
        ticker:    time.NewTicker(1 * time.Second),
        logger:    log.NewNopLogger(),
    }
}

// nolint:unused
func (r *Reactor) setLogger(logger log.Logger) {
    r.logger = logger
    r.scheduler.setLogger(logger)
    r.processor.setLogger(logger)
}

// Start spins up the scheduler, processor, and demux goroutines, waits for
// both routines to signal readiness, and then starts emitting periodic
// timeCheck events.
func (r *Reactor) Start() {
    go r.scheduler.start()
    go r.processor.start()
    go r.demux()

    <-r.scheduler.ready()
    <-r.processor.ready()

    go func() {
        for t := range r.ticker.C {
            r.events <- timeCheck{time: t}
        }
    }()
}

// XXX: Would it be possible here to provide some kind of type safety for the
// types of events that each routine can produce and consume?
func (r *Reactor) demux() {
    for {
        select {
        case event := <-r.events:
            // XXX: check for backpressure
            r.scheduler.send(event)
            r.processor.send(event)
        case <-r.stopDemux:
            r.logger.Info("demuxing stopped")
            return
        case event := <-r.scheduler.next():
            r.processor.send(event)
        case event := <-r.processor.next():
            r.scheduler.send(event)
        case err := <-r.scheduler.final():
            r.logger.Info(fmt.Sprintf("scheduler final %s", err))
        case err := <-r.processor.final():
            r.logger.Info(fmt.Sprintf("processor final %s", err))
            // XXX: switch to consensus
        }
    }
}

func (r *Reactor) Stop() {
    r.logger.Info("reactor stopping")

    r.ticker.Stop()
    r.scheduler.stop()
    r.processor.stop()
    close(r.stopDemux)
    close(r.events)

    r.logger.Info("reactor stopped")
}

func (r *Reactor) Receive(event Event) {
    // XXX: decode and serialize write events
    // TODO: backpressure
    r.events <- event
}

func (r *Reactor) AddPeer() {
    // TODO: add peer event and send to demuxer
}
@@ -0,0 +1,22 @@
package v2

import (
    "testing"
)

func TestReactor(t *testing.T) {
    var (
        bufferSize = 10
        reactor    = NewReactor(bufferSize)
    )

    reactor.Start()
    script := []Event{
        // TODO
    }

    for _, event := range script {
        reactor.Receive(event)
    }
    reactor.Stop()
}
@@ -0,0 +1,134 @@
package v2

import (
    "fmt"
    "sync/atomic"

    "github.com/Workiva/go-datastructures/queue"

    "github.com/tendermint/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

// A Routine models a finite state machine as a serialized stream of events
// processed by a handle function. The Routine structure handles the
// concurrency and messaging guarantees. Events sent via `send` are handled
// by the `handle` function to produce an iterator `next()`. Calling `stop()`
// on a routine will conclude processing of all sent events and produce a
// `final()` event representing the terminal state.
type Routine struct {
    name    string
    handle  handleFunc
    queue   *queue.PriorityQueue
    out     chan Event
    fin     chan error
    rdy     chan struct{}
    running *uint32
    logger  log.Logger
    metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
    return &Routine{
        name:    name,
        handle:  handleFunc,
        queue:   queue.NewPriorityQueue(bufferSize, true),
        out:     make(chan Event, bufferSize),
        rdy:     make(chan struct{}, 1),
        fin:     make(chan error, 1),
        running: new(uint32),
        logger:  log.NewNopLogger(),
        metrics: NopMetrics(),
    }
}

// nolint: unused
func (rt *Routine) setLogger(logger log.Logger) {
    rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
    rt.metrics = metrics
}

func (rt *Routine) start() {
    rt.logger.Info(fmt.Sprintf("%s: run\n", rt.name))
    // Guard against double starts with a compare-and-swap on the running flag.
    running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
    if !running {
        panic(fmt.Sprintf("%s is already running", rt.name))
    }
    close(rt.rdy)
    defer func() {
        stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
        if !stopped {
            panic(fmt.Sprintf("%s failed to stop", rt.name))
        }
    }()

    for {
        events, err := rt.queue.Get(1)
        if err != nil {
            // Get returns an error once the queue has been disposed by stop().
            rt.logger.Info(fmt.Sprintf("%s: stopping\n", rt.name))
            rt.terminate(fmt.Errorf("stopped"))
            return
        }
        oEvent, err := rt.handle(events[0].(Event))
        rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
        if err != nil {
            rt.terminate(err)
            return
        }
        rt.metrics.EventsOut.With("routine", rt.name).Add(1)
        rt.logger.Debug(fmt.Sprintf("%s produced %T %+v\n", rt.name, oEvent, oEvent))

        rt.out <- oEvent
    }
}

// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
    rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
    if !rt.isRunning() {
        return false
    }
    err := rt.queue.Put(event)
    if err != nil {
        rt.metrics.EventsShed.With("routine", rt.name).Add(1)
        rt.logger.Info(fmt.Sprintf("%s: send failed, queue was full/stopped\n", rt.name))
        return false
    }

    rt.metrics.EventsSent.With("routine", rt.name).Add(1)
    return true
}

func (rt *Routine) isRunning() bool {
    return atomic.LoadUint32(rt.running) == 1
}

func (rt *Routine) next() chan Event {
    return rt.out
}

func (rt *Routine) ready() chan struct{} {
    return rt.rdy
}

func (rt *Routine) stop() {
    if !rt.isRunning() {
        return
    }

    rt.logger.Info(fmt.Sprintf("%s: stop\n", rt.name))
    rt.queue.Dispose() // this should block until all queue items are free?
}

func (rt *Routine) final() chan error {
    return rt.fin
}

// XXX: Maybe get rid of this
func (rt *Routine) terminate(reason error) {
    close(rt.out)
    rt.fin <- reason
}
@@ -0,0 +1,163 @@
package v2

import (
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

type eventA struct {
    priorityNormal
}

var done = fmt.Errorf("done")

func simpleHandler(event Event) (Event, error) {
    if _, ok := event.(eventA); ok {
        return noOp, done
    }
    return noOp, nil
}

func TestRoutineFinal(t *testing.T) {
    var (
        bufferSize = 10
        routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
    )

    assert.False(t, routine.isRunning(),
        "expected an initialized routine to not be running")
    go routine.start()
    <-routine.ready()
    assert.True(t, routine.isRunning(),
        "expected a started routine to be running")

    assert.True(t, routine.send(eventA{}),
        "expected sending to a ready routine to succeed")

    assert.Equal(t, done, <-routine.final(),
        "expected the final event to be done")

    assert.False(t, routine.isRunning(),
        "expected a completed routine to no longer be running")
}

func TestRoutineStop(t *testing.T) {
    var (
        bufferSize = 10
        routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
    )

    assert.False(t, routine.send(eventA{}),
        "expected sending to an unstarted routine to fail")

    go routine.start()
    <-routine.ready()

    assert.True(t, routine.send(eventA{}),
        "expected sending to a running routine to succeed")

    routine.stop()

    assert.False(t, routine.send(eventA{}),
        "expected sending to a stopped routine to fail")
}

type finalCount struct {
    count int
}

func (f finalCount) Error() string {
    return "end"
}

func genStatefulHandler(maxCount int) handleFunc {
    counter := 0
    return func(event Event) (Event, error) {
        if _, ok := event.(eventA); ok {
            counter += 1
            if counter >= maxCount {
                return noOp, finalCount{counter}
            }
            return eventA{}, nil
        }
        return noOp, nil
    }
}

func feedback(r *Routine) {
    for event := range r.next() {
        r.send(event)
    }
}

func TestStatefulRoutine(t *testing.T) {
    var (
        count      = 10
        handler    = genStatefulHandler(count)
        bufferSize = 20
        routine    = newRoutine("statefulRoutine", handler, bufferSize)
    )

    go routine.start()
    go feedback(routine)
    <-routine.ready()

    assert.True(t, routine.send(eventA{}),
        "expected sending to a started routine to succeed")

    final := <-routine.final()
    if fnl, ok := final.(finalCount); ok {
        assert.Equal(t, count, fnl.count,
            "expected the routine to count to 10")
    } else {
        t.Fail()
    }
}

type lowPriorityEvent struct {
    priorityLow
}

type highPriorityEvent struct {
    priorityHigh
}

func handleWithPriority(event Event) (Event, error) {
    switch event.(type) {
    case lowPriorityEvent:
        return noOp, nil
    case highPriorityEvent:
        return noOp, done
    }
    return noOp, nil
}

func TestPriority(t *testing.T) {
    var (
        bufferSize = 20
        routine    = newRoutine("priorityRoutine", handleWithPriority, bufferSize)
    )

    go routine.start()
    <-routine.ready()
    go func() {
        for {
            routine.send(lowPriorityEvent{})
            time.Sleep(1 * time.Millisecond)
        }
    }()
    time.Sleep(10 * time.Millisecond)

    assert.True(t, routine.isRunning(),
        "expected a started routine to be running")
    assert.True(t, routine.send(highPriorityEvent{}),
        "expected send to succeed even when saturated")

    assert.Equal(t, done, <-routine.final())
    assert.False(t, routine.isRunning(),
        "expected the routine to have stopped")
}
@@ -0,0 +1,64 @@
package v2

import (
    "github.com/Workiva/go-datastructures/queue"
)

type Event queue.Item

type priority interface {
    Compare(other queue.Item) int
    Priority() int
}

type priorityLow struct{}
type priorityNormal struct{}
type priorityHigh struct{}

func (p priorityLow) Priority() int {
    return 1
}

func (p priorityNormal) Priority() int {
    return 2
}

func (p priorityHigh) Priority() int {
    return 3
}

func (p priorityLow) Compare(other queue.Item) int {
    op := other.(priority)
    if p.Priority() > op.Priority() {
        return 1
    } else if p.Priority() == op.Priority() {
        return 0
    }
    return -1
}

func (p priorityNormal) Compare(other queue.Item) int {
    op := other.(priority)
    if p.Priority() > op.Priority() {
        return 1
    } else if p.Priority() == op.Priority() {
        return 0
    }
    return -1
}

func (p priorityHigh) Compare(other queue.Item) int {
    op := other.(priority)
    if p.Priority() > op.Priority() {
        return 1
    } else if p.Priority() == op.Priority() {
        return 0
    }
    return -1
}

type noOpEvent struct {
    priorityLow
}

var noOp = noOpEvent{}
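Because `Event` is an alias for `queue.Item`, a concrete event satisfies the interface simply by embedding one of the priority structs, as `timeCheck`, `eventA`, and `noOpEvent` do elsewhere in this package. A minimal sketch (the `peerErrorEvent` name and its field are hypothetical, not part of this diff):

```go
// peerErrorEvent inherits Compare and Priority from the embedded
// priorityHigh, so the priority queue serves it ahead of normal- and
// low-priority events.
type peerErrorEvent struct {
    priorityHigh
    peerID string // hypothetical payload
}
```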
@@ -0,0 +1,53 @@
// nolint: dupl
package merkle

import (
    "bytes"
    "encoding/json"

    "github.com/gogo/protobuf/jsonpb"
)

//---------------------------------------------------------------------------
// override JSON marshalling so we emit defaults (ie. disable omitempty)

var (
    jsonpbMarshaller = jsonpb.Marshaler{
        EnumsAsInts:  true,
        EmitDefaults: true,
    }
    jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)

func (r *ProofOp) MarshalJSON() ([]byte, error) {
    s, err := jsonpbMarshaller.MarshalToString(r)
    return []byte(s), err
}

func (r *ProofOp) UnmarshalJSON(b []byte) error {
    reader := bytes.NewBuffer(b)
    return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *Proof) MarshalJSON() ([]byte, error) {
    s, err := jsonpbMarshaller.MarshalToString(r)
    return []byte(s), err
}

func (r *Proof) UnmarshalJSON(b []byte) error {
    reader := bytes.NewBuffer(b)
    return jsonpbUnmarshaller.Unmarshal(reader, r)
}

// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.

// jsonRoundTripper ensures that asserted
// interfaces implement both MarshalJSON and UnmarshalJSON
type jsonRoundTripper interface {
    json.Marshaler
    json.Unmarshaler
}

var _ jsonRoundTripper = (*ProofOp)(nil)
var _ jsonRoundTripper = (*Proof)(nil)
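Since `*ProofOp` and `*Proof` implement both halves of `jsonRoundTripper`, plain `encoding/json` calls are routed through the jsonpb marshaller above, so default-valued fields are emitted instead of omitted. A minimal round-trip sketch (the function itself is illustrative, not part of this diff):

```go
func proofOpRoundTripSketch() error {
    op := &ProofOp{}            // zero value; EmitDefaults keeps its fields in the output
    bz, err := json.Marshal(op) // dispatches to (*ProofOp).MarshalJSON
    if err != nil {
        return err
    }
    var back ProofOp
    return json.Unmarshal(bz, &back) // dispatches to (*ProofOp).UnmarshalJSON
}
```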
@@ -0,0 +1,33 @@
# Developer Sessions

The Tendermint Core developer call brings together [Interchain
Foundation](http://interchain.io/) and [All in Bits](https://tendermint.com/)
team members to discuss the development of [Tendermint
BFT](https://github.com/tendermint/tendermint) and related research. The goal
of the Tendermint Core developer calls is to provide transparency into the
decision-making process, technical information, update cycles, and so on.

## List

| Date          | Topic                                     | Link(s) |
| ------------- | ----------------------------------------- | ------- |
| August 2019   | Part Three: Tendermint Lite Client        | [YouTube](https://www.youtube.com/watch?v=whyL6UrKe7I&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) |
| August 2019   | Fork Accountability                       | [YouTube](https://www.youtube.com/watch?v=Jph-4PGtdPo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) |
| July 2019     | Part Two: Tendermint Lite Client          | [YouTube](https://www.youtube.com/watch?v=gTjG7jNNdKQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) |
| July 2019     | Part One: Tendermint Lite Client          | [YouTube](https://www.youtube.com/watch?v=C6fH_sgPJzA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) |
| June 2019     | Testnet Deployments                       | [YouTube](https://www.youtube.com/watch?v=gYA6no7tRlM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) |
| June 2019     | Blockchain Reactor Refactor               | [YouTube](https://www.youtube.com/watch?v=JLBGH8yxABk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) |
| June 2019     | Tendermint Rust Libraries                 | [YouTube](https://www.youtube.com/watch?v=-WXKdyoGHwA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) |
| May 2019      | Merkle Tree Deep Dive                     | [YouTube](https://www.youtube.com/watch?v=L3bt2Uw8ICg&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) |
| May 2019      | Remote Signer Refactor                    | [YouTube](https://www.youtube.com/watch?v=eUyXXEEuBzQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=12) |
| May 2019      | Introduction to Ansible                   | [YouTube](https://www.youtube.com/watch?v=72clQLjzPg4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=14&t=0s) |
| April 2019    | Tendermint State Sync Design Discussion   | [YouTube](https://www.youtube.com/watch?v=4k23j2QHwrM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) |
| April 2019    | ADR-036 - Blockchain Reactor Refactor     | [YouTube](https://www.youtube.com/watch?v=TW2xC1LwEkE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) |
| April 2019    | Verifying Distributed Algorithms          | [YouTube](https://www.youtube.com/watch?v=tMd4lgPVBxE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) |
| April 2019    | Byzantine Model Checker Presentation      | [YouTube](https://www.youtube.com/watch?v=rdXl4VCQyow&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) |
| January 2019  | Proposer Selection in Idris               | [YouTube](https://www.youtube.com/watch?v=hWZdc9c1aH8&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) |
| January 2019  | Current Mempool Design                    | [YouTube](https://www.youtube.com/watch?v=--iGIYYiLu4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) |
| December 2018 | ABCI Proxy App                            | [YouTube](https://www.youtube.com/watch?v=s6sQ2HOVHdo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) |
| October 2018  | DB Performance                            | [YouTube](https://www.youtube.com/watch?v=jVSNHi4l0fQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) |
| October 2018  | Alternative Mempool Algorithms            | [YouTube](https://www.youtube.com/watch?v=XxH5ZtM4vMM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=2) |
| October 2018  | Tendermint Termination                    | [YouTube](https://www.youtube.com/watch?v=YBZjecfjeIk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) |
@@ -0,0 +1,319 @@
# Fork accountability -- Problem statement and attacks

## Problem Statement

Tendermint consensus guarantees the following specifications for all heights:

* agreement -- no two correct full nodes decide differently.
* validity -- the decided block satisfies the predefined predicate *valid()*.
* termination -- all correct full nodes eventually decide,

provided that the faulty validators have at most 1/3 of the voting power in the current validator set. In the case where this assumption does not hold, each of the specifications may be violated.

The agreement property says that for a given height, any two correct validators that decide on a block for that height decide on the same block. That the block was indeed generated by the blockchain can be verified starting from a trusted (genesis) block, and checking that all subsequent blocks are properly signed.

However, faulty nodes may forge blocks and try to convince users (lite clients) that the blocks had been correctly generated. In addition, Tendermint agreement might be violated in the case where more than 1/3 of the voting power belongs to faulty validators: two correct validators decide on different blocks. The latter case motivates the term "fork": as Tendermint consensus also agrees on the next validator set, correct validators may have decided on disjoint next validator sets, and the chain branches into two or more partitions (possibly having faulty validators in common), and each branch continues to generate blocks independently of the other.

We say that a fork is a case in which there are two commits for different blocks at the same height of the blockchain. The problem is to ensure that in those cases we are able to detect faulty validators (and not mistakenly accuse correct validators), and thereby incentivize validators to behave according to the protocol specification.

**Conceptual Limit.** In order to prove misbehavior of a node, we have to show that the behavior deviates from correct behavior with respect to a given algorithm. Thus, an algorithm that detects misbehavior of nodes executing some algorithm *A* must be defined with respect to algorithm *A*. In our case, *A* is Tendermint consensus (plus other protocols in the infrastructure, e.g., full nodes and the Lite Client). If the consensus algorithm is changed/updated/optimized in the future, we have to check whether changes to the accountability algorithm are also required. All the discussions in this document are thus inherently specific to Tendermint consensus and the Lite Client specification.

**Q:** Should we distinguish agreement among validators from agreement among full nodes? The case where all correct validators agree on a block, but a correct full node decides on a different block, seems slightly less severe than the case where two correct validators decide on different blocks. Still, if a contaminated full node becomes a validator, that may be problematic later on. Also, it is not clear how gossiping is impaired if a contaminated full node is on a different branch.

*Remark.* In the case where more than 1/3 of the voting power belongs to faulty validators, validity and termination can also be broken. Termination can be broken if faulty processes just do not send the messages that are needed to make progress. Due to asynchrony, this is not punishable, because faulty validators can always claim they never received the messages that would have forced them to send messages.

## The Misbehavior of Faulty Validators

Forks are the result of faulty validators deviating from the protocol. In principle several such deviations can be detected without a fork actually occurring:

1. double proposal: A faulty proposer proposes two different values (blocks) for the same height and the same round in Tendermint consensus.

2. double signing: Tendermint consensus forces correct validators to prevote and precommit for at most one value per round. In case a faulty validator sends multiple prevote and/or precommit messages for different values for the same height/round, this is misbehavior.

3. lunatic validator: Tendermint consensus forces correct validators to prevote and precommit only for values *v* that satisfy *valid(v)*. If faulty validators prevote and precommit for *v* although *valid(v) = false*, this is misbehavior.

*Remark.* In isolation, Point 3 is an attack on validity (rather than agreement). However, the prevotes and precommits can then also be used to forge blocks.

4. amnesia: Tendermint consensus has a locking mechanism. If a validator has some value *v* locked, then it can only prevote/precommit for *v* or nil. Sending a prevote/precommit message for a different value *v'* (that is not nil) while holding a lock on value *v* is misbehavior.

5. spurious messages: In Tendermint consensus most of the message send instructions are guarded by threshold guards, e.g., one needs to receive *2f + 1* prevote messages to send a precommit. Faulty validators may send a precommit without having received the prevote messages.

Independently of a fork happening, punishing this behavior might be important to prevent forks altogether. This should keep attackers from misbehaving: if at most 1/3 of the voting power is faulty, this misbehavior is detectable but will not lead to a safety violation. Thus, unless they have more than 1/3 (or in some cases more than 2/3) of the voting power, attackers have an incentive not to misbehave. If attackers control too much voting power, we have to deal with forks, as discussed in this document.

## Two types of forks

* Fork-Full. Two correct validators decide on different blocks for the same height. Since the next validator sets are also decided upon, the correct validators may be partitioned to participate in two distinct branches of the forked chain.

As in this case we have two different blocks (both having the same right/no right to exist), a central system invariant (one block per height decided by correct validators) is violated. As full nodes are contaminated in this case, the contamination can spread also to lite clients. However, even without breaking this system invariant, lite clients can be subject to a fork:

* Fork-Lite. All correct validators decide on the same block for height *h*, but faulty processes (validators or not) forge a different block for that height, in order to fool users (who use the lite client).

# Attack scenarios

## On-chain attacks

### Equivocation (one round)

There are several scenarios in which forks might happen. The first is double signing within a round.

* F1. Equivocation: faulty validators sign multiple vote messages (prevote and/or precommit) for different values *during the same round r* at a given height *h*.
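Detecting F1 reduces to spotting two signed votes from the same validator for the same height and round that carry different block IDs. A self-contained sketch of that check — the `Vote` type and its fields are illustrative stand-ins, not Tendermint's actual evidence types:

```go
package main

import "fmt"

// Vote is an illustrative stand-in for a signed consensus vote.
type Vote struct {
    ValidatorAddr string
    Height        int64
    Round         int
    BlockID       string // hash of the block the vote endorses
}

// isEquivocation reports whether two votes are F1 evidence: same signer,
// same height and round, but different block IDs.
func isEquivocation(a, b Vote) bool {
    return a.ValidatorAddr == b.ValidatorAddr &&
        a.Height == b.Height &&
        a.Round == b.Round &&
        a.BlockID != b.BlockID
}

func main() {
    v1 := Vote{ValidatorAddr: "val1", Height: 10, Round: 0, BlockID: "blockA"}
    v2 := Vote{ValidatorAddr: "val1", Height: 10, Round: 0, BlockID: "blockB"}
    fmt.Println(isEquivocation(v1, v2)) // true: double signing in round 0
}
```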
### Flip-flopping

Tendermint consensus implements a locking mechanism: if a correct validator *p* receives a proposal for value *v* and *2f + 1* prevotes for *id(v)* in round *r*, it locks *v* and remembers *r*. In this case, *p* also sends a precommit message for *id(v)*, which later may serve as proof that *p* locked *v*.

In subsequent rounds, *p* only sends prevote messages for a value it had previously locked. However, it is possible to change the locked value if, in a future round *r' > r*, the process receives a proposal and *2f + 1* prevotes for a different value *v'*. In this case, *p* could send a prevote/precommit for *id(v')*. This algorithmic feature can be exploited in two ways:

* F2. Faulty Flip-flopping (Amnesia): faulty validators precommit some value *id(v)* in round *r* (value *v* is locked in round *r*) and then prevote for a different value *id(v')* in a higher round *r' > r* without previously correctly unlocking value *v*. In this case faulty processes "forget" that they have locked value *v* and prevote some other value in the following rounds.

Some correct validators might have decided on *v* in *r*, and other correct validators decide on *v'* in *r'*. Here we can have branching on the main chain (Fork-Full).

* F3. Correct Flip-flopping (Back to the past): There are some precommit messages signed by (correct) validators for value *id(v)* in round *r*. Still, *v* is not decided upon, and all processes move on to the next round. Then correct validators (correctly) lock and decide a different value *v'* in some round *r' > r*. And the correct validators continue; there is no branching on the main chain.

However, faulty validators may use the correct precommit messages from round *r*, together with a posteriori generated faulty precommit messages for round *r*, to forge a block for a value that was not decided on the main chain (Fork-Lite).

## Off-chain attacks

F1-F3 may contaminate the state of full nodes (and even validators). Contaminated (but otherwise correct) full nodes may thus communicate faulty blocks to lite clients.

Similarly, without actually interfering with the main chain, we can have the following:

* F4. Phantom validators: faulty validators vote (sign prevote and precommit messages) in heights in which they are not part of the validator sets (on the main chain).

* F5. Lunatic validator: a faulty validator that signs vote messages to support an (arbitrary) application state that is different from the application state that resulted from valid state transitions.

## Types of victims

We consider three types of potential attack victims:

- FN: full node
- LCS: lite client with sequential header verification
- LCB: lite client with bisection-based header verification

F1 and F2 can be used by faulty validators to actually create multiple branches on the blockchain. That means that correctly operating full nodes decide on different blocks for the same height. Until a fork is detected locally by a full node (by receiving evidence from others or by some other local check that fails), the full node can spread corrupted blocks to lite clients.

*Remark.* If full nodes take a branch different from the one taken by the validators, the liveness of the gossip protocol may be affected. We should eventually look at this more closely. However, as it does not influence safety, it is not a primary concern.

F3 is similar to F1, except that no two correct validators decide on different blocks. It may still be the case that full nodes become affected.

In addition, without creating a fork on the main chain, lite clients can be contaminated by more than a third of validators that are faulty and sign a forged header.

F4 cannot fool correct full nodes, as they know the current validator set. Similarly, LCS know who the validators are. Hence, F4 is an attack against LCB, which do not necessarily know the complete prefix of headers (Fork-Lite), as they trust a header that is signed by at least one correct validator (trusting period method).

The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks, so they can corrupt the state of full nodes. Then if a lite client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the lite client.

F4 and F5 are *off-chain*; that is, these attacks cannot be used to corrupt the state of full nodes (which have sufficient knowledge of the state of the chain not to be fooled).

| Attack | FN     | LCS    | LCB    |
|:------:|:------:|:------:|:------:|
| F1     | direct | FN     | FN     |
| F2     | direct | FN     | FN     |
| F3     | direct | FN     | FN     |
| F4     |        |        | direct |
| F5     |        |        | direct |

**Q:** Lite clients are more vulnerable than full nodes, because the former only verify headers but do not execute transactions. What kind of certainty is gained by a full node that executes a transaction?

As a full node verifies all transactions, it can only be contaminated by an attack if the blockchain itself violates its invariant (one block per height), that is, in case of a fork that leads to branching.

## Detailed Attack Scenarios

### Equivocation based attacks

In case of equivocation based attacks, faulty validators sign multiple votes (prevote and/or precommit) in the same round of some height. This attack can be executed on both full nodes and lite clients. It requires more than 1/3 of the voting power to be executed.

#### Scenario 1: Equivocation on the main chain

Validators:

* CA - a set of correct validators with less than 1/3 of the voting power
* CB - a set of correct validators with less than 1/3 of the voting power
* CA and CB are disjoint
* F - a set of faulty validators with more than 1/3 of the voting power

Observe that this setting violates the Tendermint failure model.

Execution:

* A faulty proposer proposes block A to CA.
* A faulty proposer proposes block B to CB.
* Validators from the sets CA and CB prevote for A and B, respectively.
* Faulty validators from the set F prevote both for A and B.
* The faulty prevote messages
  - for A arrive at CA long before the B messages
  - for B arrive at CB long before the A messages
* Therefore correct validators from the sets CA and CB will observe more than 2/3 of prevotes for A and B and precommit for A and B, respectively.
* Faulty validators from the set F precommit both values A and B.
* Thus, we have more than 2/3 commits for both A and B.
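To make the thresholds concrete with illustrative numbers: if CA and CB each hold 30% of the voting power and F holds 40%, then CA together with F controls 70% > 2/3 prevoting and precommitting A, while CB together with F controls 70% > 2/3 doing the same for B, so both blocks gather commits even though each correct validator voted only once per step.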
Consequences:

* Creating evidence of misbehavior is simple in this case, as we have multiple messages signed by the same faulty processes for different values in the same round.
* We have to ensure that these different messages reach a correct process (full node, monitor?), which can submit evidence.
* This is an attack at the full node level (Fork-Full).
* It extends also to the lite clients.
* For both we need a detection and recovery mechanism.

#### Scenario 2: Equivocation to a lite client (LCS)

Validators:

* a set F of faulty validators with more than 2/3 of the voting power.

Execution:

* For the main chain, F behaves nicely.
* F coordinates to sign a block B that is different from the one on the main chain.
* The lite client obtains B and trusts it, as it is signed by more than 2/3 of the voting power.

Consequences:

Once equivocation is used to attack a lite client, it opens space for different kinds of attacks, as the application state can be diverged in any direction. For example, it can modify the validator set such that it contains only validators that do not have any stake bonded. Note that after a lite client is fooled by a fork, an attacker can change the application state and the validator set arbitrarily.

In order to detect such an equivocation-based attack, the lite client would need to cross-check its state with some correct validator (or to obtain a hash of the state from the main chain using out-of-band channels).

*Remark.* The lite client would be able to create evidence of misbehavior, but this would require pulling potentially a lot of data from correct full nodes. Maybe we need to figure out a different architecture where a lite client that is attacked will push all its data for the current unbonding period to a correct node that will inspect this data and submit corresponding evidence. There are also architectures that assume a special role (sometimes called a fisherman) whose goal is to collect as much useful data as possible from the network, to do analysis and create evidence transactions. That functionality is outside the scope of this document.

*Remark.* The difference between LCS and LCB might only be in the amount of voting power needed to convince the lite client about an arbitrary state. In case of LCB, where the security threshold is at its minimum, an attacker can arbitrarily modify the application state with more than 1/3 of the voting power, while in case of LCS it requires more than 2/3 of the voting power.

### Flip-flopping: Amnesia based attacks

In case of amnesia, faulty validators lock some value *v* in some round *r*, and then vote for a different value *v'* in higher rounds without correctly unlocking value *v*. This attack can be used both on full nodes and lite clients.

#### Scenario 3: At most 2/3 of faults

Validators:

* a set F of faulty validators with more than 1/3 but at most 2/3 of the voting power
* a set C of correct validators

Execution:

* Faulty validators commit (without exposing it on the main chain) a block A in round *r* by collecting more than 2/3 of the voting power (containing correct and faulty validators).
* All validators (correct and faulty) reach a round *r' > r*.
* Some correct validators in C do not lock any value before round *r'*.
* The faulty validators in F deviate from Tendermint consensus by ignoring that they locked A in *r*, and propose a different block B in *r'*.
* As the validators in C that have not locked any value find B acceptable, they accept the proposal for B and commit a block B.

*Remark.* In this case, the more than 1/3 of faulty validators do not need to commit an equivocation (F1), as they only vote once per round in the execution.

Detecting faulty validators in the case of such an attack can be done by the fork accountability mechanism described in: https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit?usp=sharing.

If a lite client is attacked using this attack with more than 1/3 of the voting power (and less than 2/3), the attacker cannot change the application state arbitrarily. Rather, the attacker is limited to a state a correct validator finds acceptable: in the execution above, correct validators still find the value acceptable; however, the block the lite client trusts deviates from the one on the main chain.

#### Scenario 4: More than 2/3 of faults

In case there is an attack with more than 2/3 of the voting power, an attacker can arbitrarily change the application state.

Validators:

* a set F1 of faulty validators with more than 1/3 of the voting power
* a set F2 of faulty validators with at most 1/3 of the voting power

Execution:

* Similar to Scenario 3 (however, messages by correct validators are not needed).
* The faulty validators in F1 lock value A in round *r*.
* They sign a different value in follow-up rounds.
* F2 does not lock A in round *r*.

Consequences:

* The validators in F1 will be detectable by the fork accountability mechanisms.
* The validators in F2 cannot be detected using this mechanism. Only in case they signed something that conflicts with the application can this be used against them. Otherwise they do not do anything incorrect.
* This case is not covered by the report https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit?usp=sharing, as it only assumes at most 2/3 of faulty validators.

**Q:** Do we need to define a special kind of attack for the case where a validator signs an arbitrary state? It seems that detecting such an attack requires a different mechanism that would require as evidence a sequence of blocks that led to that state. This might be very tricky to implement.

### Back to the past

In this kind of attack, faulty validators take advantage of the fact that they did not sign messages in some of the past rounds. Due to the asynchronous network in which Tendermint operates, we cannot easily differentiate between such an attack and a delayed message. This kind of attack can be used against both full nodes and lite clients.

#### Scenario 5:

Validators:

* C1 - a set of correct validators with 1/3 of the voting power
* C2 - a set of correct validators with 1/3 of the voting power
* C1 and C2 are disjoint
* F - a set of faulty validators with 1/3 of the voting power
* one additional faulty process *q*
* F and *q* violate the Tendermint failure model.

Execution:

* In a round *r* of height *h* we have C1 precommitting a value A,
* C2 precommits nil,
* F does not send any message,
* *q* precommits nil.
* In some round *r' > r*, F and *q* and C2 commit some other value B different from A.
* F and *q* "go back to the past" and sign precommit messages for value A in round *r*.
* Together with the precommit messages of C1, this is sufficient for a commit for value A.

Consequences:

* Only a single faulty validator that previously precommitted nil committed equivocation, while the other 1/3 of faulty validators actually executed an attack that has exactly the same sequence of messages as part of an amnesia attack. Detecting this kind of attack boils down to the mechanisms for equivocation and amnesia.

**Q:** Should we keep this as a separate kind of attack? It seems that equivocation, amnesia and phantom validators are the only kinds of attack we need to support, and this gives us security also in the other cases. This would not be surprising, as equivocation and amnesia are attacks that follow from the protocol, and the phantom attack is not really an attack on Tendermint but more on the proof-of-stake module.

### Phantom validators

In case of phantom validators, processes that are not part of the current validator set but are still bonded (as the attack happens during their unbonding period) can be part of the attack by signing vote messages. This attack can be executed against both full nodes and lite clients.

#### Scenario 6:

Validators:

* F -- a set of faulty validators that are not part of the validator set on the main chain at height *h + k*

Execution:

* There is a fork, and there exist two different headers for height *h + k*, with different validator sets:
  - VS2 on the main chain
  - forged header VS2', signed by F (and others)
* A lite client has trust in a header for height *h* (and the corresponding validator set VS1).
* As part of bisection header verification, it verifies the header at height *h + k* with the new validator set VS2'.

Consequences:

* To detect this, a node needs to see both the forged header and the canonical header from the chain.
* If this is the case, detecting these kinds of attacks is easy, as it just requires verifying whether processes are signing messages in heights in which they are not part of the validator set.
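A simplified sketch of that membership check — the types and names here are hypothetical, not Tendermint's actual API:

```go
// phantomSigners returns the vote signers that are absent from the canonical
// validator set recorded on the main chain for the header's height; any such
// signer is evidence of a phantom validator (F4).
func phantomSigners(voteAddrs []string, canonicalValSet map[string]bool) []string {
    var phantoms []string
    for _, addr := range voteAddrs {
        if !canonicalValSet[addr] {
            phantoms = append(phantoms, addr)
        }
    }
    return phantoms
}
```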
**Remark.** We can have phantom-validator-based attacks as a follow-up of an equivocation or amnesia based attack, where the forked state contains validators that are not part of the validator set on the main chain. In this case, they keep signing messages contributing to a forked chain (the wrong branch) although they are not part of the validator set on the main chain. This attack can also be used to attack a full node during a period of time in which it is eclipsed.

### Lunatic validator

A lunatic validator agrees to sign commit messages for an arbitrary application state. It is used to attack lite clients. Note that detecting this behavior requires application knowledge. Detecting this behavior can probably be done by referring to the block before the one in which the invalid state transition happened.

**Q:** Can we say that in this case a validator ignores checking whether the proposed value is valid before voting for it?
@@ -0,0 +1,54 @@
// nolint: dupl
// dupl is reading this as the same file as crypto/merkle/result.go
package common

import (
    "bytes"
    "encoding/json"

    "github.com/gogo/protobuf/jsonpb"
)

//---------------------------------------------------------------------------
// override JSON marshalling so we emit defaults (ie. disable omitempty)

var (
    jsonpbMarshaller = jsonpb.Marshaler{
        EnumsAsInts:  true,
        EmitDefaults: true,
    }
    jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)

func (r *KVPair) MarshalJSON() ([]byte, error) {
    s, err := jsonpbMarshaller.MarshalToString(r)
    return []byte(s), err
}

func (r *KVPair) UnmarshalJSON(b []byte) error {
    reader := bytes.NewBuffer(b)
    return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *KI64Pair) MarshalJSON() ([]byte, error) {
    s, err := jsonpbMarshaller.MarshalToString(r)
    return []byte(s), err
}

func (r *KI64Pair) UnmarshalJSON(b []byte) error {
    reader := bytes.NewBuffer(b)
    return jsonpbUnmarshaller.Unmarshal(reader, r)
}

// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.

// jsonRoundTripper ensures that asserted
// interfaces implement both MarshalJSON and UnmarshalJSON
type jsonRoundTripper interface {
    json.Marshaler
    json.Unmarshaler
}

var _ jsonRoundTripper = (*KVPair)(nil)
var _ jsonRoundTripper = (*KI64Pair)(nil)
@@ -1,13 +0,0 @@
---
title: RPC Reference

language_tabs: # must be one of https://git.io/vQNgJ
  - shell
  - go

toc_footers:
  - <a href='https://tendermint.com/'>Tendermint</a>
  - <a href='https://github.com/lord/slate'>Documentation Powered by Slate</a>

search: true
---
@@ -1,67 +0,0 @@
#!/usr/bin/env bash
set -e

# This file downloads all of the binary dependencies we have, and checks out a
# specific git hash.
#
# repos it installs:
# github.com/golang/dep/cmd/dep
# github.com/gogo/protobuf/protoc-gen-gogo
# github.com/square/certstrap
# github.com/mitchellh/gox
# github.com/golangci/golangci-lint
# github.com/petermattis/goid
# github.com/sasha-s/go-deadlock
# goimports

## check if GOPATH is set
if [ -z ${GOPATH+x} ]; then
    echo "please set GOPATH (https://github.com/golang/go/wiki/SettingGOPATH)"
    exit 1
fi

mkdir -p "$GOPATH/src/github.com"
cd "$GOPATH/src/github.com" || exit 1

installFromGithub() {
    repo=$1
    commit=$2
    # optional
    subdir=$3
    echo "--> Installing $repo ($commit)..."
    if [ ! -d "$repo" ]; then
        mkdir -p "$repo"
        git clone "https://github.com/$repo.git" "$repo"
    fi
    if [ ! -z ${subdir+x} ] && [ ! -d "$repo/$subdir" ]; then
        echo "ERROR: no such directory $repo/$subdir"
        exit 1
    fi
    pushd "$repo" && \
        git fetch origin && \
        git checkout -q "$commit" && \
        if [ ! -z ${subdir+x} ]; then cd "$subdir" || exit 1; fi && \
        go install && \
        if [ ! -z ${subdir+x} ]; then cd - || exit 1; fi && \
        popd || exit 1
    echo "--> Done"
    echo ""
}

######################## DEVELOPER TOOLS #####################################

installFromGithub gogo/protobuf 61dbc136cf5d2f08d68a011382652244990a53a9 protoc-gen-gogo
installFromGithub square/certstrap e27060a3643e814151e65b9807b6b06d169580a7

# used to build tm-monitor & tm-bench binaries
installFromGithub mitchellh/gox 51ed453898ca5579fea9ad1f08dff6b121d9f2e8

## golangci-lint v1.13.2
installFromGithub golangci/golangci-lint 7b2421d55194c9dc385eff7720a037aa9244ca3c cmd/golangci-lint

## make test_with_deadlock
## XXX: https://github.com/tendermint/tendermint/issues/3242
installFromGithub petermattis/goid b0b1615b78e5ee59739545bb38426383b2cda4c9
installFromGithub sasha-s/go-deadlock d68e2bc52ae3291765881b9056f2c1527f245f1e
go get golang.org/x/tools/cmd/goimports
installFromGithub snikch/goodman 10e37e294daa3c9a90abded60ff9924bafab3888 cmd/goodman
@@ -0,0 +1,27 @@
#!/bin/bash
set -euo pipefail

f_sha256() {
    local l_file
    l_file=$1
    python -sBc "import hashlib;print(hashlib.sha256(open('$l_file','rb').read()).hexdigest())"
}

installer="$(mktemp)"
trap "rm -f ${installer}" EXIT

GOBIN="${1}"
VERSION="${2}"
HASHSUM="${3}"
CURL="$(which curl)"

echo "Downloading golangci-lint ${VERSION} installer ..." >&2
"${CURL}" -sfL "https://raw.githubusercontent.com/golangci/golangci-lint/${VERSION}/install.sh" > "${installer}"

echo "Checking hashsum ..." >&2
[ "${HASHSUM}" = "$(f_sha256 ${installer})" ]
chmod +x "${installer}"

echo "Launching installer ..." >&2
exec "${installer}" -d -b "${GOBIN}" "${VERSION}"